// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2009  Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/numa_balancing.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/swapops.h>
#include <linux/backing-dev.h>
#include <linux/dax.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/pfn_t.h>
#include <linux/mman.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/migrate.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/page_owner.h>
#include <linux/sched/sysctl.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"
#include "swap.h"

#define CREATE_TRACE_POINTS
#include <trace/events/thp.h>
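/*
 * By default, transparent hugepage support is disabled in order to avoid
 * risking an increased memory footprint for applications that are not
 * guaranteed to benefit from it. When transparent hugepage support is
 * enabled, it is for all mappings, and khugepaged scans all mappings.
 * Defrag is invoked by khugepaged hugepage allocations and by page faults
 * for all hugepage allocations.
 */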
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);

static struct shrinker deferred_split_shrinker;

static atomic_t huge_zero_refcount;
struct page *huge_zero_page __read_mostly;
unsigned long huge_zero_pfn __read_mostly = ~0UL;

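/*
 * Return whether @vma can use transparent hugepages. @smaps and @in_pf
 * indicate whether the check is on behalf of smaps reporting or a page
 * fault, which relax some of the khugepaged-only restrictions below.
 */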
bool hugepage_vma_check(struct vm_area_struct *vma,
			unsigned long vm_flags,
			bool smaps, bool in_pf)
{
	if (!vma->vm_mm)		/* vdso */
		return false;

	/*
	 * Explicitly disabled through madvise or prctl, or some
	 * architectures may disable THP for some mappings, for
	 * example, s390 kvm.
	 */
	if ((vm_flags & VM_NOHUGEPAGE) ||
	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return false;
	/*
	 * If the hardware/firmware marked hugepage support disabled.
	 */
	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_NEVER_DAX))
		return false;

	/* khugepaged doesn't collapse DAX vma, but page fault is fine. */
	if (vma_is_dax(vma))
		return in_pf;

	/*
	 * Special VMA and hugetlb VMA.
	 * Must be checked after dax since some dax mappings may have
	 * VM_MIXEDMAP set.
	 */
	if (vm_flags & VM_NO_KHUGEPAGED)
		return false;

	/*
	 * Check alignment for file vma and size for both file and anon vma.
	 *
	 * Skip the check for page fault. Huge fault does the check in fault
	 * handlers. And this check is not suitable for huge PUD fault.
	 */
	if (!in_pf &&
	    !transhuge_vma_suitable(vma, (vma->vm_end - HPAGE_PMD_SIZE)))
		return false;

	/*
	 * Enabled via shmem mount options or sysfs settings.
	 * Must be done before hugepage flags check since shmem has its
	 * own flags.
	 */
	if (!in_pf && shmem_file(vma->vm_file))
		return shmem_huge_enabled(vma);

	if (!hugepage_flags_enabled())
		return false;

	/* THP settings require madvise. */
	if (!(vm_flags & VM_HUGEPAGE) && !hugepage_flags_always())
		return false;

	/* Only regular file is valid */
	if (!in_pf && file_thp_enabled(vma))
		return true;

	if (!vma_is_anonymous(vma))
		return false;

	if (vma_is_temporary_stack(vma))
		return false;

	/*
	 * THPeligible bit of smaps should show 1 for proper VMAs even
	 * though anon_vma is not initialized yet.
	 *
	 * Allow page fault since anon_vma may be not initialized until
	 * the first page fault.
	 */
	if (!vma->anon_vma)
		return (smaps || in_pf);

	return true;
}

static bool get_huge_zero_page(void)
{
	struct page *zero_page;
retry:
	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
		return true;

	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
			HPAGE_PMD_ORDER);
	if (!zero_page) {
		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
		return false;
	}
	count_vm_event(THP_ZERO_PAGE_ALLOC);
	preempt_disable();
	if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
		preempt_enable();
		__free_pages(zero_page, compound_order(zero_page));
		goto retry;
	}
	WRITE_ONCE(huge_zero_pfn, page_to_pfn(zero_page));

	/* We take additional reference here. It will be put back by shrinker */
	atomic_set(&huge_zero_refcount, 2);
	preempt_enable();
	return true;
}

static void put_huge_zero_page(void)
{
	/*
	 * Counter should never go to zero here. Only shrinker can put
	 * last reference.
	 */
	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		return READ_ONCE(huge_zero_page);

	if (!get_huge_zero_page())
		return NULL;

	if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();

	return READ_ONCE(huge_zero_page);
}

void mm_put_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();
}

static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	/* we can free zero page only if last reference remains */
	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
}

static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
		struct page *zero_page = xchg(&huge_zero_page, NULL);
		BUG_ON(zero_page == NULL);
		WRITE_ONCE(huge_zero_pfn, ~0UL);
		__free_pages(zero_page, compound_order(zero_page));
		return HPAGE_PMD_NR;
	}

	return 0;
}

static struct shrinker huge_zero_page_shrinker = {
	.count_objects = shrink_huge_zero_page_count,
	.scan_objects = shrink_huge_zero_page_scan,
	.seeks = DEFAULT_SEEKS,
};

#ifdef CONFIG_SYSFS
static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	const char *output;

	if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
		output = "[always] madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always [madvise] never";
	else
		output = "always madvise [never]";

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t enabled_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	ssize_t ret = count;

	if (sysfs_streq(buf, "always")) {
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		ret = -EINVAL;

	if (ret > 0) {
		int err = start_stop_khugepaged();
		if (err)
			ret = err;
	}
	return ret;
}

static struct kobj_attribute enabled_attr = __ATTR_RW(enabled);

ssize_t single_hugepage_flag_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf,
				  enum transparent_hugepage_flag flag)
{
	return sysfs_emit(buf, "%d\n",
			  !!test_bit(flag, &transparent_hugepage_flags));
}

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count,
				   enum transparent_hugepage_flag flag)
{
	unsigned long value;
	int ret;

	ret = kstrtoul(buf, 10, &value);
	if (ret < 0)
		return ret;
	if (value > 1)
		return -EINVAL;

	if (value)
		set_bit(flag, &transparent_hugepage_flags);
	else
		clear_bit(flag, &transparent_hugepage_flags);

	return count;
}

static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	const char *output;

	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
		     &transparent_hugepage_flags))
		output = "[always] defer defer+madvise madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
			  &transparent_hugepage_flags))
		output = "always [defer] defer+madvise madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always defer [defer+madvise] madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always defer defer+madvise [madvise] never";
	else
		output = "always defer defer+madvise madvise [never]";

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	if (sysfs_streq(buf, "always")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "defer+madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "defer")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		return -EINVAL;

	return count;
}
static struct kobj_attribute defrag_attr = __ATTR_RW(defrag);

static ssize_t use_zero_page_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
					 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static ssize_t use_zero_page_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
					  TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static struct kobj_attribute use_zero_page_attr = __ATTR_RW(use_zero_page);

static ssize_t hpage_pmd_size_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", HPAGE_PMD_SIZE);
}
static struct kobj_attribute hpage_pmd_size_attr =
	__ATTR_RO(hpage_pmd_size);

static struct attribute *hugepage_attr[] = {
	&enabled_attr.attr,
	&defrag_attr.attr,
	&use_zero_page_attr.attr,
	&hpage_pmd_size_attr.attr,
#ifdef CONFIG_SHMEM
	&shmem_enabled_attr.attr,
#endif
	NULL,
};

static const struct attribute_group hugepage_attr_group = {
	.attrs = hugepage_attr,
};

static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	int err;

	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
	if (unlikely(!*hugepage_kobj)) {
		pr_err("failed to create transparent hugepage kobject\n");
		return -ENOMEM;
	}

	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto delete_obj;
	}

	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto remove_hp_group;
	}

	return 0;

remove_hp_group:
	sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
delete_obj:
	kobject_put(*hugepage_kobj);
	return err;
}

static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
	sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
	sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
	kobject_put(hugepage_kobj);
}
#else
static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	return 0;
}

static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
}
#endif /* CONFIG_SYSFS */

static int __init hugepage_init(void)
{
	int err;
	struct kobject *hugepage_kobj;

	if (!has_transparent_hugepage()) {
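		/*
		 * Hardware doesn't support hugepages, hence disable
		 * DAX PMD support.
		 */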
		transparent_hugepage_flags = 1 << TRANSPARENT_HUGEPAGE_NEVER_DAX;
		return -EINVAL;
	}

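	/*
	 * hugepages can't be allocated by the buddy allocator
	 */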
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER >= MAX_ORDER);
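	/*
	 * we use page->mapping and page->index in second tail page
	 * as list_head: assuming THP order >= 2
	 */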
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER < 2);

	err = hugepage_init_sysfs(&hugepage_kobj);
	if (err)
		goto err_sysfs;

	err = khugepaged_init();
	if (err)
		goto err_slab;

	err = register_shrinker(&huge_zero_page_shrinker, "thp-zero");
	if (err)
		goto err_hzp_shrinker;
	err = register_shrinker(&deferred_split_shrinker, "thp-deferred_split");
	if (err)
		goto err_split_shrinker;

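	/*
	 * By default disable transparent hugepages on smaller systems,
	 * where the extra memory used could hurt more than TLB overhead
	 * is likely to save.  The admin can still enable it through /sys.
	 */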
	if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) {
		transparent_hugepage_flags = 0;
		return 0;
	}

	err = start_stop_khugepaged();
	if (err)
		goto err_khugepaged;

	return 0;
err_khugepaged:
	unregister_shrinker(&deferred_split_shrinker);
err_split_shrinker:
	unregister_shrinker(&huge_zero_page_shrinker);
err_hzp_shrinker:
	khugepaged_destroy();
err_slab:
	hugepage_exit_sysfs(hugepage_kobj);
err_sysfs:
	return err;
}
subsys_initcall(hugepage_init);

static int __init setup_transparent_hugepage(char *str)
{
	int ret = 0;
	if (!str)
		goto out;
	if (!strcmp(str, "always")) {
		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
			&transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			&transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	}
out:
	if (!ret)
		pr_warn("transparent_hugepage= cannot parse, ignored\n");
	return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);

pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pmd = pmd_mkwrite(pmd);
	return pmd;
}

#ifdef CONFIG_MEMCG
static inline struct deferred_split *get_deferred_split_queue(struct page *page)
{
	struct mem_cgroup *memcg = page_memcg(compound_head(page));
	struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));

	if (memcg)
		return &memcg->deferred_split_queue;
	else
		return &pgdat->deferred_split_queue;
}
#else
static inline struct deferred_split *get_deferred_split_queue(struct page *page)
{
	struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));

	return &pgdat->deferred_split_queue;
}
#endif

void prep_transhuge_page(struct page *page)
{
	/*
	 * we use page->mapping and page->index in second tail page
	 * as list_head: assuming THP order >= 2
	 */

	INIT_LIST_HEAD(page_deferred_list(page));
	set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
}

static inline bool is_transparent_hugepage(struct page *page)
{
	if (!PageCompound(page))
		return false;

	page = compound_head(page);
	return is_huge_zero_page(page) ||
	       page[1].compound_dtor == TRANSHUGE_PAGE_DTOR;
}

static unsigned long __thp_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len,
		loff_t off, unsigned long flags, unsigned long size)
{
	loff_t off_end = off + len;
	loff_t off_align = round_up(off, size);
	unsigned long len_pad, ret;

	if (off_end <= off_align || (off_end - off_align) < size)
		return 0;

	len_pad = len + size;
	if (len_pad < len || (off + len_pad) < off)
		return 0;

	ret = current->mm->get_unmapped_area(filp, addr, len_pad,
					     off >> PAGE_SHIFT, flags);

	/*
	 * The failure might be due to length padding. The caller will retry
	 * without the padding.
	 */
	if (IS_ERR_VALUE(ret))
		return 0;

	/*
	 * Do not try to align to THP boundary if allocation at the address
	 * hint succeeds.
	 */
	if (ret == addr)
		return addr;

	ret += (off - ret) & (size - 1);
	return ret;
}

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	unsigned long ret;
	loff_t off = (loff_t)pgoff << PAGE_SHIFT;

	ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE);
	if (ret)
		return ret;

	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}
EXPORT_SYMBOL_GPL(thp_get_unmapped_area);

static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
			struct page *page, gfp_t gfp)
{
	struct vm_area_struct *vma = vmf->vma;
	pgtable_t pgtable;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	vm_fault_t ret = 0;

	VM_BUG_ON_PAGE(!PageCompound(page), page);

	if (mem_cgroup_charge(page_folio(page), vma->vm_mm, gfp)) {
		put_page(page);
		count_vm_event(THP_FAULT_FALLBACK);
		count_vm_event(THP_FAULT_FALLBACK_CHARGE);
		return VM_FAULT_FALLBACK;
	}
	cgroup_throttle_swaprate(page, gfp);

	pgtable = pte_alloc_one(vma->vm_mm);
	if (unlikely(!pgtable)) {
		ret = VM_FAULT_OOM;
		goto release;
	}

	clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * clear_huge_page writes become visible before the set_pmd_at()
	 * write.
	 */
	__SetPageUptodate(page);

	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_none(*vmf->pmd))) {
		goto unlock_release;
	} else {
		pmd_t entry;

		ret = check_stable_address_space(vma->vm_mm);
		if (ret)
			goto unlock_release;

		/* Deliver the page fault to userland */
		if (userfaultfd_missing(vma)) {
			spin_unlock(vmf->ptl);
			put_page(page);
			pte_free(vma->vm_mm, pgtable);
			ret = handle_userfault(vmf, VM_UFFD_MISSING);
			VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			return ret;
		}

		entry = mk_huge_pmd(page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		page_add_new_anon_rmap(page, vma, haddr);
		lru_cache_add_inactive_or_unevictable(page, vma);
		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
		update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
		add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
		mm_inc_nr_ptes(vma->vm_mm);
		spin_unlock(vmf->ptl);
		count_vm_event(THP_FAULT_ALLOC);
		count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
	}

	return 0;
unlock_release:
	spin_unlock(vmf->ptl);
release:
	if (pgtable)
		pte_free(vma->vm_mm, pgtable);
	put_page(page);
	return ret;
}
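/*
 * always: directly stall for all thp allocations
 * defer: wake kswapd and fail if not immediately available
 * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
 *		  fail if not immediately available
 * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately
 *	    available
 * never: never stall for any thp allocation
 */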
gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma)
{
	const bool vma_madvised = vma && (vma->vm_flags & VM_HUGEPAGE);

	/* Always do synchronous compaction */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);

	/* Kick kcompactd and fail quickly */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;

	/* Synchronous compaction if madvised, otherwise kick kcompactd */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT |
			(vma_madvised ? __GFP_DIRECT_RECLAIM :
					__GFP_KSWAPD_RECLAIM);

	/* Only do synchronous compaction if madvised */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT |
		       (vma_madvised ? __GFP_DIRECT_RECLAIM : 0);

	return GFP_TRANSHUGE_LIGHT;
}
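/* Caller must hold page table lock. */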
static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
		struct page *zero_page)
{
	pmd_t entry;
	if (!pmd_none(*pmd))
		return;
	entry = mk_pmd(zero_page, vma->vm_page_prot);
	entry = pmd_mkhuge(entry);
	if (pgtable)
		pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, haddr, pmd, entry);
	mm_inc_nr_ptes(mm);
}

vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	gfp_t gfp;
	struct folio *folio;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;

	if (!transhuge_vma_suitable(vma, haddr))
		return VM_FAULT_FALLBACK;
	if (unlikely(anon_vma_prepare(vma)))
		return VM_FAULT_OOM;
	khugepaged_enter_vma(vma, vma->vm_flags);

	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
			!mm_forbids_zeropage(vma->vm_mm) &&
			transparent_hugepage_use_zero_page()) {
		pgtable_t pgtable;
		struct page *zero_page;
		vm_fault_t ret;
		pgtable = pte_alloc_one(vma->vm_mm);
		if (unlikely(!pgtable))
			return VM_FAULT_OOM;
		zero_page = mm_get_huge_zero_page(vma->vm_mm);
		if (unlikely(!zero_page)) {
			pte_free(vma->vm_mm, pgtable);
			count_vm_event(THP_FAULT_FALLBACK);
			return VM_FAULT_FALLBACK;
		}
		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
		ret = 0;
		if (pmd_none(*vmf->pmd)) {
			ret = check_stable_address_space(vma->vm_mm);
			if (ret) {
				spin_unlock(vmf->ptl);
				pte_free(vma->vm_mm, pgtable);
			} else if (userfaultfd_missing(vma)) {
				spin_unlock(vmf->ptl);
				pte_free(vma->vm_mm, pgtable);
				ret = handle_userfault(vmf, VM_UFFD_MISSING);
				VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			} else {
				set_huge_zero_page(pgtable, vma->vm_mm, vma,
						   haddr, vmf->pmd, zero_page);
				update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
				spin_unlock(vmf->ptl);
			}
		} else {
			spin_unlock(vmf->ptl);
			pte_free(vma->vm_mm, pgtable);
		}
		return ret;
	}
	gfp = vma_thp_gfp_mask(vma);
	folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, vma, haddr, true);
	if (unlikely(!folio)) {
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}
	return __do_huge_pmd_anonymous_page(vmf, &folio->page, gfp);
}

static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
		pgtable_t pgtable)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t entry;
	spinlock_t *ptl;

	ptl = pmd_lock(mm, pmd);
	if (!pmd_none(*pmd)) {
		if (write) {
			if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
				WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
				goto out_unlock;
			}
			entry = pmd_mkyoung(*pmd);
			entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
			if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
				update_mmu_cache_pmd(vma, addr, pmd);
		}

		goto out_unlock;
	}

	entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
	if (pfn_t_devmap(pfn))
		entry = pmd_mkdevmap(entry);
	if (write) {
		entry = pmd_mkyoung(pmd_mkdirty(entry));
		entry = maybe_pmd_mkwrite(entry, vma);
	}

	if (pgtable) {
		pgtable_trans_huge_deposit(mm, pmd, pgtable);
		mm_inc_nr_ptes(mm);
		pgtable = NULL;
	}

	set_pmd_at(mm, addr, pmd, entry);
	update_mmu_cache_pmd(vma, addr, pmd);

out_unlock:
	spin_unlock(ptl);
	if (pgtable)
		pte_free(mm, pgtable);
}
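/**
 * vmf_insert_pfn_pmd_prot - insert a pmd size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @pgprot: page protection to use
 * @write: whether it's a write fault
 *
 * Insert a pmd size pfn. See vmf_insert_pfn() for additional info and
 * also consult the vmf_insert_mixed_prot() documentation when
 * @pgprot != @vmf->vma->vm_page_prot.
 *
 * Return: vm_fault_t value.
 */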
vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
				   pgprot_t pgprot, bool write)
{
	unsigned long addr = vmf->address & PMD_MASK;
	struct vm_area_struct *vma = vmf->vma;
	pgtable_t pgtable = NULL;

	/*
	 * If we had pmd_special, we could avoid all these restrictions,
	 * but we need to be consistent with PTEs and architectures that
	 * can't support a 'special' bit.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
			!pfn_t_devmap(pfn));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
			(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;

	if (arch_needs_pgtable_deposit()) {
		pgtable = pte_alloc_one(vma->vm_mm);
		if (!pgtable)
			return VM_FAULT_OOM;
	}

	track_pfn_insert(vma, &pgprot, pfn);

	insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd_prot);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pud = pud_mkwrite(pud);
	return pud;
}

static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, pfn_t pfn, pgprot_t prot, bool write)
{
	struct mm_struct *mm = vma->vm_mm;
	pud_t entry;
	spinlock_t *ptl;

	ptl = pud_lock(mm, pud);
	if (!pud_none(*pud)) {
		if (write) {
			if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
				WARN_ON_ONCE(!is_huge_zero_pud(*pud));
				goto out_unlock;
			}
			entry = pud_mkyoung(*pud);
			entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
			if (pudp_set_access_flags(vma, addr, pud, entry, 1))
				update_mmu_cache_pud(vma, addr, pud);
		}
		goto out_unlock;
	}

	entry = pud_mkhuge(pfn_t_pud(pfn, prot));
	if (pfn_t_devmap(pfn))
		entry = pud_mkdevmap(entry);
	if (write) {
		entry = pud_mkyoung(pud_mkdirty(entry));
		entry = maybe_pud_mkwrite(entry, vma);
	}
	set_pud_at(mm, addr, pud, entry);
	update_mmu_cache_pud(vma, addr, pud);

out_unlock:
	spin_unlock(ptl);
}
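/**
 * vmf_insert_pfn_pud_prot - insert a pud size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @pgprot: page protection to use
 * @write: whether it's a write fault
 *
 * Insert a pud size pfn. See vmf_insert_pfn() for additional info and
 * also consult the vmf_insert_mixed_prot() documentation when
 * @pgprot != @vmf->vma->vm_page_prot.
 *
 * Return: vm_fault_t value.
 */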
vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn,
				   pgprot_t pgprot, bool write)
{
	unsigned long addr = vmf->address & PUD_MASK;
	struct vm_area_struct *vma = vmf->vma;

	/*
	 * If we had pud_special, we could avoid all these restrictions,
	 * but we need to be consistent with PTEs and architectures that
	 * can't support a 'special' bit.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
			!pfn_t_devmap(pfn));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
			(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;

	track_pfn_insert(vma, &pgprot, pfn);

	insert_pfn_pud(vma, addr, vmf->pud, pfn, pgprot, write);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud_prot);
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */

static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
		      pmd_t *pmd, bool write)
{
	pmd_t _pmd;

	_pmd = pmd_mkyoung(*pmd);
	if (write)
		_pmd = pmd_mkdirty(_pmd);
	if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
				  pmd, _pmd, write))
		update_mmu_cache_pmd(vma, addr, pmd);
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	unsigned long pfn = pmd_pfn(*pmd);
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;

	assert_spin_locked(pmd_lockptr(mm, pmd));

	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
	if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
			 (FOLL_PIN | FOLL_GET)))
		return NULL;

	if (flags & FOLL_WRITE && !pmd_write(*pmd))
		return NULL;

	if (pmd_present(*pmd) && pmd_devmap(*pmd))
		/* pass */;
	else
		return NULL;

	if (flags & FOLL_TOUCH)
		touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);

	/*
	 * device mapped pages can only be returned if the
	 * caller will manage the page reference count.
	 */
	if (!(flags & (FOLL_GET | FOLL_PIN)))
		return ERR_PTR(-EEXIST);

	pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
	*pgmap = get_dev_pagemap(pfn, *pgmap);
	if (!*pgmap)
		return ERR_PTR(-EFAULT);
	page = pfn_to_page(pfn);
	if (!try_grab_page(page, flags))
		page = ERR_PTR(-ENOMEM);

	return page;
}

int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
{
	spinlock_t *dst_ptl, *src_ptl;
	struct page *src_page;
	pmd_t pmd;
	pgtable_t pgtable = NULL;
	int ret = -ENOMEM;

	/* Skip if can be re-fill on fault */
	if (!vma_is_anonymous(dst_vma))
		return 0;

	pgtable = pte_alloc_one(dst_mm);
	if (unlikely(!pgtable))
		goto out;

	dst_ptl = pmd_lock(dst_mm, dst_pmd);
	src_ptl = pmd_lockptr(src_mm, src_pmd);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);

	ret = -EAGAIN;
	pmd = *src_pmd;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
	if (unlikely(is_swap_pmd(pmd))) {
		swp_entry_t entry = pmd_to_swp_entry(pmd);

		VM_BUG_ON(!is_pmd_migration_entry(pmd));
		if (!is_readable_migration_entry(entry)) {
			entry = make_readable_migration_entry(
							swp_offset(entry));
			pmd = swp_entry_to_pmd(entry);
			if (pmd_swp_soft_dirty(*src_pmd))
				pmd = pmd_swp_mksoft_dirty(pmd);
			if (pmd_swp_uffd_wp(*src_pmd))
				pmd = pmd_swp_mkuffd_wp(pmd);
			set_pmd_at(src_mm, addr, src_pmd, pmd);
		}
		add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
		mm_inc_nr_ptes(dst_mm);
		pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
		if (!userfaultfd_wp(dst_vma))
			pmd = pmd_swp_clear_uffd_wp(pmd);
		set_pmd_at(dst_mm, addr, dst_pmd, pmd);
		ret = 0;
		goto out_unlock;
	}
#endif

	if (unlikely(!pmd_trans_huge(pmd))) {
		pte_free(dst_mm, pgtable);
		goto out_unlock;
	}
	/*
	 * When page table lock is held, the huge zero pmd should not be
	 * under splitting since we don't split the page itself, only pmd to
	 * a page table.
	 */
	if (is_huge_zero_pmd(pmd)) {
		/*
		 * get_huge_zero_page() will never allocate a new page here,
		 * since we already have a zero page to copy. It just takes a
		 * reference.
		 */
		mm_get_huge_zero_page(dst_mm);
		goto out_zero_page;
	}

	src_page = pmd_page(pmd);
	VM_BUG_ON_PAGE(!PageHead(src_page), src_page);

	get_page(src_page);
	if (unlikely(page_try_dup_anon_rmap(src_page, true, src_vma))) {
		/* Page maybe pinned: split and retry the fault on PTEs. */
		put_page(src_page);
		pte_free(dst_mm, pgtable);
		spin_unlock(src_ptl);
		spin_unlock(dst_ptl);
		__split_huge_pmd(src_vma, src_pmd, addr, false, NULL);
		return -EAGAIN;
	}
	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
out_zero_page:
	mm_inc_nr_ptes(dst_mm);
	pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
	pmdp_set_wrprotect(src_mm, addr, src_pmd);
	if (!userfaultfd_wp(dst_vma))
		pmd = pmd_clear_uffd_wp(pmd);
	pmd = pmd_mkold(pmd_wrprotect(pmd));
	set_pmd_at(dst_mm, addr, dst_pmd, pmd);

	ret = 0;
out_unlock:
	spin_unlock(src_ptl);
	spin_unlock(dst_ptl);
out:
	return ret;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
		      pud_t *pud, bool write)
{
	pud_t _pud;

	_pud = pud_mkyoung(*pud);
	if (write)
		_pud = pud_mkdirty(_pud);
	if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
				  pud, _pud, write))
		update_mmu_cache_pud(vma, addr, pud);
}

struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags, struct dev_pagemap **pgmap)
{
	unsigned long pfn = pud_pfn(*pud);
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;

	assert_spin_locked(pud_lockptr(mm, pud));

	if (flags & FOLL_WRITE && !pud_write(*pud))
		return NULL;

	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
	if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
			 (FOLL_PIN | FOLL_GET)))
		return NULL;

	if (pud_present(*pud) && pud_devmap(*pud))
		/* pass */;
	else
		return NULL;

	if (flags & FOLL_TOUCH)
		touch_pud(vma, addr, pud, flags & FOLL_WRITE);

	/*
	 * device mapped pages can only be returned if the
	 * caller will manage the page reference count.
	 *
	 * At least one of FOLL_GET | FOLL_PIN must be set, so assert that here:
	 */
	if (!(flags & (FOLL_GET | FOLL_PIN)))
		return ERR_PTR(-EEXIST);

	pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT;
	*pgmap = get_dev_pagemap(pfn, *pgmap);
	if (!*pgmap)
		return ERR_PTR(-EFAULT);
	page = pfn_to_page(pfn);
	if (!try_grab_page(page, flags))
		page = ERR_PTR(-ENOMEM);

	return page;
}

int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
		  struct vm_area_struct *vma)
{
	spinlock_t *dst_ptl, *src_ptl;
	pud_t pud;
	int ret;

	dst_ptl = pud_lock(dst_mm, dst_pud);
	src_ptl = pud_lockptr(src_mm, src_pud);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);

	ret = -EAGAIN;
	pud = *src_pud;
	if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud)))
		goto out_unlock;

	/*
	 * When page table lock is held, the huge zero pud should not be
	 * under splitting since we don't split the page itself, only pud to
	 * a page table.
	 */
	if (is_huge_zero_pud(pud)) {
		/* No huge zero pud yet */
	}

	/*
	 * TODO: once we support anonymous pages, use
	 * page_try_dup_anon_rmap() and split if duplicating fails.
	 */
	pudp_set_wrprotect(src_mm, addr, src_pud);
	pud = pud_mkold(pud_wrprotect(pud));
	set_pud_at(dst_mm, addr, dst_pud, pud);

	ret = 0;
out_unlock:
	spin_unlock(src_ptl);
	spin_unlock(dst_ptl);
	return ret;
}

void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);
	if (unlikely(!pud_same(*vmf->pud, orig_pud)))
		goto unlock;

	touch_pud(vmf->vma, vmf->address, vmf->pud, write);
unlock:
	spin_unlock(vmf->ptl);
}
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */

void huge_pmd_set_accessed(struct vm_fault *vmf)
{
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd)))
		goto unlock;

	touch_pmd(vmf->vma, vmf->address, vmf->pmd, write);
unlock:
	spin_unlock(vmf->ptl);
}

vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
{
	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
	struct vm_area_struct *vma = vmf->vma;
	struct page *page;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	pmd_t orig_pmd = vmf->orig_pmd;

	vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
	VM_BUG_ON_VMA(!vma->anon_vma, vma);

	VM_BUG_ON(unshare && (vmf->flags & FAULT_FLAG_WRITE));
	VM_BUG_ON(!unshare && !(vmf->flags & FAULT_FLAG_WRITE));

	if (is_huge_zero_pmd(orig_pmd))
		goto fallback;

	spin_lock(vmf->ptl);

	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
		spin_unlock(vmf->ptl);
		return 0;
	}

	page = pmd_page(orig_pmd);
	VM_BUG_ON_PAGE(!PageHead(page), page);

	/* Early check when only holding the PT lock. */
	if (PageAnonExclusive(page))
		goto reuse;

	if (!trylock_page(page)) {
		get_page(page);
		spin_unlock(vmf->ptl);
		lock_page(page);
		spin_lock(vmf->ptl);
		if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
			spin_unlock(vmf->ptl);
			unlock_page(page);
			put_page(page);
			return 0;
		}
		put_page(page);
	}

	/* Recheck after temporarily dropping the PT lock. */
	if (PageAnonExclusive(page)) {
		unlock_page(page);
		goto reuse;
	}

	/*
	 * See do_wp_page(): we can only reuse the page exclusively if there
	 * are no additional references.
	 */
	if (page_count(page) > 1 + PageSwapCache(page) * thp_nr_pages(page))
		goto unlock_fallback;
	if (PageSwapCache(page))
		try_to_free_swap(page);
	if (page_count(page) == 1) {
		pmd_t entry;

		page_move_anon_rmap(page, vma);
		unlock_page(page);
reuse:
		if (unlikely(unshare)) {
			spin_unlock(vmf->ptl);
			return 0;
		}
		entry = pmd_mkyoung(orig_pmd);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1))
			update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
		spin_unlock(vmf->ptl);
		return VM_FAULT_WRITE;
	}

unlock_fallback:
	unlock_page(page);
	spin_unlock(vmf->ptl);
fallback:
	__split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL);
	return VM_FAULT_FALLBACK;
}
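/* FOLL_FORCE can write to even unwritable PMDs in COW mappings. */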
static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page,
					struct vm_area_struct *vma,
					unsigned int flags)
{
	/* If the pmd is writable, we can write to the page. */
	if (pmd_write(pmd))
		return true;

	/* Maybe FOLL_FORCE is set to override it? */
	if (!(flags & FOLL_FORCE))
		return false;

	/* But FOLL_FORCE has no effect on shared mappings */
	if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
		return false;

	/* ... or read-only private ones */
	if (!(vma->vm_flags & VM_MAYWRITE))
		return false;

	/* ... or already writable ones that just need to take a write fault */
	if (vma->vm_flags & VM_WRITE)
		return false;

	/*
	 * See can_change_pte_writable(): we broke COW and could map the page
	 * writable if we have an exclusive anonymous page ...
	 */
	if (!page || !PageAnon(page) || !PageAnonExclusive(page))
		return false;

	/* ... and a write-fault isn't required for other reasons. */
	if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd))
		return false;
	return !userfaultfd_huge_pmd_wp(vma, pmd);
}

struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
				   unsigned long addr,
				   pmd_t *pmd,
				   unsigned int flags)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;

	assert_spin_locked(pmd_lockptr(mm, pmd));

	page = pmd_page(*pmd);
	VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);

	if ((flags & FOLL_WRITE) &&
	    !can_follow_write_pmd(*pmd, page, vma, flags))
		return NULL;

	/* Avoid dumping huge zero page */
	if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
		return ERR_PTR(-EFAULT);

	/* Full NUMA hinting faults to serialise migration in fault paths */
	if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
		return NULL;

	if (!pmd_write(*pmd) && gup_must_unshare(flags, page))
		return ERR_PTR(-EMLINK);

	VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
			!PageAnonExclusive(page), page);

	if (!try_grab_page(page, flags))
		return ERR_PTR(-ENOMEM);

	if (flags & FOLL_TOUCH)
		touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);

	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
	VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);

	return page;
}
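/* NUMA hinting page fault entry point for trans huge pmds */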
vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	pmd_t oldpmd = vmf->orig_pmd;
	pmd_t pmd;
	struct page *page;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	int page_nid = NUMA_NO_NODE;
	int target_nid, last_cpupid = -1;
	bool migrated = false;
	bool was_writable = pmd_savedwrite(oldpmd);
	int flags = 0;

	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
		spin_unlock(vmf->ptl);
		goto out;
	}

	pmd = pmd_modify(oldpmd, vma->vm_page_prot);
	page = vm_normal_page_pmd(vma, haddr, pmd);
	if (!page)
		goto out_map;

	/* See similar comment in do_numa_page for explanation */
	if (!was_writable)
		flags |= TNF_NO_GROUP;

	page_nid = page_to_nid(page);
	last_cpupid = page_cpupid_last(page);
	target_nid = numa_migrate_prep(page, vma, haddr, page_nid,
				       &flags);

	if (target_nid == NUMA_NO_NODE) {
		put_page(page);
		goto out_map;
	}

	spin_unlock(vmf->ptl);

	migrated = migrate_misplaced_page(page, vma, target_nid);
	if (migrated) {
		flags |= TNF_MIGRATED;
		page_nid = target_nid;
	} else {
		flags |= TNF_MIGRATE_FAIL;
		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
		if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
			spin_unlock(vmf->ptl);
			goto out;
		}
		goto out_map;
	}

out:
	if (page_nid != NUMA_NO_NODE)
		task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR,
				flags);

	return 0;

out_map:
	/* Restore the PMD */
	pmd = pmd_modify(oldpmd, vma->vm_page_prot);
	pmd = pmd_mkyoung(pmd);
	if (was_writable)
		pmd = pmd_mkwrite(pmd);
	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
	update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
	spin_unlock(vmf->ptl);
	goto out;
}
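/*
 * Return true if we do MADV_FREE successfully on entire pmd page.
 * Otherwise, return false.
 */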
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
			   pmd_t *pmd, unsigned long addr, unsigned long next)
{
	spinlock_t *ptl;
	pmd_t orig_pmd;
	struct page *page;
	struct mm_struct *mm = tlb->mm;
	bool ret = false;

	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (!ptl)
		goto out_unlocked;

	orig_pmd = *pmd;
	if (is_huge_zero_pmd(orig_pmd))
		goto out;

	if (unlikely(!pmd_present(orig_pmd))) {
		VM_BUG_ON(thp_migration_supported() &&
				  !is_pmd_migration_entry(orig_pmd));
		goto out;
	}

	page = pmd_page(orig_pmd);
	/*
	 * If other processes are mapping this page, we couldn't discard
	 * the page unless they all do MADV_FREE so let's skip the page.
	 */
	if (total_mapcount(page) != 1)
		goto out;

	if (!trylock_page(page))
		goto out;

	/*
	 * If user want to discard part of the page, split the huge page
	 * and let the rest of the page still be mapped.
	 */
	if (next - addr != HPAGE_PMD_SIZE) {
		get_page(page);
		spin_unlock(ptl);
		split_huge_page(page);
		unlock_page(page);
		put_page(page);
		goto out_unlocked;
	}

	if (PageDirty(page))
		ClearPageDirty(page);
	unlock_page(page);

	if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
		pmdp_invalidate(vma, addr, pmd);
		orig_pmd = pmd_mkold(orig_pmd);
		orig_pmd = pmd_mkclean(orig_pmd);

		set_pmd_at(mm, addr, pmd, orig_pmd);
		tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
	}

	mark_page_lazyfree(page);
	ret = true;
out:
	spin_unlock(ptl);
out_unlocked:
	return ret;
}

static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
{
	pgtable_t pgtable;

	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
	pte_free(mm, pgtable);
	mm_dec_nr_ptes(mm);
}

int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
		 pmd_t *pmd, unsigned long addr)
{
	pmd_t orig_pmd;
	spinlock_t *ptl;

	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);

	ptl = __pmd_trans_huge_lock(pmd, vma);
	if (!ptl)
		return 0;
	/*
	 * For architectures like ppc64 we look at deposited pgtable
	 * when calling pmdp_huge_get_and_clear. So do the
	 * pgtable_trans_huge_withdraw after finishing pmdp related
	 * operations.
	 */
	orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd,
						tlb->fullmm);
	tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
	if (vma_is_special_huge(vma)) {
		if (arch_needs_pgtable_deposit())
			zap_deposited_table(tlb->mm, pmd);
		spin_unlock(ptl);
	} else if (is_huge_zero_pmd(orig_pmd)) {
		zap_deposited_table(tlb->mm, pmd);
		spin_unlock(ptl);
	} else {
		struct page *page = NULL;
		int flush_needed = 1;

		if (pmd_present(orig_pmd)) {
			page = pmd_page(orig_pmd);
			page_remove_rmap(page, vma, true);
			VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
			VM_BUG_ON_PAGE(!PageHead(page), page);
		} else if (thp_migration_supported()) {
			swp_entry_t entry;

			VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
			entry = pmd_to_swp_entry(orig_pmd);
			page = pfn_swap_entry_to_page(entry);
			flush_needed = 0;
		} else
			WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");

		if (PageAnon(page)) {
			zap_deposited_table(tlb->mm, pmd);
			add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
		} else {
			if (arch_needs_pgtable_deposit())
				zap_deposited_table(tlb->mm, pmd);
			add_mm_counter(tlb->mm, mm_counter_file(page), -HPAGE_PMD_NR);
		}

		spin_unlock(ptl);
		if (flush_needed)
			tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE);
	}
	return 1;
}

#ifndef pmd_move_must_withdraw
static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
					 spinlock_t *old_pmd_ptl,
					 struct vm_area_struct *vma)
{
	/*
	 * With split pmd lock we also need to move preallocated
	 * PTE page table if new_pmd is on different PMD page table.
	 *
	 * We also don't deposit and withdraw tables for file pages.
	 */
	return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
}
#endif

static pmd_t move_soft_dirty_pmd(pmd_t pmd)
{
#ifdef CONFIG_MEM_SOFT_DIRTY
	if (unlikely(is_pmd_migration_entry(pmd)))
		pmd = pmd_swp_mksoft_dirty(pmd);
	else if (pmd_present(pmd))
		pmd = pmd_mksoft_dirty(pmd);
#endif
	return pmd;
}

bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		  unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
{
	spinlock_t *old_ptl, *new_ptl;
	pmd_t pmd;
	struct mm_struct *mm = vma->vm_mm;
	bool force_flush = false;

	/*
	 * The destination pmd shouldn't be established, free_pgtables()
	 * should have released it.
	 */
	if (WARN_ON(!pmd_none(*new_pmd))) {
		VM_BUG_ON(pmd_trans_huge(*new_pmd));
		return false;
	}

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
	if (old_ptl) {
		new_ptl = pmd_lockptr(mm, new_pmd);
		if (new_ptl != old_ptl)
			spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
		pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
		if (pmd_present(pmd))
			force_flush = true;
		VM_BUG_ON(!pmd_none(*new_pmd));

		if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) {
			pgtable_t pgtable;
			pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
			pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
		}
		pmd = move_soft_dirty_pmd(pmd);
		set_pmd_at(mm, new_addr, new_pmd, pmd);
		if (force_flush)
			flush_pmd_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
		if (new_ptl != old_ptl)
			spin_unlock(new_ptl);
		spin_unlock(old_ptl);
		return true;
	}
	return false;
}
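/*
 * Returns
 *  - 0 if PMD could not be locked
 *  - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
 *      or if prot_numa but THP migration is not supported
 *  - HPAGE_PMD_NR if protections changed and TLB flush necessary
 */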
int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
		    pmd_t *pmd, unsigned long addr, pgprot_t newprot,
		    unsigned long cp_flags)
{
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *ptl;
	pmd_t oldpmd, entry;
	bool preserve_write;
	int ret;
	bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
	bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
	bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;

	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);

	if (prot_numa && !thp_migration_supported())
		return 1;

	ptl = __pmd_trans_huge_lock(pmd, vma);
	if (!ptl)
		return 0;

	preserve_write = prot_numa && pmd_write(*pmd);
	ret = 1;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
	if (is_swap_pmd(*pmd)) {
		swp_entry_t entry = pmd_to_swp_entry(*pmd);
		struct page *page = pfn_swap_entry_to_page(entry);

		VM_BUG_ON(!is_pmd_migration_entry(*pmd));
		if (is_writable_migration_entry(entry)) {
			pmd_t newpmd;
			/*
			 * A protection check is difficult so
			 * just be safe and disable write
			 */
			if (PageAnon(page))
				entry = make_readable_exclusive_migration_entry(swp_offset(entry));
			else
				entry = make_readable_migration_entry(swp_offset(entry));
			newpmd = swp_entry_to_pmd(entry);
			if (pmd_swp_soft_dirty(*pmd))
				newpmd = pmd_swp_mksoft_dirty(newpmd);
			if (pmd_swp_uffd_wp(*pmd))
				newpmd = pmd_swp_mkuffd_wp(newpmd);
			set_pmd_at(mm, addr, pmd, newpmd);
		}
		goto unlock;
	}
#endif

	if (prot_numa) {
		struct page *page;
		/*
		 * Avoid trapping faults against the zero page. The read-only
		 * data is likely to be read-cached on the local CPU and
		 * local/remote hits to the zero page are not interesting.
		 */
		if (is_huge_zero_pmd(*pmd))
			goto unlock;

		if (pmd_protnone(*pmd))
			goto unlock;

		page = pmd_page(*pmd);
		/*
		 * Skip scanning top tier node if normal numa
		 * balancing is disabled
		 */
		if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) &&
		    node_is_toptier(page_to_nid(page)))
			goto unlock;
	}
	/*
	 * In case prot_numa, we are under mmap_read_lock(mm). It's critical
	 * to not clear pmd intermittently to avoid race with MADV_DONTNEED
	 * which is also under mmap_read_lock(mm):
	 *
	 *	CPU0:				CPU1:
	 *				change_huge_pmd(prot_numa=1)
	 *				 pmdp_huge_get_and_clear_notify()
	 * madvise_dontneed()
	 *  zap_pmd_range()
	 *   pmd_trans_huge(*pmd) == 0 (without ptl)
	 *   // skip the pmd
	 *				 set_pmd_at();
	 *				 // pmd is re-established
	 *
	 * The race makes MADV_DONTNEED miss the huge pmd and don't clear it
	 * which may break userspace.
	 *
	 * pmdp_invalidate_ad() is required to make sure we don't miss
	 * dirty/young flags set by hardware.
	 */
	oldpmd = pmdp_invalidate_ad(vma, addr, pmd);

	entry = pmd_modify(oldpmd, newprot);
	if (preserve_write)
		entry = pmd_mk_savedwrite(entry);
	if (uffd_wp) {
		entry = pmd_wrprotect(entry);
		entry = pmd_mkuffd_wp(entry);
	} else if (uffd_wp_resolve) {
		/*
		 * Leave the write bit to be handled by PF interrupt
		 * handler, then things like COW could be properly
		 * handled.
		 */
		entry = pmd_clear_uffd_wp(entry);
	}
	ret = HPAGE_PMD_NR;
	set_pmd_at(mm, addr, pmd, entry);

	if (huge_pmd_needs_flush(oldpmd, entry))
		tlb_flush_pmd_range(tlb, addr, HPAGE_PMD_SIZE);

	BUG_ON(vma_is_anonymous(vma) && !preserve_write && pmd_write(entry));
unlock:
	spin_unlock(ptl);
	return ret;
}
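/*
 * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise.
 *
 * Note that if it returns page table lock pointer, this routine returns without
 * unlocking page table lock. So callers must unlock it.
 */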
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
{
	spinlock_t *ptl;
	ptl = pmd_lock(vma->vm_mm, pmd);
	if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
			pmd_devmap(*pmd)))
		return ptl;
	spin_unlock(ptl);
	return NULL;
}
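/*
 * Returns page table lock pointer if a given pud maps a thp, NULL otherwise.
 *
 * Note that if it returns page table lock pointer, this routine returns without
 * unlocking page table lock. So callers must unlock it.
 */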
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
{
	spinlock_t *ptl;

	ptl = pud_lock(vma->vm_mm, pud);
	if (likely(pud_trans_huge(*pud) || pud_devmap(*pud)))
		return ptl;
	spin_unlock(ptl);
	return NULL;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
		 pud_t *pud, unsigned long addr)
{
	spinlock_t *ptl;

	ptl = __pud_trans_huge_lock(pud, vma);
	if (!ptl)
		return 0;

	pudp_huge_get_and_clear_full(tlb->mm, addr, pud, tlb->fullmm);
	tlb_remove_pud_tlb_entry(tlb, pud, addr);
	if (vma_is_special_huge(vma)) {
		spin_unlock(ptl);
		/* No zero page support yet */
	} else {
		/* No support for anonymous PUD pages yet */
		BUG();
	}
	return 1;
}

static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
		unsigned long haddr)
{
	VM_BUG_ON(haddr & ~HPAGE_PUD_MASK);
	VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
	VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);
	VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud));

	count_vm_event(THP_SPLIT_PUD);

	pudp_huge_clear_flush_notify(vma, haddr, pud);
}

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address)
{
	spinlock_t *ptl;
	struct mmu_notifier_range range;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
				address & HPAGE_PUD_MASK,
				(address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE);
	mmu_notifier_invalidate_range_start(&range);
	ptl = pud_lock(vma->vm_mm, pud);
	if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud)))
		goto out;
	__split_huge_pud_locked(vma, pud, range.start);

out:
	spin_unlock(ptl);
	/*
	 * No need to double call mmu_notifier->invalidate_range() callback as
	 * the above pudp_huge_clear_flush_notify() did already call it.
	 */
	mmu_notifier_invalidate_range_only_end(&range);
}
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */

static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
		unsigned long haddr, pmd_t *pmd)
{
	struct mm_struct *mm = vma->vm_mm;
	pgtable_t pgtable;
	pmd_t _pmd;
	int i;

	/*
	 * Leave pmd empty until pte is filled. Note that it is fine to delay
	 * notification until mmu_notifier_invalidate_range_end() as we are
	 * replacing a zero pmd write protected page with a zero pte write
	 * protected page.
	 *
	 * See Documentation/mm/mmu_notifier.rst
	 */
	pmdp_huge_clear_flush(vma, haddr, pmd);

	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
	pmd_populate(mm, &_pmd, pgtable);

	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
		pte_t *pte, entry;
		entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
		entry = pte_mkspecial(entry);
		pte = pte_offset_map(&_pmd, haddr);
		VM_BUG_ON(!pte_none(*pte));
		set_pte_at(mm, haddr, pte, entry);
		pte_unmap(pte);
	}
	smp_wmb(); /* make pte visible before pmd */
	pmd_populate(mm, pmd, pgtable);
}
2023
2024 static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
2025 unsigned long haddr, bool freeze)
2026 {
2027 struct mm_struct *mm = vma->vm_mm;
2028 struct page *page;
2029 pgtable_t pgtable;
2030 pmd_t old_pmd, _pmd;
2031 bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false;
2032 bool anon_exclusive = false;
2033 unsigned long addr;
2034 int i;
2035
2036 VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
2037 VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
2038 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
2039 VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd)
2040 && !pmd_devmap(*pmd));
2041
2042 count_vm_event(THP_SPLIT_PMD);
2043
2044 if (!vma_is_anonymous(vma)) {
2045 old_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
2046
2047
2048
2049
2050 if (arch_needs_pgtable_deposit())
2051 zap_deposited_table(mm, pmd);
2052 if (vma_is_special_huge(vma))
2053 return;
2054 if (unlikely(is_pmd_migration_entry(old_pmd))) {
2055 swp_entry_t entry;
2056
2057 entry = pmd_to_swp_entry(old_pmd);
2058 page = pfn_swap_entry_to_page(entry);
2059 } else {
2060 page = pmd_page(old_pmd);
2061 if (!PageDirty(page) && pmd_dirty(old_pmd))
2062 set_page_dirty(page);
2063 if (!PageReferenced(page) && pmd_young(old_pmd))
2064 SetPageReferenced(page);
2065 page_remove_rmap(page, vma, true);
2066 put_page(page);
2067 }
2068 add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
2069 return;
2070 }
2071
2072 if (is_huge_zero_pmd(*pmd)) {
2073
2074
2075
2076
2077
2078
2079
2080
2081
2082 return __split_huge_zero_page_pmd(vma, haddr, pmd);
2083 }
2084
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105 old_pmd = pmdp_invalidate(vma, haddr, pmd);
2106
2107 pmd_migration = is_pmd_migration_entry(old_pmd);
2108 if (unlikely(pmd_migration)) {
2109 swp_entry_t entry;
2110
2111 entry = pmd_to_swp_entry(old_pmd);
2112 page = pfn_swap_entry_to_page(entry);
2113 write = is_writable_migration_entry(entry);
2114 if (PageAnon(page))
2115 anon_exclusive = is_readable_exclusive_migration_entry(entry);
2116 young = false;
2117 soft_dirty = pmd_swp_soft_dirty(old_pmd);
2118 uffd_wp = pmd_swp_uffd_wp(old_pmd);
2119 } else {
2120 page = pmd_page(old_pmd);
2121 if (pmd_dirty(old_pmd))
2122 SetPageDirty(page);
2123 write = pmd_write(old_pmd);
2124 young = pmd_young(old_pmd);
2125 soft_dirty = pmd_soft_dirty(old_pmd);
2126 uffd_wp = pmd_uffd_wp(old_pmd);
2127
2128 VM_BUG_ON_PAGE(!page_count(page), page);
2129 page_ref_add(page, HPAGE_PMD_NR - 1);
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144 anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
2145 if (freeze && anon_exclusive && page_try_share_anon_rmap(page))
2146 freeze = false;
2147 }
2148
2149
2150
2151
2152
2153 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2154 pmd_populate(mm, &_pmd, pgtable);
2155
2156 for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
2157 pte_t entry, *pte;
2158
2159
2160
2161
2162
2163 if (freeze || pmd_migration) {
2164 swp_entry_t swp_entry;
2165 if (write)
2166 swp_entry = make_writable_migration_entry(
2167 page_to_pfn(page + i));
2168 else if (anon_exclusive)
2169 swp_entry = make_readable_exclusive_migration_entry(
2170 page_to_pfn(page + i));
2171 else
2172 swp_entry = make_readable_migration_entry(
2173 page_to_pfn(page + i));
2174 entry = swp_entry_to_pte(swp_entry);
2175 if (soft_dirty)
2176 entry = pte_swp_mksoft_dirty(entry);
2177 if (uffd_wp)
2178 entry = pte_swp_mkuffd_wp(entry);
2179 } else {
2180 entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
2181 entry = maybe_mkwrite(entry, vma);
2182 if (anon_exclusive)
2183 SetPageAnonExclusive(page + i);
2184 if (!write)
2185 entry = pte_wrprotect(entry);
2186 if (!young)
2187 entry = pte_mkold(entry);
2188 if (soft_dirty)
2189 entry = pte_mksoft_dirty(entry);
2190 if (uffd_wp)
2191 entry = pte_mkuffd_wp(entry);
2192 }
2193 pte = pte_offset_map(&_pmd, addr);
2194 BUG_ON(!pte_none(*pte));
2195 set_pte_at(mm, addr, pte, entry);
2196 if (!pmd_migration)
2197 atomic_inc(&page[i]._mapcount);
2198 pte_unmap(pte);
2199 }
2200
2201 if (!pmd_migration) {
2202
2203
2204
2205
2206 if (compound_mapcount(page) > 1 &&
2207 !TestSetPageDoubleMap(page)) {
2208 for (i = 0; i < HPAGE_PMD_NR; i++)
2209 atomic_inc(&page[i]._mapcount);
2210 }
2211
2212 lock_page_memcg(page);
2213 if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
2214
2215 __mod_lruvec_page_state(page, NR_ANON_THPS,
2216 -HPAGE_PMD_NR);
2217 if (TestClearPageDoubleMap(page)) {
2218
2219 for (i = 0; i < HPAGE_PMD_NR; i++)
2220 atomic_dec(&page[i]._mapcount);
2221 }
2222 }
2223 unlock_page_memcg(page);
2224 /* The block above open-codes page_remove_rmap(page, vma, true), so do */
2225 /* the PMD-level munlock that page_remove_rmap() would otherwise have done. */
2226 munlock_vma_page(page, vma, true);
2227 }
2228 /* Make the new ptes visible before the pmd is populated with the table. */
2229 smp_wmb();
2230 pmd_populate(mm, pmd, pgtable);
2231
2232 if (freeze) {
2233 for (i = 0; i < HPAGE_PMD_NR; i++) {
2234 page_remove_rmap(page + i, vma, false);
2235 put_page(page + i);
2236 }
2237 }
2238 }
2239
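/*
 * __split_huge_pmd - split the huge pmd mapping at @address, if one is
 * present.  Takes the mmu-notifier range and the pmd lock, then splits the
 * pmd when it is a trans-huge or devmap entry, or a pmd migration entry.
 * When @freeze is set the caller wants migration entries installed instead
 * of present ptes and must pass the locked @folio so the pmd can be checked
 * against it.
 */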
2240 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
2241 unsigned long address, bool freeze, struct folio *folio)
2242 {
2243 spinlock_t *ptl;
2244 struct mmu_notifier_range range;
2245
2246 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
2247 address & HPAGE_PMD_MASK,
2248 (address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE);
2249 mmu_notifier_invalidate_range_start(&range);
2250 ptl = pmd_lock(vma->vm_mm, pmd);
2251 /*
2252  * A caller that asks for migration entries (freeze) must pass
2253  * the locked folio, so the pmd can be checked against it and we
2254  * never freeze mappings of the wrong folio.
2255  */
2256 VM_BUG_ON(freeze && !folio);
2257 VM_WARN_ON_ONCE(folio && !folio_test_locked(folio));
2258
2259 if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) ||
2260 is_pmd_migration_entry(*pmd)) {
2261 /*
2262  * If a specific folio was passed, only proceed while this pmd
2263  * still maps that folio (it may already have been split).
2264  */
2265 if (folio && folio != page_folio(pmd_page(*pmd)))
2266 goto out;
2267 __split_huge_pmd_locked(vma, pmd, range.start, freeze);
2268 }
2269
2270 out:
2271 spin_unlock(ptl);
2272 /*
2273  * Use the _only_end() variant: an extra invalidate_range() call
2274  * is not needed because __split_huge_pmd_locked() either left
2275  * the pmd pointing at the same pages with the same protection
2276  * (in-place split), already performed the invalidation when it
2277  * cleared the pmd, or installed migration entries whose removal
2278  * will take care of the invalidation.
2279  */
2280
2281
2282
2283
2284
2285 mmu_notifier_invalidate_range_only_end(&range);
2286 }
2287
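/*
 * split_huge_pmd_address - walk to the pmd covering @address and split it.
 * Walks pgd/p4d/pud; if any level is not present there is nothing to split.
 * The pmd itself is examined under the pmd lock inside __split_huge_pmd(),
 * so no huge-pmd check is done here.
 */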
2288 void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
2289 bool freeze, struct folio *folio)
2290 {
2291 pgd_t *pgd;
2292 p4d_t *p4d;
2293 pud_t *pud;
2294 pmd_t *pmd;
2295
2296 pgd = pgd_offset(vma->vm_mm, address);
2297 if (!pgd_present(*pgd))
2298 return;
2299
2300 p4d = p4d_offset(pgd, address);
2301 if (!p4d_present(*p4d))
2302 return;
2303
2304 pud = pud_offset(p4d, address);
2305 if (!pud_present(*pud))
2306 return;
2307
2308 pmd = pmd_offset(pud, address);
2309
2310 __split_huge_pmd(vma, pmd, address, freeze, folio);
2311 }
2312
2313 static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned long address)
2314 {
2315 /*
2316  * If the address is not huge-page aligned but the surrounding
2317  * HPAGE_PMD_SIZE range lies inside the vma, split any huge pmd.
2318  */
2319 if (!IS_ALIGNED(address, HPAGE_PMD_SIZE) &&
2320 range_in_vma(vma, ALIGN_DOWN(address, HPAGE_PMD_SIZE),
2321 ALIGN(address, HPAGE_PMD_SIZE)))
2322 split_huge_pmd_address(vma, address, false, NULL);
2323 }
2324
2325 void vma_adjust_trans_huge(struct vm_area_struct *vma,
2326 unsigned long start,
2327 unsigned long end,
2328 long adjust_next)
2329 {
2330 /* Split any huge pmd straddling the new start address. */
2331 split_huge_pmd_if_needed(vma, start);
2332
2333 /* Split any huge pmd straddling the new end address. */
2334 split_huge_pmd_if_needed(vma, end);
2335
2336 /*
2337  * If the following vma's start is being moved as well
2338  * (adjust_next), check whether a huge pmd there needs splitting.
2339  */
2340 if (adjust_next > 0) {
2341 struct vm_area_struct *next = vma->vm_next;
2342 unsigned long nstart = next->vm_start;
2343 nstart += adjust_next;
2344 split_huge_pmd_if_needed(next, nstart);
2345 }
2346 }
2347
2348 static void unmap_page(struct page *page)
2349 {
2350 struct folio *folio = page_folio(page);
2351 enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD |
2352 TTU_SYNC;
2353
2354 VM_BUG_ON_PAGE(!PageHead(page), page);
2355
2356 /*
2357  * Anon pages need migration entries to preserve them across the
2358  * split; file pages can simply be left unmapped and faulted back
2359  * on demand, which is why remap_page() only handles anon folios.
2360  */
2361 if (folio_test_anon(folio))
2362 try_to_migrate(folio, ttu_flags);
2363 else
2364 try_to_unmap(folio, ttu_flags | TTU_IGNORE_MLOCK);
2365 }
2366
2367 static void remap_page(struct folio *folio, unsigned long nr)
2368 {
2369 int i = 0;
2370 /* Only anon folios were unmapped with migration entries, see unmap_page(). */
2371
2372 if (!folio_test_anon(folio))
2373 return;
2374 for (;;) {
2375 remove_migration_ptes(folio, folio, true);
2376 i += folio_nr_pages(folio);
2377 if (i >= nr)
2378 break;
2379 folio = folio_next(folio);
2380 }
2381 }
2382
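/*
 * lru_add_page_tail - place a freshly split tail page either on the caller's
 * private list (taking an extra reference) or on the LRU right next to the
 * head.  Called with the lruvec lru_lock held.
 */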
2383 static void lru_add_page_tail(struct page *head, struct page *tail,
2384 struct lruvec *lruvec, struct list_head *list)
2385 {
2386 VM_BUG_ON_PAGE(!PageHead(head), head);
2387 VM_BUG_ON_PAGE(PageCompound(tail), head);
2388 VM_BUG_ON_PAGE(PageLRU(tail), head);
2389 lockdep_assert_held(&lruvec->lru_lock);
2390
2391 if (list) {
2392
2393 VM_WARN_ON(PageLRU(head));
2394 get_page(tail);
2395 list_add_tail(&tail->lru, list);
2396 } else {
2397
2398 VM_WARN_ON(!PageLRU(head));
2399 if (PageUnevictable(tail))
2400 tail->mlock_count = 0;
2401 else
2402 list_add_tail(&tail->lru, &head->lru);
2403 SetPageLRU(tail);
2404 }
2405 }
2406
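/*
 * __split_huge_page_tail - turn tail number @tail of the frozen compound
 * page @head into an independent order-0 page: copy the relevant page flags,
 * mapping and index from the head, clear the compound linkage, unfreeze the
 * tail's refcount and hand it to lru_add_page_tail().
 */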
2407 static void __split_huge_page_tail(struct page *head, int tail,
2408 struct lruvec *lruvec, struct list_head *list)
2409 {
2410 struct page *page_tail = head + tail;
2411
2412 VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);
2413 /*
2414  * Clone the head's page flags into the tail before the tail's
2415  * refcount is unfrozen: once get_page_unless_zero() can succeed,
2416  * others may start changing flags (e.g. lock_page() setting
2417  * PG_waiters), so the copy has to happen first.
2418  */
2419
2420
2421
2422
2423
2424
2425
2426
2427 page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
2428 page_tail->flags |= (head->flags &
2429 ((1L << PG_referenced) |
2430 (1L << PG_swapbacked) |
2431 (1L << PG_swapcache) |
2432 (1L << PG_mlocked) |
2433 (1L << PG_uptodate) |
2434 (1L << PG_active) |
2435 (1L << PG_workingset) |
2436 (1L << PG_locked) |
2437 (1L << PG_unevictable) |
2438 #ifdef CONFIG_64BIT
2439 (1L << PG_arch_2) |
2440 #endif
2441 (1L << PG_dirty)));
2442 /* ->mapping of the first two tail pages is reused for compound */
2443 /* metadata (compound_mapcount, deferred_list), hence tail > 2. */
2444 VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
2445 page_tail);
2446 page_tail->mapping = head->mapping;
2447 page_tail->index = head->index + tail;
2448 page_tail->private = 0;
2449
2450 /* Page flags must be visible before the tail is made non-compound. */
2451 smp_wmb();
2452
2453 /*
2454  * Clear PageTail before unfreezing the refcount: once
2455  * get_page_unless_zero() can succeed, a later put_page() needs
2456  * a correct compound_head().
2457  */
2458
2459 clear_compound_head(page_tail);
2460
2461 /* Unfreeze the tail; page cache or swap cache holds one extra reference. */
2462 page_ref_unfreeze(page_tail, 1 + (!PageAnon(head) ||
2463 PageSwapCache(head)));
2464
2465 if (page_is_young(head))
2466 set_page_young(page_tail);
2467 if (page_is_idle(head))
2468 set_page_idle(page_tail);
2469
2470 page_cpupid_xchg_last(page_tail, page_cpupid_last(head));
2471
2472 /*
2473  * Always add the tail after the element currently being
2474  * processed: some iterators (e.g. migrate_pages()) expect new
2475  * pages to show up after the one they are working on.
2476  */
2477 lru_add_page_tail(head, page_tail, lruvec, list);
2478 }
2479
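/*
 * __split_huge_page - second half of the split.  Called with the head page
 * locked, its refcount frozen and interrupts disabled; for file-backed pages
 * the caller also holds the mapping's i_pages lock.  Distributes memcg and
 * page_owner state to the subpages, fixes up page cache / swap cache slots,
 * drops subpages beyond @end, unfreezes the tails, restores migration ptes
 * and finally unlocks and releases every subpage except @page itself.
 */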
2480 static void __split_huge_page(struct page *page, struct list_head *list,
2481 pgoff_t end)
2482 {
2483 struct folio *folio = page_folio(page);
2484 struct page *head = &folio->page;
2485 struct lruvec *lruvec;
2486 struct address_space *swap_cache = NULL;
2487 unsigned long offset = 0;
2488 unsigned int nr = thp_nr_pages(head);
2489 int i;
2490
2491
2492 split_page_memcg(head, nr);
2493
2494 if (PageAnon(head) && PageSwapCache(head)) {
2495 swp_entry_t entry = { .val = page_private(head) };
2496
2497 offset = swp_offset(entry);
2498 swap_cache = swap_address_space(entry);
2499 xa_lock(&swap_cache->i_pages);
2500 }
2501
2502
2503 lruvec = folio_lruvec_lock(folio);
2504
2505 ClearPageHasHWPoisoned(head);
2506
2507 for (i = nr - 1; i >= 1; i--) {
2508 __split_huge_page_tail(head, i, lruvec, list);
2509
2510 if (head[i].index >= end) {
2511 struct folio *tail = page_folio(head + i);
2512
2513 if (shmem_mapping(head->mapping))
2514 shmem_uncharge(head->mapping->host, 1);
2515 else if (folio_test_clear_dirty(tail))
2516 folio_account_cleaned(tail,
2517 inode_to_wb(folio->mapping->host));
2518 __filemap_remove_folio(tail, NULL);
2519 folio_put(tail);
2520 } else if (!PageAnon(page)) {
2521 __xa_store(&head->mapping->i_pages, head[i].index,
2522 head + i, 0);
2523 } else if (swap_cache) {
2524 __xa_store(&swap_cache->i_pages, offset + i,
2525 head + i, 0);
2526 }
2527 }
2528
2529 ClearPageCompound(head);
2530 unlock_page_lruvec(lruvec);
2531
2532
2533 split_page_owner(head, nr);
2534
2535
2536 if (PageAnon(head)) {
2537 /* Restore the caller's ref plus the extra ref held by the swap cache. */
2538 if (PageSwapCache(head)) {
2539 page_ref_add(head, 2);
2540 xa_unlock(&swap_cache->i_pages);
2541 } else {
2542 page_ref_inc(head);
2543 }
2544 } else {
2545 /* Restore the caller's ref plus the extra ref held by the page cache. */
2546 page_ref_add(head, 2);
2547 xa_unlock(&head->mapping->i_pages);
2548 }
2549 local_irq_enable();
2550
2551 remap_page(folio, nr);
2552
2553 if (PageSwapCache(head)) {
2554 swp_entry_t entry = { .val = page_private(head) };
2555
2556 split_swap_cluster(entry);
2557 }
2558
2559 for (i = 0; i < nr; i++) {
2560 struct page *subpage = head + i;
2561 if (subpage == page)
2562 continue;
2563 unlock_page(subpage);
2564
2565
2566
2567
2568
2569
2570
2571
2572 free_page_and_swap_cache(subpage);
2573 }
2574 }
2575
2576
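/*
 * can_split_folio - racy check whether @folio has no unexpected pins.
 * Expected references are one per page from the page cache (or from the
 * swap cache for anonymous folios that are in swap), one per mapping
 * (folio_mapcount) and the caller's own reference; anything above that
 * means the folio is pinned and a split would fail.
 */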
2577 bool can_split_folio(struct folio *folio, int *pextra_pins)
2578 {
2579 int extra_pins;
2580
2581 /* Extra pins expected from the page cache (or swap cache for anon). */
2582 if (folio_test_anon(folio))
2583 extra_pins = folio_test_swapcache(folio) ?
2584 folio_nr_pages(folio) : 0;
2585 else
2586 extra_pins = folio_nr_pages(folio);
2587 if (pextra_pins)
2588 *pextra_pins = extra_pins;
2589 return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins - 1;
2590 }
2591 /*
2592  * split_huge_page_to_list - split a huge page into normal pages.
2593  * @page: any subpage of the huge page; the split does not change
2594  *        which physical page @page refers to.
2595  * @list: if non-NULL, unpinned tail pages are put on this list
2596  *        instead of the LRU.
2597  *
2598  * The caller must hold the page lock and a reference on @page;
2599  * any additional pin makes the split fail with -EBUSY.
2600  *
2601  * Head and tail pages inherit mapping, index and flags from the
2602  * huge page.  The caller's reference and PG_locked stay with
2603  * @page; other subpages are freed if they are no longer mapped.
2604  *
2605  * Returns 0 on success, -EBUSY if the page has extra pins, is
2606  * under writeback or its anon_vma / mapping is gone, or an
2607  * xarray split allocation error for file-backed pages.
2608  */
2609
2610
2611 int split_huge_page_to_list(struct page *page, struct list_head *list)
2612 {
2613 struct folio *folio = page_folio(page);
2614 struct page *head = &folio->page;
2615 struct deferred_split *ds_queue = get_deferred_split_queue(head);
2616 XA_STATE(xas, &head->mapping->i_pages, head->index);
2617 struct anon_vma *anon_vma = NULL;
2618 struct address_space *mapping = NULL;
2619 int extra_pins, ret;
2620 pgoff_t end;
2621 bool is_hzp;
2622
2623 VM_BUG_ON_PAGE(!PageLocked(head), head);
2624 VM_BUG_ON_PAGE(!PageCompound(head), head);
2625
2626 is_hzp = is_huge_zero_page(head);
2627 VM_WARN_ON_ONCE_PAGE(is_hzp, head);
2628 if (is_hzp)
2629 return -EBUSY;
2630
2631 if (PageWriteback(head))
2632 return -EBUSY;
2633
2634 if (PageAnon(head)) {
2635 /*
2636  * The caller does not necessarily hold an mmap_lock that would
2637  * keep the anon_vma alive, so take a reference on it first and
2638  * then lock it for write; the write lock serialises against
2639  * concurrent split or collapse operations on the same anon_vma.
2640  */
2641
2642
2643 anon_vma = page_get_anon_vma(head);
2644 if (!anon_vma) {
2645 ret = -EBUSY;
2646 goto out;
2647 }
2648 end = -1;
2649 mapping = NULL;
2650 anon_vma_lock_write(anon_vma);
2651 } else {
2652 mapping = head->mapping;
2653
2654
2655 if (!mapping) {
2656 ret = -EBUSY;
2657 goto out;
2658 }
2659
2660 xas_split_alloc(&xas, head, compound_order(head),
2661 mapping_gfp_mask(mapping) & GFP_RECLAIM_MASK);
2662 if (xas_error(&xas)) {
2663 ret = xas_error(&xas);
2664 goto out;
2665 }
2666
2667 anon_vma = NULL;
2668 i_mmap_lock_read(mapping);
2669 /*
2670  * __split_huge_page() may need to trim pages beyond EOF, but on
2671  * 32-bit i_size_read() takes an irq-unsafe seqlock that must not
2672  * nest inside the i_pages lock, so sample the end index here.
2673  * i_size may still change, but holding the head page lock is
2674  * enough to serialise the trimming against truncation.
2675  */
2676
2677 end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
2678 if (shmem_mapping(mapping))
2679 end = shmem_fallocend(mapping->host, end);
2680 }
2681
2682 /*
2683  * Racy check whether the page can be split at all, done before
2684  * unmap_page() goes to the trouble of unmapping it.
2685  */
2686 if (!can_split_folio(folio, &extra_pins)) {
2687 ret = -EBUSY;
2688 goto out_unlock;
2689 }
2690
2691 unmap_page(head);
2692
2693 /* Block interrupt re-entry on the i_pages lock and the split queue lock. */
2694 local_irq_disable();
2695 if (mapping) {
2696 /*
2697  * Make sure the head page is still in the page cache; if the
2698  * head is there, all tail pages are assumed to be there too.
2699  */
2700 xas_lock(&xas);
2701 xas_reset(&xas);
2702 if (xas_load(&xas) != head)
2703 goto fail;
2704 }
2705
2706 /* Prevent deferred_split_scan() from touching ->_refcount while frozen. */
2707 spin_lock(&ds_queue->split_queue_lock);
2708 if (page_ref_freeze(head, 1 + extra_pins)) {
2709 if (!list_empty(page_deferred_list(head))) {
2710 ds_queue->split_queue_len--;
2711 list_del(page_deferred_list(head));
2712 }
2713 spin_unlock(&ds_queue->split_queue_lock);
2714 if (mapping) {
2715 int nr = thp_nr_pages(head);
2716
2717 xas_split(&xas, head, thp_order(head));
2718 if (PageSwapBacked(head)) {
2719 __mod_lruvec_page_state(head, NR_SHMEM_THPS,
2720 -nr);
2721 } else {
2722 __mod_lruvec_page_state(head, NR_FILE_THPS,
2723 -nr);
2724 filemap_nr_thps_dec(mapping);
2725 }
2726 }
2727
2728 __split_huge_page(page, list, end);
2729 ret = 0;
2730 } else {
2731 spin_unlock(&ds_queue->split_queue_lock);
2732 fail:
2733 if (mapping)
2734 xas_unlock(&xas);
2735 local_irq_enable();
2736 remap_page(folio, folio_nr_pages(folio));
2737 ret = -EBUSY;
2738 }
2739
2740 out_unlock:
2741 if (anon_vma) {
2742 anon_vma_unlock_write(anon_vma);
2743 put_anon_vma(anon_vma);
2744 }
2745 if (mapping)
2746 i_mmap_unlock_read(mapping);
2747 out:
2748 xas_destroy(&xas);
2749 count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
2750 return ret;
2751 }
2752
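/*
 * free_transhuge_page - compound page destructor for THPs: make sure the
 * page is no longer on a deferred split queue before handing it to
 * free_compound_page().
 */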
2753 void free_transhuge_page(struct page *page)
2754 {
2755 struct deferred_split *ds_queue = get_deferred_split_queue(page);
2756 unsigned long flags;
2757
2758 spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
2759 if (!list_empty(page_deferred_list(page))) {
2760 ds_queue->split_queue_len--;
2761 list_del(page_deferred_list(page));
2762 }
2763 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
2764 free_compound_page(page);
2765 }
2766
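/*
 * deferred_split_huge_page - queue a THP for splitting by the shrinker.
 * Typically called when an anonymous THP becomes partially unmapped:
 * splitting it right away in that path would be too expensive, so it is put
 * on the per-node (or per-memcg) deferred split queue and split later under
 * memory pressure.
 */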
2767 void deferred_split_huge_page(struct page *page)
2768 {
2769 struct deferred_split *ds_queue = get_deferred_split_queue(page);
2770 #ifdef CONFIG_MEMCG
2771 struct mem_cgroup *memcg = page_memcg(compound_head(page));
2772 #endif
2773 unsigned long flags;
2774
2775 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
2776 /*
2777  * try_to_unmap() in the page reclaim path can also get here and
2778  * race with this function, corrupting the deferred split queue;
2779  * and if reclaim is already handling this page there is no point
2780  * queueing it for the shrinker again.
2781  *
2782  * THP swap adds the page to the swap cache before calling
2783  * try_to_unmap(), so PageSwapCache() identifies that case.
2784  */
2785
2786
2787 if (PageSwapCache(page))
2788 return;
2789
2790 spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
2791 if (list_empty(page_deferred_list(page))) {
2792 count_vm_event(THP_DEFERRED_SPLIT_PAGE);
2793 list_add_tail(page_deferred_list(page), &ds_queue->split_queue);
2794 ds_queue->split_queue_len++;
2795 #ifdef CONFIG_MEMCG
2796 if (memcg)
2797 set_shrinker_bit(memcg, page_to_nid(page),
2798 deferred_split_shrinker.id);
2799 #endif
2800 }
2801 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
2802 }
2803
2804 static unsigned long deferred_split_count(struct shrinker *shrink,
2805 struct shrink_control *sc)
2806 {
2807 struct pglist_data *pgdata = NODE_DATA(sc->nid);
2808 struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
2809
2810 #ifdef CONFIG_MEMCG
2811 if (sc->memcg)
2812 ds_queue = &sc->memcg->deferred_split_queue;
2813 #endif
2814 return READ_ONCE(ds_queue->split_queue_len);
2815 }
2816
2817 static unsigned long deferred_split_scan(struct shrinker *shrink,
2818 struct shrink_control *sc)
2819 {
2820 struct pglist_data *pgdata = NODE_DATA(sc->nid);
2821 struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
2822 unsigned long flags;
2823 LIST_HEAD(list), *pos, *next;
2824 struct page *page;
2825 int split = 0;
2826
2827 #ifdef CONFIG_MEMCG
2828 if (sc->memcg)
2829 ds_queue = &sc->memcg->deferred_split_queue;
2830 #endif
2831
2832 spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
2833
2834 list_for_each_safe(pos, next, &ds_queue->split_queue) {
2835 page = list_entry((void *)pos, struct page, deferred_list);
2836 page = compound_head(page);
2837 if (get_page_unless_zero(page)) {
2838 list_move(page_deferred_list(page), &list);
2839 } else {
2840 /* Refcount already zero: the THP is being freed, drop it from the queue. */
2841 list_del_init(page_deferred_list(page));
2842 ds_queue->split_queue_len--;
2843 }
2844 if (!--sc->nr_to_scan)
2845 break;
2846 }
2847 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
2848
2849 list_for_each_safe(pos, next, &list) {
2850 page = list_entry((void *)pos, struct page, deferred_list);
2851 if (!trylock_page(page))
2852 goto next;
2853
2854 if (!split_huge_page(page))
2855 split++;
2856 unlock_page(page);
2857 next:
2858 put_page(page);
2859 }
2860
2861 spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
2862 list_splice_tail(&list, &ds_queue->split_queue);
2863 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
2864
2865 /*
2866  * Stop the shrinker when nothing was split and the queue is now
2867  * empty; the queued pages may simply have been freed under us.
2868  */
2869 if (!split && list_empty(&ds_queue->split_queue))
2870 return SHRINK_STOP;
2871 return split;
2872 }
2873
2874 static struct shrinker deferred_split_shrinker = {
2875 .count_objects = deferred_split_count,
2876 .scan_objects = deferred_split_scan,
2877 .seeks = DEFAULT_SEEKS,
2878 .flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE |
2879 SHRINKER_NONSLAB,
2880 };
2881
2882 #ifdef CONFIG_DEBUG_FS
2883 static void split_huge_pages_all(void)
2884 {
2885 struct zone *zone;
2886 struct page *page;
2887 unsigned long pfn, max_zone_pfn;
2888 unsigned long total = 0, split = 0;
2889
2890 pr_debug("Split all THPs\n");
2891 for_each_zone(zone) {
2892 if (!managed_zone(zone))
2893 continue;
2894 max_zone_pfn = zone_end_pfn(zone);
2895 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
2896 int nr_pages;
2897
2898 page = pfn_to_online_page(pfn);
2899 if (!page || !get_page_unless_zero(page))
2900 continue;
2901
2902 if (zone != page_zone(page))
2903 goto next;
2904
2905 if (!PageHead(page) || PageHuge(page) || !PageLRU(page))
2906 goto next;
2907
2908 total++;
2909 lock_page(page);
2910 nr_pages = thp_nr_pages(page);
2911 if (!split_huge_page(page))
2912 split++;
2913 pfn += nr_pages - 1;
2914 unlock_page(page);
2915 next:
2916 put_page(page);
2917 cond_resched();
2918 }
2919 }
2920
2921 pr_debug("%lu of %lu THP split\n", split, total);
2922 }
2923
2924 static inline bool vma_not_suitable_for_thp_split(struct vm_area_struct *vma)
2925 {
2926 return vma_is_special_huge(vma) || (vma->vm_flags & VM_IO) ||
2927 is_vm_hugetlb_page(vma);
2928 }
2929
2930 static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
2931 unsigned long vaddr_end)
2932 {
2933 int ret = 0;
2934 struct task_struct *task;
2935 struct mm_struct *mm;
2936 unsigned long total = 0, split = 0;
2937 unsigned long addr;
2938
2939 vaddr_start &= PAGE_MASK;
2940 vaddr_end &= PAGE_MASK;
2941
2942 /* Look up the task from the pid. */
2943 rcu_read_lock();
2944 task = find_task_by_vpid(pid);
2945 if (!task) {
2946 rcu_read_unlock();
2947 ret = -ESRCH;
2948 goto out;
2949 }
2950 get_task_struct(task);
2951 rcu_read_unlock();
2952
2953 /* Pin the task's mm; the task_struct itself is no longer needed. */
2954 mm = get_task_mm(task);
2955 put_task_struct(task);
2956
2957 if (!mm) {
2958 ret = -EINVAL;
2959 goto out;
2960 }
2961
2962 pr_debug("Split huge pages in pid: %d, vaddr: [0x%lx - 0x%lx]\n",
2963 pid, vaddr_start, vaddr_end);
2964
2965 mmap_read_lock(mm);
2966 /*
2967  * Always advance by PAGE_SIZE: a page table can be full of
2968  * PTE-mapped THPs whose huge-page address lies elsewhere in the VMA.
2969  */
2970 for (addr = vaddr_start; addr < vaddr_end; addr += PAGE_SIZE) {
2971 struct vm_area_struct *vma = vma_lookup(mm, addr);
2972 struct page *page;
2973
2974 if (!vma)
2975 break;
2976
2977 /* Skip special mappings and hugetlb VMAs. */
2978 if (vma_not_suitable_for_thp_split(vma)) {
2979 addr = vma->vm_end;
2980 continue;
2981 }
2982
2983 /* FOLL_DUMP makes follow_page() skip special pages such as the zero page. */
2984 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
2985
2986 if (IS_ERR_OR_NULL(page) || is_zone_device_page(page))
2987 continue;
2988
2989 if (!is_transparent_hugepage(page))
2990 goto next;
2991
2992 total++;
2993 if (!can_split_folio(page_folio(page), NULL))
2994 goto next;
2995
2996 if (!trylock_page(page))
2997 goto next;
2998
2999 if (!split_huge_page(page))
3000 split++;
3001
3002 unlock_page(page);
3003 next:
3004 put_page(page);
3005 cond_resched();
3006 }
3007 mmap_read_unlock(mm);
3008 mmput(mm);
3009
3010 pr_debug("%lu of %lu THP split\n", split, total);
3011
3012 out:
3013 return ret;
3014 }
3015
3016 static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
3017 pgoff_t off_end)
3018 {
3019 struct filename *file;
3020 struct file *candidate;
3021 struct address_space *mapping;
3022 int ret = -EINVAL;
3023 pgoff_t index;
3024 int nr_pages = 1;
3025 unsigned long total = 0, split = 0;
3026
3027 file = getname_kernel(file_path);
3028 if (IS_ERR(file))
3029 return ret;
3030
3031 candidate = file_open_name(file, O_RDONLY, 0);
3032 if (IS_ERR(candidate))
3033 goto out;
3034
3035 pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx]\n",
3036 file_path, off_start, off_end);
3037
3038 mapping = candidate->f_mapping;
3039
3040 for (index = off_start; index < off_end; index += nr_pages) {
3041 struct page *fpage = pagecache_get_page(mapping, index,
3042 FGP_ENTRY | FGP_HEAD, 0);
3043
3044 nr_pages = 1;
3045 if (xa_is_value(fpage) || !fpage)
3046 continue;
3047
3048 if (!is_transparent_hugepage(fpage))
3049 goto next;
3050
3051 total++;
3052 nr_pages = thp_nr_pages(fpage);
3053
3054 if (!trylock_page(fpage))
3055 goto next;
3056
3057 if (!split_huge_page(fpage))
3058 split++;
3059
3060 unlock_page(fpage);
3061 next:
3062 put_page(fpage);
3063 cond_resched();
3064 }
3065
3066 filp_close(candidate, NULL);
3067 ret = 0;
3068
3069 pr_debug("%lu of %lu file-backed THP split\n", split, total);
3070 out:
3071 putname(file);
3072 return ret;
3073 }
3074
3075 #define MAX_INPUT_BUF_SZ 255
3076
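/*
 * Input formats accepted by the debugfs file created below (usually visible
 * as /sys/kernel/debug/split_huge_pages, assuming debugfs is mounted there):
 *
 *   echo 1 > split_huge_pages
 *       split all THPs system-wide
 *   echo "<pid>,0x<vaddr_start>,0x<vaddr_end>" > split_huge_pages
 *       split THPs mapped by <pid> within that virtual address range
 *   echo "/path/to/file,0x<off_start>,0x<off_end>" > split_huge_pages
 *       split file-backed THPs within that page-offset range
 *
 * Addresses and offsets must be hexadecimal with a 0x prefix, matching the
 * sscanf() formats used in split_huge_pages_write() below.
 */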
3077 static ssize_t split_huge_pages_write(struct file *file, const char __user *buf,
3078 size_t count, loff_t *ppos)
3079 {
3080 static DEFINE_MUTEX(split_debug_mutex);
3081 ssize_t ret;
3082
3083 char input_buf[MAX_INPUT_BUF_SZ];
3084 int pid;
3085 unsigned long vaddr_start, vaddr_end;
3086
3087 ret = mutex_lock_interruptible(&split_debug_mutex);
3088 if (ret)
3089 return ret;
3090
3091 ret = -EFAULT;
3092
3093 memset(input_buf, 0, MAX_INPUT_BUF_SZ);
3094 if (copy_from_user(input_buf, buf, min_t(size_t, count, MAX_INPUT_BUF_SZ)))
3095 goto out;
3096
3097 input_buf[MAX_INPUT_BUF_SZ - 1] = '\0';
3098
3099 if (input_buf[0] == '/') {
3100 char *tok;
3101 char *buf = input_buf;
3102 char file_path[MAX_INPUT_BUF_SZ];
3103 pgoff_t off_start = 0, off_end = 0;
3104 size_t input_len = strlen(input_buf);
3105
3106 tok = strsep(&buf, ",");
3107 if (tok) {
3108 strcpy(file_path, tok);
3109 } else {
3110 ret = -EINVAL;
3111 goto out;
3112 }
3113
3114 ret = sscanf(buf, "0x%lx,0x%lx", &off_start, &off_end);
3115 if (ret != 2) {
3116 ret = -EINVAL;
3117 goto out;
3118 }
3119 ret = split_huge_pages_in_file(file_path, off_start, off_end);
3120 if (!ret)
3121 ret = input_len;
3122
3123 goto out;
3124 }
3125
3126 ret = sscanf(input_buf, "%d,0x%lx,0x%lx", &pid, &vaddr_start, &vaddr_end);
3127 if (ret == 1 && pid == 1) {
3128 split_huge_pages_all();
3129 ret = strlen(input_buf);
3130 goto out;
3131 } else if (ret != 3) {
3132 ret = -EINVAL;
3133 goto out;
3134 }
3135
3136 ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end);
3137 if (!ret)
3138 ret = strlen(input_buf);
3139 out:
3140 mutex_unlock(&split_debug_mutex);
3141 return ret;
3142
3143 }
3144
3145 static const struct file_operations split_huge_pages_fops = {
3146 .owner = THIS_MODULE,
3147 .write = split_huge_pages_write,
3148 .llseek = no_llseek,
3149 };
3150
3151 static int __init split_huge_pages_debugfs(void)
3152 {
3153 debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
3154 &split_huge_pages_fops);
3155 return 0;
3156 }
3157 late_initcall(split_huge_pages_debugfs);
3158 #endif
3159
3160 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
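/*
 * set_pmd_migration_entry - replace a present huge pmd mapping @page with a
 * pmd migration entry; called from the rmap walk during migration.  Returns
 * -EBUSY (and restores the original pmd) if the anon page is exclusive but
 * cannot be converted to a shareable rmap, e.g. because it may be pinned.
 */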
3161 int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
3162 struct page *page)
3163 {
3164 struct vm_area_struct *vma = pvmw->vma;
3165 struct mm_struct *mm = vma->vm_mm;
3166 unsigned long address = pvmw->address;
3167 bool anon_exclusive;
3168 pmd_t pmdval;
3169 swp_entry_t entry;
3170 pmd_t pmdswp;
3171
3172 if (!(pvmw->pmd && !pvmw->pte))
3173 return 0;
3174
3175 flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
3176 pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
3177
3178 anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
3179 if (anon_exclusive && page_try_share_anon_rmap(page)) {
3180 set_pmd_at(mm, address, pvmw->pmd, pmdval);
3181 return -EBUSY;
3182 }
3183
3184 if (pmd_dirty(pmdval))
3185 set_page_dirty(page);
3186 if (pmd_write(pmdval))
3187 entry = make_writable_migration_entry(page_to_pfn(page));
3188 else if (anon_exclusive)
3189 entry = make_readable_exclusive_migration_entry(page_to_pfn(page));
3190 else
3191 entry = make_readable_migration_entry(page_to_pfn(page));
3192 pmdswp = swp_entry_to_pmd(entry);
3193 if (pmd_soft_dirty(pmdval))
3194 pmdswp = pmd_swp_mksoft_dirty(pmdswp);
3195 set_pmd_at(mm, address, pvmw->pmd, pmdswp);
3196 page_remove_rmap(page, vma, true);
3197 put_page(page);
3198 trace_set_migration_pmd(address, pmd_val(pmdswp));
3199
3200 return 0;
3201 }
3202
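/*
 * remove_migration_pmd - restore a present huge pmd for @new from the pmd
 * migration entry found by the page-vma-mapped walk, carrying over the
 * write, soft-dirty and uffd-wp state encoded in the entry and re-adding
 * the page to the rmap as a compound mapping.
 */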
3203 void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
3204 {
3205 struct vm_area_struct *vma = pvmw->vma;
3206 struct mm_struct *mm = vma->vm_mm;
3207 unsigned long address = pvmw->address;
3208 unsigned long haddr = address & HPAGE_PMD_MASK;
3209 pmd_t pmde;
3210 swp_entry_t entry;
3211
3212 if (!(pvmw->pmd && !pvmw->pte))
3213 return;
3214
3215 entry = pmd_to_swp_entry(*pvmw->pmd);
3216 get_page(new);
3217 pmde = pmd_mkold(mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot)));
3218 if (pmd_swp_soft_dirty(*pvmw->pmd))
3219 pmde = pmd_mksoft_dirty(pmde);
3220 if (is_writable_migration_entry(entry))
3221 pmde = maybe_pmd_mkwrite(pmde, vma);
3222 if (pmd_swp_uffd_wp(*pvmw->pmd))
3223 pmde = pmd_wrprotect(pmd_mkuffd_wp(pmde));
3224
3225 if (PageAnon(new)) {
3226 rmap_t rmap_flags = RMAP_COMPOUND;
3227
3228 if (!is_readable_migration_entry(entry))
3229 rmap_flags |= RMAP_EXCLUSIVE;
3230
3231 page_add_anon_rmap(new, vma, haddr, rmap_flags);
3232 } else {
3233 page_add_file_rmap(new, vma, true);
3234 }
3235 VM_BUG_ON(pmd_write(pmde) && PageAnon(new) && !PageAnonExclusive(new));
3236 set_pmd_at(mm, haddr, pvmw->pmd, pmde);
3237
3238
3239 update_mmu_cache_pmd(vma, address, pvmw->pmd);
3240 trace_remove_migration_pmd(address, pmd_val(pmde));
3241 }
3242 #endif