0001 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0002 
0003 #include <linux/mm.h>
0004 #include <linux/sched.h>
0005 #include <linux/mmu_notifier.h>
0006 #include <linux/rmap.h>
0007 #include <linux/swap.h>
0008 #include <linux/mm_inline.h>
0009 #include <linux/kthread.h>
0010 #include <linux/khugepaged.h>
0011 #include <linux/freezer.h>
0012 #include <linux/mman.h>
0013 #include <linux/hashtable.h>
0014 #include <linux/userfaultfd_k.h>
0015 #include <linux/page_idle.h>
0016 #include <linux/swapops.h>
0017 #include <linux/shmem_fs.h>
0018 
0019 #include <asm/tlb.h>
0020 #include <asm/pgalloc.h>
0021 #include "internal.h"
0022 
0023 enum scan_result {
0024     SCAN_FAIL,
0025     SCAN_SUCCEED,
0026     SCAN_PMD_NULL,
0027     SCAN_EXCEED_NONE_PTE,
0028     SCAN_PTE_NON_PRESENT,
0029     SCAN_PAGE_RO,
0030     SCAN_LACK_REFERENCED_PAGE,
0031     SCAN_PAGE_NULL,
0032     SCAN_SCAN_ABORT,
0033     SCAN_PAGE_COUNT,
0034     SCAN_PAGE_LRU,
0035     SCAN_PAGE_LOCK,
0036     SCAN_PAGE_ANON,
0037     SCAN_PAGE_COMPOUND,
0038     SCAN_ANY_PROCESS,
0039     SCAN_VMA_NULL,
0040     SCAN_VMA_CHECK,
0041     SCAN_ADDRESS_RANGE,
0042     SCAN_SWAP_CACHE_PAGE,
0043     SCAN_DEL_PAGE_LRU,
0044     SCAN_ALLOC_HUGE_PAGE_FAIL,
0045     SCAN_CGROUP_CHARGE_FAIL,
0046     SCAN_EXCEED_SWAP_PTE,
0047     SCAN_TRUNCATED,
0048 };
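
/*
 * Note (descriptive, not from the original source): each of these status
 * codes is reported through the mm_collapse_huge_page* and
 * mm_khugepaged_scan_pmd tracepoints created below via CREATE_TRACE_POINTS
 * and <trace/events/huge_memory.h>, which is how the outcome of a scan or
 * collapse attempt becomes visible to tracing tools.
 */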
0049 
0050 #define CREATE_TRACE_POINTS
0051 #include <trace/events/huge_memory.h>
0052 
0053 /* default scan 8*512 ptes (or vmas) every 10 seconds */
0054 static unsigned int khugepaged_pages_to_scan __read_mostly;
0055 static unsigned int khugepaged_pages_collapsed;
0056 static unsigned int khugepaged_full_scans;
0057 static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
0058 /* during fragmentation poll the hugepage allocator once every minute */
0059 static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
0060 static unsigned long khugepaged_sleep_expire;
0061 static DEFINE_SPINLOCK(khugepaged_mm_lock);
0062 static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
0063 /*
0064  * By default, collapse a hugepage if there is at least one pte mapped,
0065  * just as would have happened if the vma had been large enough at
0066  * page fault time.
0067  */
0068 static unsigned int khugepaged_max_ptes_none __read_mostly;
0069 static unsigned int khugepaged_max_ptes_swap __read_mostly;
0070 
0071 #define MM_SLOTS_HASH_BITS 10
0072 static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
0073 
0074 static struct kmem_cache *mm_slot_cache __read_mostly;
0075 
0076 /**
0077  * struct mm_slot - hash lookup from mm to mm_slot
0078  * @hash: hash collision list
0079  * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
0080  * @mm: the mm that this information is valid for
0081  */
0082 struct mm_slot {
0083     struct hlist_node hash;
0084     struct list_head mm_node;
0085     struct mm_struct *mm;
0086 };
0087 
0088 /**
0089  * struct khugepaged_scan - cursor for scanning
0090  * @mm_head: the head of the mm list to scan
0091  * @mm_slot: the current mm_slot we are scanning
0092  * @address: the next address inside that mm to be scanned
0093  *
0094  * There is only one khugepaged_scan instance of this cursor structure.
0095  */
0096 struct khugepaged_scan {
0097     struct list_head mm_head;
0098     struct mm_slot *mm_slot;
0099     unsigned long address;
0100 };
0101 
0102 static struct khugepaged_scan khugepaged_scan = {
0103     .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
0104 };
0105 
0106 #ifdef CONFIG_SYSFS
0107 static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
0108                      struct kobj_attribute *attr,
0109                      char *buf)
0110 {
0111     return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
0112 }
0113 
0114 static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
0115                       struct kobj_attribute *attr,
0116                       const char *buf, size_t count)
0117 {
0118     unsigned long msecs;
0119     int err;
0120 
0121     err = kstrtoul(buf, 10, &msecs);
0122     if (err || msecs > UINT_MAX)
0123         return -EINVAL;
0124 
0125     khugepaged_scan_sleep_millisecs = msecs;
0126     khugepaged_sleep_expire = 0;
0127     wake_up_interruptible(&khugepaged_wait);
0128 
0129     return count;
0130 }
0131 static struct kobj_attribute scan_sleep_millisecs_attr =
0132     __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
0133            scan_sleep_millisecs_store);
0134 
0135 static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
0136                       struct kobj_attribute *attr,
0137                       char *buf)
0138 {
0139     return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
0140 }
0141 
0142 static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
0143                        struct kobj_attribute *attr,
0144                        const char *buf, size_t count)
0145 {
0146     unsigned long msecs;
0147     int err;
0148 
0149     err = kstrtoul(buf, 10, &msecs);
0150     if (err || msecs > UINT_MAX)
0151         return -EINVAL;
0152 
0153     khugepaged_alloc_sleep_millisecs = msecs;
0154     khugepaged_sleep_expire = 0;
0155     wake_up_interruptible(&khugepaged_wait);
0156 
0157     return count;
0158 }
0159 static struct kobj_attribute alloc_sleep_millisecs_attr =
0160     __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
0161            alloc_sleep_millisecs_store);
0162 
0163 static ssize_t pages_to_scan_show(struct kobject *kobj,
0164                   struct kobj_attribute *attr,
0165                   char *buf)
0166 {
0167     return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
0168 }
0169 static ssize_t pages_to_scan_store(struct kobject *kobj,
0170                    struct kobj_attribute *attr,
0171                    const char *buf, size_t count)
0172 {
0173     int err;
0174     unsigned long pages;
0175 
0176     err = kstrtoul(buf, 10, &pages);
0177     if (err || !pages || pages > UINT_MAX)
0178         return -EINVAL;
0179 
0180     khugepaged_pages_to_scan = pages;
0181 
0182     return count;
0183 }
0184 static struct kobj_attribute pages_to_scan_attr =
0185     __ATTR(pages_to_scan, 0644, pages_to_scan_show,
0186            pages_to_scan_store);
0187 
0188 static ssize_t pages_collapsed_show(struct kobject *kobj,
0189                     struct kobj_attribute *attr,
0190                     char *buf)
0191 {
0192     return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
0193 }
0194 static struct kobj_attribute pages_collapsed_attr =
0195     __ATTR_RO(pages_collapsed);
0196 
0197 static ssize_t full_scans_show(struct kobject *kobj,
0198                    struct kobj_attribute *attr,
0199                    char *buf)
0200 {
0201     return sprintf(buf, "%u\n", khugepaged_full_scans);
0202 }
0203 static struct kobj_attribute full_scans_attr =
0204     __ATTR_RO(full_scans);
0205 
0206 static ssize_t khugepaged_defrag_show(struct kobject *kobj,
0207                       struct kobj_attribute *attr, char *buf)
0208 {
0209     return single_hugepage_flag_show(kobj, attr, buf,
0210                 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
0211 }
0212 static ssize_t khugepaged_defrag_store(struct kobject *kobj,
0213                        struct kobj_attribute *attr,
0214                        const char *buf, size_t count)
0215 {
0216     return single_hugepage_flag_store(kobj, attr, buf, count,
0217                  TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
0218 }
0219 static struct kobj_attribute khugepaged_defrag_attr =
0220     __ATTR(defrag, 0644, khugepaged_defrag_show,
0221            khugepaged_defrag_store);
0222 
0223 /*
0224  * max_ptes_none controls whether khugepaged may collapse hugepages
0225  * over unmapped ptes, which can increase the memory footprint of
0226  * the vmas involved. When max_ptes_none is 0, khugepaged will not
0227  * reduce the available free memory in the system as it runs;
0228  * increasing max_ptes_none instead allows khugepaged to consume
0229  * more of the free memory during its scan.
0230  */
0231 static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
0232                          struct kobj_attribute *attr,
0233                          char *buf)
0234 {
0235     return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
0236 }
0237 static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
0238                           struct kobj_attribute *attr,
0239                           const char *buf, size_t count)
0240 {
0241     int err;
0242     unsigned long max_ptes_none;
0243 
0244     err = kstrtoul(buf, 10, &max_ptes_none);
0245     if (err || max_ptes_none > HPAGE_PMD_NR-1)
0246         return -EINVAL;
0247 
0248     khugepaged_max_ptes_none = max_ptes_none;
0249 
0250     return count;
0251 }
0252 static struct kobj_attribute khugepaged_max_ptes_none_attr =
0253     __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
0254            khugepaged_max_ptes_none_store);
0255 
0256 static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
0257                          struct kobj_attribute *attr,
0258                          char *buf)
0259 {
0260     return sprintf(buf, "%u\n", khugepaged_max_ptes_swap);
0261 }
0262 
0263 static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
0264                           struct kobj_attribute *attr,
0265                           const char *buf, size_t count)
0266 {
0267     int err;
0268     unsigned long max_ptes_swap;
0269 
0270     err  = kstrtoul(buf, 10, &max_ptes_swap);
0271     if (err || max_ptes_swap > HPAGE_PMD_NR-1)
0272         return -EINVAL;
0273 
0274     khugepaged_max_ptes_swap = max_ptes_swap;
0275 
0276     return count;
0277 }
0278 
0279 static struct kobj_attribute khugepaged_max_ptes_swap_attr =
0280     __ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
0281            khugepaged_max_ptes_swap_store);
0282 
0283 static struct attribute *khugepaged_attr[] = {
0284     &khugepaged_defrag_attr.attr,
0285     &khugepaged_max_ptes_none_attr.attr,
0286     &pages_to_scan_attr.attr,
0287     &pages_collapsed_attr.attr,
0288     &full_scans_attr.attr,
0289     &scan_sleep_millisecs_attr.attr,
0290     &alloc_sleep_millisecs_attr.attr,
0291     &khugepaged_max_ptes_swap_attr.attr,
0292     NULL,
0293 };
0294 
0295 struct attribute_group khugepaged_attr_group = {
0296     .attrs = khugepaged_attr,
0297     .name = "khugepaged",
0298 };
0299 #endif /* CONFIG_SYSFS */
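
/*
 * Illustrative usage (added, not part of the original file): with
 * CONFIG_SYSFS the attribute group above is registered by the THP init
 * code, and the knobs typically appear under
 * /sys/kernel/mm/transparent_hugepage/khugepaged/, e.g.:
 *
 *   echo 4096  > /sys/kernel/mm/transparent_hugepage/khugepaged/pages_to_scan
 *   echo 10000 > /sys/kernel/mm/transparent_hugepage/khugepaged/scan_sleep_millisecs
 *   cat /sys/kernel/mm/transparent_hugepage/khugepaged/pages_collapsed
 */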
0300 
0301 #define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)
0302 
0303 int hugepage_madvise(struct vm_area_struct *vma,
0304              unsigned long *vm_flags, int advice)
0305 {
0306     switch (advice) {
0307     case MADV_HUGEPAGE:
0308 #ifdef CONFIG_S390
0309         /*
0310          * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
0311          * can't handle this properly after s390_enable_sie, so we simply
0312          * ignore the madvise to prevent qemu from causing a SIGSEGV.
0313          */
0314         if (mm_has_pgste(vma->vm_mm))
0315             return 0;
0316 #endif
0317         *vm_flags &= ~VM_NOHUGEPAGE;
0318         *vm_flags |= VM_HUGEPAGE;
0319         /*
0320          * If the vma becomes suitable for khugepaged to scan,
0321          * register it here without waiting for a page fault that
0322          * may not happen any time soon.
0323          */
0324         if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
0325                 khugepaged_enter_vma_merge(vma, *vm_flags))
0326             return -ENOMEM;
0327         break;
0328     case MADV_NOHUGEPAGE:
0329         *vm_flags &= ~VM_HUGEPAGE;
0330         *vm_flags |= VM_NOHUGEPAGE;
0331         /*
0332          * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
0333          * this vma even if we leave the mm registered in khugepaged if
0334          * it got registered before VM_NOHUGEPAGE was set.
0335          */
0336         break;
0337     }
0338 
0339     return 0;
0340 }
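
/*
 * Illustrative caller-side usage (added, userspace, not part of this file):
 * hugepage_madvise() is reached through the madvise(2) syscall, e.g.
 *
 *   madvise(addr, length, MADV_HUGEPAGE);    /\* opt the range in  *\/
 *   madvise(addr, length, MADV_NOHUGEPAGE);  /\* opt the range out *\/
 */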
0341 
0342 int __init khugepaged_init(void)
0343 {
0344     mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
0345                       sizeof(struct mm_slot),
0346                       __alignof__(struct mm_slot), 0, NULL);
0347     if (!mm_slot_cache)
0348         return -ENOMEM;
0349 
0350     khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
0351     khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
0352     khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
0353 
0354     return 0;
0355 }
0356 
0357 void __init khugepaged_destroy(void)
0358 {
0359     kmem_cache_destroy(mm_slot_cache);
0360 }
0361 
0362 static inline struct mm_slot *alloc_mm_slot(void)
0363 {
0364     if (!mm_slot_cache) /* initialization failed */
0365         return NULL;
0366     return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
0367 }
0368 
0369 static inline void free_mm_slot(struct mm_slot *mm_slot)
0370 {
0371     kmem_cache_free(mm_slot_cache, mm_slot);
0372 }
0373 
0374 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
0375 {
0376     struct mm_slot *mm_slot;
0377 
0378     hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
0379         if (mm == mm_slot->mm)
0380             return mm_slot;
0381 
0382     return NULL;
0383 }
0384 
0385 static void insert_to_mm_slots_hash(struct mm_struct *mm,
0386                     struct mm_slot *mm_slot)
0387 {
0388     mm_slot->mm = mm;
0389     hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
0390 }
0391 
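/*
 * Descriptive note (added): mm_users dropping to zero means the address
 * space is being torn down and only mm_count references remain, so a
 * non-zero return here tells khugepaged to stop touching this mm.
 */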
0392 static inline int khugepaged_test_exit(struct mm_struct *mm)
0393 {
0394     return atomic_read(&mm->mm_users) == 0;
0395 }
0396 
0397 int __khugepaged_enter(struct mm_struct *mm)
0398 {
0399     struct mm_slot *mm_slot;
0400     int wakeup;
0401 
0402     mm_slot = alloc_mm_slot();
0403     if (!mm_slot)
0404         return -ENOMEM;
0405 
0406     /* __khugepaged_exit() must not run from under us */
0407     VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
0408     if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
0409         free_mm_slot(mm_slot);
0410         return 0;
0411     }
0412 
0413     spin_lock(&khugepaged_mm_lock);
0414     insert_to_mm_slots_hash(mm, mm_slot);
0415     /*
0416      * Insert just behind the scanning cursor, to let the area settle
0417      * down a little.
0418      */
0419     wakeup = list_empty(&khugepaged_scan.mm_head);
0420     list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
0421     spin_unlock(&khugepaged_mm_lock);
0422 
0423     atomic_inc(&mm->mm_count);
0424     if (wakeup)
0425         wake_up_interruptible(&khugepaged_wait);
0426 
0427     return 0;
0428 }
0429 
0430 int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
0431                    unsigned long vm_flags)
0432 {
0433     unsigned long hstart, hend;
0434     if (!vma->anon_vma)
0435         /*
0436          * Not yet faulted in, so we will register later in the
0437          * page fault handler if needed.
0438          */
0439         return 0;
0440     if (vma->vm_ops || (vm_flags & VM_NO_KHUGEPAGED))
0441         /* khugepaged not yet working on file or special mappings */
0442         return 0;
0443     hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
0444     hend = vma->vm_end & HPAGE_PMD_MASK;
0445     if (hstart < hend)
0446         return khugepaged_enter(vma, vm_flags);
0447     return 0;
0448 }
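
/*
 * Worked example (added, illustrative, assuming 4K pages and a 2MB PMD):
 * for a vma with vm_start = 0x00201000 and vm_end = 0x00810000, hstart
 * rounds up to 0x00400000 and hend rounds down to 0x00800000, so the vma
 * covers at least one full PMD-sized range and is worth registering.
 */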
0449 
0450 void __khugepaged_exit(struct mm_struct *mm)
0451 {
0452     struct mm_slot *mm_slot;
0453     int free = 0;
0454 
0455     spin_lock(&khugepaged_mm_lock);
0456     mm_slot = get_mm_slot(mm);
0457     if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
0458         hash_del(&mm_slot->hash);
0459         list_del(&mm_slot->mm_node);
0460         free = 1;
0461     }
0462     spin_unlock(&khugepaged_mm_lock);
0463 
0464     if (free) {
0465         clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
0466         free_mm_slot(mm_slot);
0467         mmdrop(mm);
0468     } else if (mm_slot) {
0469         /*
0470          * This is required to serialize against
0471          * khugepaged_test_exit() (which is guaranteed to run
0472          * under mmap_sem read mode). Stop here (after we
0473          * return, all pagetables will be destroyed) until
0474          * khugepaged has finished working on the pagetables
0475          * under the mmap_sem.
0476          */
0477         down_write(&mm->mmap_sem);
0478         up_write(&mm->mmap_sem);
0479     }
0480 }
0481 
0482 static void release_pte_page(struct page *page)
0483 {
0484     /* 0 stands for page_is_file_cache(page) == false */
0485     dec_node_page_state(page, NR_ISOLATED_ANON + 0);
0486     unlock_page(page);
0487     putback_lru_page(page);
0488 }
0489 
0490 static void release_pte_pages(pte_t *pte, pte_t *_pte)
0491 {
0492     while (--_pte >= pte) {
0493         pte_t pteval = *_pte;
0494         if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)))
0495             release_pte_page(pte_page(pteval));
0496     }
0497 }
0498 
0499 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
0500                     unsigned long address,
0501                     pte_t *pte)
0502 {
0503     struct page *page = NULL;
0504     pte_t *_pte;
0505     int none_or_zero = 0, result = 0, referenced = 0;
0506     bool writable = false;
0507 
0508     for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
0509          _pte++, address += PAGE_SIZE) {
0510         pte_t pteval = *_pte;
0511         if (pte_none(pteval) || (pte_present(pteval) &&
0512                 is_zero_pfn(pte_pfn(pteval)))) {
0513             if (!userfaultfd_armed(vma) &&
0514                 ++none_or_zero <= khugepaged_max_ptes_none) {
0515                 continue;
0516             } else {
0517                 result = SCAN_EXCEED_NONE_PTE;
0518                 goto out;
0519             }
0520         }
0521         if (!pte_present(pteval)) {
0522             result = SCAN_PTE_NON_PRESENT;
0523             goto out;
0524         }
0525         page = vm_normal_page(vma, address, pteval);
0526         if (unlikely(!page)) {
0527             result = SCAN_PAGE_NULL;
0528             goto out;
0529         }
0530 
0531         VM_BUG_ON_PAGE(PageCompound(page), page);
0532         VM_BUG_ON_PAGE(!PageAnon(page), page);
0533         VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
0534 
0535         /*
0536          * We can do it before isolate_lru_page because the
0537          * page can't be freed from under us. NOTE: PG_lock
0538          * is needed to serialize against split_huge_page
0539          * when invoked from the VM.
0540          */
0541         if (!trylock_page(page)) {
0542             result = SCAN_PAGE_LOCK;
0543             goto out;
0544         }
0545 
0546         /*
0547          * cannot use mapcount: can't collapse if there's a gup pin.
0548          * The page must only be referenced by the scanned process
0549          * and page swap cache.
0550          */
0551         if (page_count(page) != 1 + !!PageSwapCache(page)) {
0552             unlock_page(page);
0553             result = SCAN_PAGE_COUNT;
0554             goto out;
0555         }
0556         if (pte_write(pteval)) {
0557             writable = true;
0558         } else {
0559             if (PageSwapCache(page) &&
0560                 !reuse_swap_page(page, NULL)) {
0561                 unlock_page(page);
0562                 result = SCAN_SWAP_CACHE_PAGE;
0563                 goto out;
0564             }
0565             /*
0566              * Page is not in the swap cache. It can be collapsed
0567              * into a THP.
0568              */
0569         }
0570 
0571         /*
0572          * Isolate the page to avoid collapsing a page that is
0573          * currently in use by the VM into a hugepage.
0574          */
0575         if (isolate_lru_page(page)) {
0576             unlock_page(page);
0577             result = SCAN_DEL_PAGE_LRU;
0578             goto out;
0579         }
0580         /* 0 stands for page_is_file_cache(page) == false */
0581         inc_node_page_state(page, NR_ISOLATED_ANON + 0);
0582         VM_BUG_ON_PAGE(!PageLocked(page), page);
0583         VM_BUG_ON_PAGE(PageLRU(page), page);
0584 
0585         /* There should be enough young ptes to collapse the page */
0586         if (pte_young(pteval) ||
0587             page_is_young(page) || PageReferenced(page) ||
0588             mmu_notifier_test_young(vma->vm_mm, address))
0589             referenced++;
0590     }
0591     if (likely(writable)) {
0592         if (likely(referenced)) {
0593             result = SCAN_SUCCEED;
0594             trace_mm_collapse_huge_page_isolate(page, none_or_zero,
0595                                 referenced, writable, result);
0596             return 1;
0597         }
0598     } else {
0599         result = SCAN_PAGE_RO;
0600     }
0601 
0602 out:
0603     release_pte_pages(pte, _pte);
0604     trace_mm_collapse_huge_page_isolate(page, none_or_zero,
0605                         referenced, writable, result);
0606     return 0;
0607 }
0608 
0609 static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
0610                       struct vm_area_struct *vma,
0611                       unsigned long address,
0612                       spinlock_t *ptl)
0613 {
0614     pte_t *_pte;
0615     for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) {
0616         pte_t pteval = *_pte;
0617         struct page *src_page;
0618 
0619         if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
0620             clear_user_highpage(page, address);
0621             add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
0622             if (is_zero_pfn(pte_pfn(pteval))) {
0623                 /*
0624                  * ptl mostly unnecessary.
0625                  */
0626                 spin_lock(ptl);
0627                 /*
0628                  * paravirt calls inside pte_clear here are
0629                  * superfluous.
0630                  */
0631                 pte_clear(vma->vm_mm, address, _pte);
0632                 spin_unlock(ptl);
0633             }
0634         } else {
0635             src_page = pte_page(pteval);
0636             copy_user_highpage(page, src_page, address, vma);
0637             VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
0638             release_pte_page(src_page);
0639             /*
0640              * ptl mostly unnecessary, but preempt has to
0641              * be disabled to update the per-cpu stats
0642              * inside page_remove_rmap().
0643              */
0644             spin_lock(ptl);
0645             /*
0646              * paravirt calls inside pte_clear here are
0647              * superfluous.
0648              */
0649             pte_clear(vma->vm_mm, address, _pte);
0650             page_remove_rmap(src_page, false);
0651             spin_unlock(ptl);
0652             free_page_and_swap_cache(src_page);
0653         }
0654 
0655         address += PAGE_SIZE;
0656         page++;
0657     }
0658 }
0659 
0660 static void khugepaged_alloc_sleep(void)
0661 {
0662     DEFINE_WAIT(wait);
0663 
0664     add_wait_queue(&khugepaged_wait, &wait);
0665     freezable_schedule_timeout_interruptible(
0666         msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
0667     remove_wait_queue(&khugepaged_wait, &wait);
0668 }
0669 
0670 static int khugepaged_node_load[MAX_NUMNODES];
0671 
0672 static bool khugepaged_scan_abort(int nid)
0673 {
0674     int i;
0675 
0676     /*
0677      * If node_reclaim_mode is disabled, then no extra effort is made to
0678      * allocate memory locally.
0679      */
0680     if (!node_reclaim_mode)
0681         return false;
0682 
0683     /* If there is a count for this node already, it must be acceptable */
0684     if (khugepaged_node_load[nid])
0685         return false;
0686 
0687     for (i = 0; i < MAX_NUMNODES; i++) {
0688         if (!khugepaged_node_load[i])
0689             continue;
0690         if (node_distance(nid, i) > RECLAIM_DISTANCE)
0691             return true;
0692     }
0693     return false;
0694 }
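
/*
 * Illustrative example (added): if all pages counted so far sit on node 0
 * and the current page is on node 1 with node_distance(1, 0) greater than
 * RECLAIM_DISTANCE, the scan is aborted rather than collapsing into a
 * hugepage that would mix memory which node_reclaim tries to keep local.
 */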
0695 
0696 /* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
0697 static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
0698 {
0699     return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
0700 }
0701 
0702 #ifdef CONFIG_NUMA
0703 static int khugepaged_find_target_node(void)
0704 {
0705     static int last_khugepaged_target_node = NUMA_NO_NODE;
0706     int nid, target_node = 0, max_value = 0;
0707 
0708     /* find first node with max normal pages hit */
0709     for (nid = 0; nid < MAX_NUMNODES; nid++)
0710         if (khugepaged_node_load[nid] > max_value) {
0711             max_value = khugepaged_node_load[nid];
0712             target_node = nid;
0713         }
0714 
0715     /* do some balancing if several nodes have the same hit count */
0716     if (target_node <= last_khugepaged_target_node)
0717         for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
0718                 nid++)
0719             if (max_value == khugepaged_node_load[nid]) {
0720                 target_node = nid;
0721                 break;
0722             }
0723 
0724     last_khugepaged_target_node = target_node;
0725     return target_node;
0726 }
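
/*
 * Illustrative example (added): with khugepaged_node_load = {1: 8, 3: 8}
 * the first loop picks node 1; if node 1 was also the previous target,
 * the second loop advances to node 3, so tied nodes take turns receiving
 * the hugepage allocations.
 */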
0727 
0728 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
0729 {
0730     if (IS_ERR(*hpage)) {
0731         if (!*wait)
0732             return false;
0733 
0734         *wait = false;
0735         *hpage = NULL;
0736         khugepaged_alloc_sleep();
0737     } else if (*hpage) {
0738         put_page(*hpage);
0739         *hpage = NULL;
0740     }
0741 
0742     return true;
0743 }
0744 
0745 static struct page *
0746 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
0747 {
0748     VM_BUG_ON_PAGE(*hpage, *hpage);
0749 
0750     *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
0751     if (unlikely(!*hpage)) {
0752         count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
0753         *hpage = ERR_PTR(-ENOMEM);
0754         return NULL;
0755     }
0756 
0757     prep_transhuge_page(*hpage);
0758     count_vm_event(THP_COLLAPSE_ALLOC);
0759     return *hpage;
0760 }
0761 #else
0762 static int khugepaged_find_target_node(void)
0763 {
0764     return 0;
0765 }
0766 
0767 static inline struct page *alloc_khugepaged_hugepage(void)
0768 {
0769     struct page *page;
0770 
0771     page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
0772                HPAGE_PMD_ORDER);
0773     if (page)
0774         prep_transhuge_page(page);
0775     return page;
0776 }
0777 
0778 static struct page *khugepaged_alloc_hugepage(bool *wait)
0779 {
0780     struct page *hpage;
0781 
0782     do {
0783         hpage = alloc_khugepaged_hugepage();
0784         if (!hpage) {
0785             count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
0786             if (!*wait)
0787                 return NULL;
0788 
0789             *wait = false;
0790             khugepaged_alloc_sleep();
0791         } else
0792             count_vm_event(THP_COLLAPSE_ALLOC);
0793     } while (unlikely(!hpage) && likely(khugepaged_enabled()));
0794 
0795     return hpage;
0796 }
0797 
0798 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
0799 {
0800     if (!*hpage)
0801         *hpage = khugepaged_alloc_hugepage(wait);
0802 
0803     if (unlikely(!*hpage))
0804         return false;
0805 
0806     return true;
0807 }
0808 
0809 static struct page *
0810 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
0811 {
0812     VM_BUG_ON(!*hpage);
0813 
0814     return  *hpage;
0815 }
0816 #endif
0817 
0818 static bool hugepage_vma_check(struct vm_area_struct *vma)
0819 {
0820     if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
0821         (vma->vm_flags & VM_NOHUGEPAGE))
0822         return false;
0823     if (shmem_file(vma->vm_file)) {
0824         if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
0825             return false;
0826         return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
0827                 HPAGE_PMD_NR);
0828     }
0829     if (!vma->anon_vma || vma->vm_ops)
0830         return false;
0831     if (is_vma_temporary_stack(vma))
0832         return false;
0833     return !(vma->vm_flags & VM_NO_KHUGEPAGED);
0834 }
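
/*
 * Illustrative example (added): for a shmem mapping the IS_ALIGNED() check
 * above requires the virtual address and the file offset to be congruent
 * modulo HPAGE_PMD_NR pages, e.g. vm_start = 0x00400000 with vm_pgoff = 0
 * passes, while vm_pgoff = 1 (the mapping starts one page into the file)
 * does not, because a huge page could then never map naturally aligned
 * file offsets.
 */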
0835 
0836 /*
0837  * If the mmap_sem was temporarily dropped, revalidate the vma
0838  * before continuing, under the re-taken mmap_sem.
0839  * Return 0 on success, otherwise return a non-zero scan
0840  * code.
0841  */
0842 
0843 static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
0844         struct vm_area_struct **vmap)
0845 {
0846     struct vm_area_struct *vma;
0847     unsigned long hstart, hend;
0848 
0849     if (unlikely(khugepaged_test_exit(mm)))
0850         return SCAN_ANY_PROCESS;
0851 
0852     *vmap = vma = find_vma(mm, address);
0853     if (!vma)
0854         return SCAN_VMA_NULL;
0855 
0856     hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
0857     hend = vma->vm_end & HPAGE_PMD_MASK;
0858     if (address < hstart || address + HPAGE_PMD_SIZE > hend)
0859         return SCAN_ADDRESS_RANGE;
0860     if (!hugepage_vma_check(vma))
0861         return SCAN_VMA_CHECK;
0862     return 0;
0863 }
0864 
0865 /*
0866  * Bring missing pages in from swap, to complete THP collapse.
0867  * Only done if khugepaged_scan_pmd believes it is worthwhile.
0868  *
0869  * Called and returns without pte mapped or spinlocks held,
0870  * but with mmap_sem held to protect against vma changes.
0871  */
0872 
0873 static bool __collapse_huge_page_swapin(struct mm_struct *mm,
0874                     struct vm_area_struct *vma,
0875                     unsigned long address, pmd_t *pmd,
0876                     int referenced)
0877 {
0878     int swapped_in = 0, ret = 0;
0879     struct vm_fault vmf = {
0880         .vma = vma,
0881         .address = address,
0882         .flags = FAULT_FLAG_ALLOW_RETRY,
0883         .pmd = pmd,
0884         .pgoff = linear_page_index(vma, address),
0885     };
0886 
0887     /* we only decide to swap in if there are enough young ptes */
0888     if (referenced < HPAGE_PMD_NR/2) {
0889         trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
0890         return false;
0891     }
0892     vmf.pte = pte_offset_map(pmd, address);
0893     for (; vmf.address < address + HPAGE_PMD_NR*PAGE_SIZE;
0894             vmf.pte++, vmf.address += PAGE_SIZE) {
0895         vmf.orig_pte = *vmf.pte;
0896         if (!is_swap_pte(vmf.orig_pte))
0897             continue;
0898         swapped_in++;
0899         ret = do_swap_page(&vmf);
0900 
0901         /* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
0902         if (ret & VM_FAULT_RETRY) {
0903             down_read(&mm->mmap_sem);
0904             if (hugepage_vma_revalidate(mm, address, &vmf.vma)) {
0905                 /* vma is no longer available, don't continue to swapin */
0906                 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
0907                 return false;
0908             }
0909             /* check if the pmd is still valid */
0910             if (mm_find_pmd(mm, address) != pmd)
0911                 return false;
0912         }
0913         if (ret & VM_FAULT_ERROR) {
0914             trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
0915             return false;
0916         }
0917         /* pte is unmapped now, we need to map it */
0918         vmf.pte = pte_offset_map(pmd, vmf.address);
0919     }
0920     vmf.pte--;
0921     pte_unmap(vmf.pte);
0922     trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
0923     return true;
0924 }
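
/*
 * Arithmetic note (added): with 4K pages HPAGE_PMD_NR is 512, so the check
 * above only attempts swap-in when khugepaged_scan_pmd() found at least
 * 256 of the 512 ptes referenced; below that threshold the cost of
 * faulting pages back in is judged not worth the collapse.
 */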
0925 
0926 static void collapse_huge_page(struct mm_struct *mm,
0927                    unsigned long address,
0928                    struct page **hpage,
0929                    int node, int referenced)
0930 {
0931     pmd_t *pmd, _pmd;
0932     pte_t *pte;
0933     pgtable_t pgtable;
0934     struct page *new_page;
0935     spinlock_t *pmd_ptl, *pte_ptl;
0936     int isolated = 0, result = 0;
0937     struct mem_cgroup *memcg;
0938     struct vm_area_struct *vma;
0939     unsigned long mmun_start;   /* For mmu_notifiers */
0940     unsigned long mmun_end;     /* For mmu_notifiers */
0941     gfp_t gfp;
0942 
0943     VM_BUG_ON(address & ~HPAGE_PMD_MASK);
0944 
0945     /* Only allocate from the target node */
0946     gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
0947 
0948     /*
0949      * Before allocating the hugepage, release the mmap_sem read lock.
0950      * The allocation can take potentially a long time if it involves
0951      * sync compaction, and we do not need to hold the mmap_sem during
0952      * that. We will recheck the vma after taking it again in write mode.
0953      */
0954     up_read(&mm->mmap_sem);
0955     new_page = khugepaged_alloc_page(hpage, gfp, node);
0956     if (!new_page) {
0957         result = SCAN_ALLOC_HUGE_PAGE_FAIL;
0958         goto out_nolock;
0959     }
0960 
0961     if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
0962         result = SCAN_CGROUP_CHARGE_FAIL;
0963         goto out_nolock;
0964     }
0965 
0966     down_read(&mm->mmap_sem);
0967     result = hugepage_vma_revalidate(mm, address, &vma);
0968     if (result) {
0969         mem_cgroup_cancel_charge(new_page, memcg, true);
0970         up_read(&mm->mmap_sem);
0971         goto out_nolock;
0972     }
0973 
0974     pmd = mm_find_pmd(mm, address);
0975     if (!pmd) {
0976         result = SCAN_PMD_NULL;
0977         mem_cgroup_cancel_charge(new_page, memcg, true);
0978         up_read(&mm->mmap_sem);
0979         goto out_nolock;
0980     }
0981 
0982     /*
0983      * __collapse_huge_page_swapin always returns with mmap_sem locked.
0984      * If it fails, we release mmap_sem and jump to out_nolock;
0985      * continuing the collapse would cause inconsistency.
0986      */
0987     if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced)) {
0988         mem_cgroup_cancel_charge(new_page, memcg, true);
0989         up_read(&mm->mmap_sem);
0990         goto out_nolock;
0991     }
0992 
0993     up_read(&mm->mmap_sem);
0994     /*
0995      * Prevent all access to the pagetables, with the exception of
0996      * gup_fast (handled later by the pmdp_collapse_flush) and the
0997      * VM rmap walks (handled by the anon_vma lock + PG_lock).
0998      */
0999     down_write(&mm->mmap_sem);
1000     result = hugepage_vma_revalidate(mm, address, &vma);
1001     if (result)
1002         goto out;
1003     /* check if the pmd is still valid */
1004     if (mm_find_pmd(mm, address) != pmd)
1005         goto out;
1006 
1007     anon_vma_lock_write(vma->anon_vma);
1008 
1009     pte = pte_offset_map(pmd, address);
1010     pte_ptl = pte_lockptr(mm, pmd);
1011 
1012     mmun_start = address;
1013     mmun_end   = address + HPAGE_PMD_SIZE;
1014     mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
1015     pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1016     /*
1017      * After this gup_fast can't run anymore. This also removes
1018      * any huge TLB entry from the CPU so we won't allow
1019      * huge and small TLB entries for the same virtual address
1020      * to avoid the risk of CPU bugs in that area.
1021      */
1022     _pmd = pmdp_collapse_flush(vma, address, pmd);
1023     spin_unlock(pmd_ptl);
1024     mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1025 
1026     spin_lock(pte_ptl);
1027     isolated = __collapse_huge_page_isolate(vma, address, pte);
1028     spin_unlock(pte_ptl);
1029 
1030     if (unlikely(!isolated)) {
1031         pte_unmap(pte);
1032         spin_lock(pmd_ptl);
1033         BUG_ON(!pmd_none(*pmd));
1034         /*
1035          * We can only use set_pmd_at when establishing
1036          * hugepmds, and never for establishing regular pmds that
1037          * point to regular pagetables. Use pmd_populate for that.
1038          */
1039         pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1040         spin_unlock(pmd_ptl);
1041         anon_vma_unlock_write(vma->anon_vma);
1042         result = SCAN_FAIL;
1043         goto out;
1044     }
1045 
1046     /*
1047      * All pages are isolated and locked so anon_vma rmap
1048      * can't run anymore.
1049      */
1050     anon_vma_unlock_write(vma->anon_vma);
1051 
1052     __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
1053     pte_unmap(pte);
1054     __SetPageUptodate(new_page);
1055     pgtable = pmd_pgtable(_pmd);
1056 
1057     _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
1058     _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1059 
1060     /*
1061      * spin_lock() below is not the equivalent of smp_wmb(), so
1062      * this is needed to prevent the copy_huge_page writes from
1063      * becoming visible after the set_pmd_at() write.
1064      */
1065     smp_wmb();
1066 
1067     spin_lock(pmd_ptl);
1068     BUG_ON(!pmd_none(*pmd));
1069     page_add_new_anon_rmap(new_page, vma, address, true);
1070     mem_cgroup_commit_charge(new_page, memcg, false, true);
1071     lru_cache_add_active_or_unevictable(new_page, vma);
1072     pgtable_trans_huge_deposit(mm, pmd, pgtable);
1073     set_pmd_at(mm, address, pmd, _pmd);
1074     update_mmu_cache_pmd(vma, address, pmd);
1075     spin_unlock(pmd_ptl);
1076 
1077     *hpage = NULL;
1078 
1079     khugepaged_pages_collapsed++;
1080     result = SCAN_SUCCEED;
1081 out_up_write:
1082     up_write(&mm->mmap_sem);
1083 out_nolock:
1084     trace_mm_collapse_huge_page(mm, isolated, result);
1085     return;
1086 out:
1087     mem_cgroup_cancel_charge(new_page, memcg, true);
1088     goto out_up_write;
1089 }
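
/*
 * Descriptive summary of the sequence above (added): the huge page is
 * allocated and charged without the mmap_sem, the vma is revalidated and
 * missing ptes are swapped in under the read lock, then the write lock is
 * taken, the pmd is cleared with pmdp_collapse_flush(), the small pages are
 * isolated and copied, and the huge pmd is installed with set_pmd_at(); if
 * isolation fails after the pmd was cleared, the original page table is
 * re-installed with pmd_populate().
 */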
1090 
1091 static int khugepaged_scan_pmd(struct mm_struct *mm,
1092                    struct vm_area_struct *vma,
1093                    unsigned long address,
1094                    struct page **hpage)
1095 {
1096     pmd_t *pmd;
1097     pte_t *pte, *_pte;
1098     int ret = 0, none_or_zero = 0, result = 0, referenced = 0;
1099     struct page *page = NULL;
1100     unsigned long _address;
1101     spinlock_t *ptl;
1102     int node = NUMA_NO_NODE, unmapped = 0;
1103     bool writable = false;
1104 
1105     VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1106 
1107     pmd = mm_find_pmd(mm, address);
1108     if (!pmd) {
1109         result = SCAN_PMD_NULL;
1110         goto out;
1111     }
1112 
1113     memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1114     pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1115     for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
1116          _pte++, _address += PAGE_SIZE) {
1117         pte_t pteval = *_pte;
1118         if (is_swap_pte(pteval)) {
1119             if (++unmapped <= khugepaged_max_ptes_swap) {
1120                 continue;
1121             } else {
1122                 result = SCAN_EXCEED_SWAP_PTE;
1123                 goto out_unmap;
1124             }
1125         }
1126         if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1127             if (!userfaultfd_armed(vma) &&
1128                 ++none_or_zero <= khugepaged_max_ptes_none) {
1129                 continue;
1130             } else {
1131                 result = SCAN_EXCEED_NONE_PTE;
1132                 goto out_unmap;
1133             }
1134         }
1135         if (!pte_present(pteval)) {
1136             result = SCAN_PTE_NON_PRESENT;
1137             goto out_unmap;
1138         }
1139         if (pte_write(pteval))
1140             writable = true;
1141 
1142         page = vm_normal_page(vma, _address, pteval);
1143         if (unlikely(!page)) {
1144             result = SCAN_PAGE_NULL;
1145             goto out_unmap;
1146         }
1147 
1148         /* TODO: teach khugepaged to collapse THP mapped with pte */
1149         if (PageCompound(page)) {
1150             result = SCAN_PAGE_COMPOUND;
1151             goto out_unmap;
1152         }
1153 
1154         /*
1155          * Record which node the original page is from and save this
1156          * information to khugepaged_node_load[].
1157          * Khugepaged will allocate the hugepage from the node that
1158          * has the highest hit count.
1159          */
1160         node = page_to_nid(page);
1161         if (khugepaged_scan_abort(node)) {
1162             result = SCAN_SCAN_ABORT;
1163             goto out_unmap;
1164         }
1165         khugepaged_node_load[node]++;
1166         if (!PageLRU(page)) {
1167             result = SCAN_PAGE_LRU;
1168             goto out_unmap;
1169         }
1170         if (PageLocked(page)) {
1171             result = SCAN_PAGE_LOCK;
1172             goto out_unmap;
1173         }
1174         if (!PageAnon(page)) {
1175             result = SCAN_PAGE_ANON;
1176             goto out_unmap;
1177         }
1178 
1179         /*
1180          * cannot use mapcount: can't collapse if there's a gup pin.
1181          * The page must only be referenced by the scanned process
1182          * and page swap cache.
1183          */
1184         if (page_count(page) != 1 + !!PageSwapCache(page)) {
1185             result = SCAN_PAGE_COUNT;
1186             goto out_unmap;
1187         }
1188         if (pte_young(pteval) ||
1189             page_is_young(page) || PageReferenced(page) ||
1190             mmu_notifier_test_young(vma->vm_mm, address))
1191             referenced++;
1192     }
1193     if (writable) {
1194         if (referenced) {
1195             result = SCAN_SUCCEED;
1196             ret = 1;
1197         } else {
1198             result = SCAN_LACK_REFERENCED_PAGE;
1199         }
1200     } else {
1201         result = SCAN_PAGE_RO;
1202     }
1203 out_unmap:
1204     pte_unmap_unlock(pte, ptl);
1205     if (ret) {
1206         node = khugepaged_find_target_node();
1207         /* collapse_huge_page will return with the mmap_sem released */
1208         collapse_huge_page(mm, address, hpage, node, referenced);
1209     }
1210 out:
1211     trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
1212                      none_or_zero, result, unmapped);
1213     return ret;
1214 }
1215 
1216 static void collect_mm_slot(struct mm_slot *mm_slot)
1217 {
1218     struct mm_struct *mm = mm_slot->mm;
1219 
1220     VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
1221 
1222     if (khugepaged_test_exit(mm)) {
1223         /* free mm_slot */
1224         hash_del(&mm_slot->hash);
1225         list_del(&mm_slot->mm_node);
1226 
1227         /*
1228          * Not strictly needed because the mm exited already.
1229          *
1230          * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1231          */
1232 
1233         /* khugepaged_mm_lock actually not necessary for the below */
1234         free_mm_slot(mm_slot);
1235         mmdrop(mm);
1236     }
1237 }
1238 
1239 #if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
1240 static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1241 {
1242     struct vm_area_struct *vma;
1243     unsigned long addr;
1244     pmd_t *pmd, _pmd;
1245 
1246     i_mmap_lock_write(mapping);
1247     vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1248         /* probably overkill */
1249         if (vma->anon_vma)
1250             continue;
1251         addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1252         if (addr & ~HPAGE_PMD_MASK)
1253             continue;
1254         if (vma->vm_end < addr + HPAGE_PMD_SIZE)
1255             continue;
1256         pmd = mm_find_pmd(vma->vm_mm, addr);
1257         if (!pmd)
1258             continue;
1259         /*
1260          * We need exclusive mmap_sem to retract the page table.
1261          * If the trylock fails we would end up with a pte-mapped THP
1262          * after re-fault. Not ideal, but it's more important not to
1263          * disturb the system too much.
1264          */
1265         if (down_write_trylock(&vma->vm_mm->mmap_sem)) {
1266             spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
1267             /* assume page table is clear */
1268             _pmd = pmdp_collapse_flush(vma, addr, pmd);
1269             spin_unlock(ptl);
1270             up_write(&vma->vm_mm->mmap_sem);
1271             atomic_long_dec(&vma->vm_mm->nr_ptes);
1272             pte_free(vma->vm_mm, pmd_pgtable(_pmd));
1273         }
1274     }
1275     i_mmap_unlock_write(mapping);
1276 }
1277 
1278 /**
1279  * collapse_shmem - collapse small tmpfs/shmem pages into a huge one.
1280  *
1281  * Basic scheme is simple, details are more complex:
1282  *  - allocate and freeze a new huge page;
1283  *  - scan over the radix tree, replacing old pages with the new one
1284  *    + swap in pages if necessary;
1285  *    + fill in gaps;
1286  *    + keep old pages around in case rollback is required;
1287  *  - if replacing succeeds:
1288  *    + copy data over;
1289  *    + free old pages;
1290  *    + unfreeze huge page;
1291  *  - if replacing fails:
1292  *    + put all pages back and unfreeze them;
1293  *    + restore gaps in the radix-tree;
1294  *    + free huge page;
1295  */
1296 static void collapse_shmem(struct mm_struct *mm,
1297         struct address_space *mapping, pgoff_t start,
1298         struct page **hpage, int node)
1299 {
1300     gfp_t gfp;
1301     struct page *page, *new_page, *tmp;
1302     struct mem_cgroup *memcg;
1303     pgoff_t index, end = start + HPAGE_PMD_NR;
1304     LIST_HEAD(pagelist);
1305     struct radix_tree_iter iter;
1306     void **slot;
1307     int nr_none = 0, result = SCAN_SUCCEED;
1308 
1309     VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1310 
1311     /* Only allocate from the target node */
1312     gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
1313 
1314     new_page = khugepaged_alloc_page(hpage, gfp, node);
1315     if (!new_page) {
1316         result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1317         goto out;
1318     }
1319 
1320     if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
1321         result = SCAN_CGROUP_CHARGE_FAIL;
1322         goto out;
1323     }
1324 
1325     new_page->index = start;
1326     new_page->mapping = mapping;
1327     __SetPageSwapBacked(new_page);
1328     __SetPageLocked(new_page);
1329     BUG_ON(!page_ref_freeze(new_page, 1));
1330 
1331 
1332     /*
1333      * At this point the new_page is 'frozen' (page_count() is zero),
1334      * locked and not up-to-date. It's safe to insert it into the radix
1335      * tree, because nobody would be able to map it or use it in any
1336      * other way until we unfreeze it.
1337      */
1338 
1339     index = start;
1340     spin_lock_irq(&mapping->tree_lock);
1341     radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
1342         int n = min(iter.index, end) - index;
1343 
1344         /*
1345          * Handle holes in the radix tree: charge them to shmem and
1346          * insert the relevant subpages of new_page into the radix tree.
1347          */
1348         if (n && !shmem_charge(mapping->host, n)) {
1349             result = SCAN_FAIL;
1350             break;
1351         }
1352         nr_none += n;
1353         for (; index < min(iter.index, end); index++) {
1354             radix_tree_insert(&mapping->page_tree, index,
1355                     new_page + (index % HPAGE_PMD_NR));
1356         }
1357 
1358         /* We are done. */
1359         if (index >= end)
1360             break;
1361 
1362         page = radix_tree_deref_slot_protected(slot,
1363                 &mapping->tree_lock);
1364         if (radix_tree_exceptional_entry(page) || !PageUptodate(page)) {
1365             spin_unlock_irq(&mapping->tree_lock);
1366             /* swap in or instantiate fallocated page */
1367             if (shmem_getpage(mapping->host, index, &page,
1368                         SGP_NOHUGE)) {
1369                 result = SCAN_FAIL;
1370                 goto tree_unlocked;
1371             }
1372             spin_lock_irq(&mapping->tree_lock);
1373         } else if (trylock_page(page)) {
1374             get_page(page);
1375         } else {
1376             result = SCAN_PAGE_LOCK;
1377             break;
1378         }
1379 
1380         /*
1381          * The page must be locked, so we can drop the tree_lock
1382          * without racing with truncate.
1383          */
1384         VM_BUG_ON_PAGE(!PageLocked(page), page);
1385         VM_BUG_ON_PAGE(!PageUptodate(page), page);
1386         VM_BUG_ON_PAGE(PageTransCompound(page), page);
1387 
1388         if (page_mapping(page) != mapping) {
1389             result = SCAN_TRUNCATED;
1390             goto out_unlock;
1391         }
1392         spin_unlock_irq(&mapping->tree_lock);
1393 
1394         if (isolate_lru_page(page)) {
1395             result = SCAN_DEL_PAGE_LRU;
1396             goto out_isolate_failed;
1397         }
1398 
1399         if (page_mapped(page))
1400             unmap_mapping_range(mapping, index << PAGE_SHIFT,
1401                     PAGE_SIZE, 0);
1402 
1403         spin_lock_irq(&mapping->tree_lock);
1404 
1405         slot = radix_tree_lookup_slot(&mapping->page_tree, index);
1406         VM_BUG_ON_PAGE(page != radix_tree_deref_slot_protected(slot,
1407                     &mapping->tree_lock), page);
1408         VM_BUG_ON_PAGE(page_mapped(page), page);
1409 
1410         /*
1411          * The page is expected to have page_count() == 3:
1412          *  - we hold a pin on it;
1413          *  - one reference from radix tree;
1414          *  - one from isolate_lru_page;
1415          */
1416         if (!page_ref_freeze(page, 3)) {
1417             result = SCAN_PAGE_COUNT;
1418             goto out_lru;
1419         }
1420 
1421         /*
1422          * Add the page to the list to be able to undo the collapse if
1423          * something goes wrong.
1424          */
1425         list_add_tail(&page->lru, &pagelist);
1426 
1427         /* Finally, replace with the new page. */
1428         radix_tree_replace_slot(&mapping->page_tree, slot,
1429                 new_page + (index % HPAGE_PMD_NR));
1430 
1431         slot = radix_tree_iter_resume(slot, &iter);
1432         index++;
1433         continue;
1434 out_lru:
1435         spin_unlock_irq(&mapping->tree_lock);
1436         putback_lru_page(page);
1437 out_isolate_failed:
1438         unlock_page(page);
1439         put_page(page);
1440         goto tree_unlocked;
1441 out_unlock:
1442         unlock_page(page);
1443         put_page(page);
1444         break;
1445     }
1446 
1447     /*
1448      * Handle a hole in the radix tree at the end of the range.
1449      * This code only triggers if there's nothing in the radix tree
1450      * beyond 'end'.
1451      */
1452     if (result == SCAN_SUCCEED && index < end) {
1453         int n = end - index;
1454 
1455         if (!shmem_charge(mapping->host, n)) {
1456             result = SCAN_FAIL;
1457             goto tree_locked;
1458         }
1459 
1460         for (; index < end; index++) {
1461             radix_tree_insert(&mapping->page_tree, index,
1462                     new_page + (index % HPAGE_PMD_NR));
1463         }
1464         nr_none += n;
1465     }
1466 
1467 tree_locked:
1468     spin_unlock_irq(&mapping->tree_lock);
1469 tree_unlocked:
1470 
1471     if (result == SCAN_SUCCEED) {
1472         unsigned long flags;
1473         struct zone *zone = page_zone(new_page);
1474 
1475         /*
1476          * Replacing old pages with the new one has succeeded; now we
1477          * need to copy the content and free the old pages.
1478          */
1479         list_for_each_entry_safe(page, tmp, &pagelist, lru) {
1480             copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
1481                     page);
1482             list_del(&page->lru);
1483             unlock_page(page);
1484             page_ref_unfreeze(page, 1);
1485             page->mapping = NULL;
1486             ClearPageActive(page);
1487             ClearPageUnevictable(page);
1488             put_page(page);
1489         }
1490 
1491         local_irq_save(flags);
1492         __inc_node_page_state(new_page, NR_SHMEM_THPS);
1493         if (nr_none) {
1494             __mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
1495             __mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
1496         }
1497         local_irq_restore(flags);
1498 
1499         /*
1500          * Remove the pte page tables, so we can re-fault
1501          * the page as huge.
1502          */
1503         retract_page_tables(mapping, start);
1504 
1505         /* Everything is ready, let's unfreeze the new_page */
1506         set_page_dirty(new_page);
1507         SetPageUptodate(new_page);
1508         page_ref_unfreeze(new_page, HPAGE_PMD_NR);
1509         mem_cgroup_commit_charge(new_page, memcg, false, true);
1510         lru_cache_add_anon(new_page);
1511         unlock_page(new_page);
1512 
1513         *hpage = NULL;
1514     } else {
1515         /* Something went wrong: roll back the changes to the radix tree */
1516         shmem_uncharge(mapping->host, nr_none);
1517         spin_lock_irq(&mapping->tree_lock);
1518         radix_tree_for_each_slot(slot, &mapping->page_tree, &iter,
1519                 start) {
1520             if (iter.index >= end)
1521                 break;
1522             page = list_first_entry_or_null(&pagelist,
1523                     struct page, lru);
1524             if (!page || iter.index < page->index) {
1525                 if (!nr_none)
1526                     break;
1527                 nr_none--;
1528                 /* Put holes back where they were */
1529                 radix_tree_delete(&mapping->page_tree,
1530                           iter.index);
1531                 continue;
1532             }
1533 
1534             VM_BUG_ON_PAGE(page->index != iter.index, page);
1535 
1536             /* Unfreeze the page. */
1537             list_del(&page->lru);
1538             page_ref_unfreeze(page, 2);
1539             radix_tree_replace_slot(&mapping->page_tree,
1540                         slot, page);
1541             slot = radix_tree_iter_resume(slot, &iter);
1542             spin_unlock_irq(&mapping->tree_lock);
1543             putback_lru_page(page);
1544             unlock_page(page);
1545             spin_lock_irq(&mapping->tree_lock);
1546         }
1547         VM_BUG_ON(nr_none);
1548         spin_unlock_irq(&mapping->tree_lock);
1549 
1550         /* Unfreeze new_page; the caller will take care of freeing it */
1551         page_ref_unfreeze(new_page, 1);
1552         mem_cgroup_cancel_charge(new_page, memcg, true);
1553         unlock_page(new_page);
1554         new_page->mapping = NULL;
1555     }
1556 out:
1557     VM_BUG_ON(!list_empty(&pagelist));
1558     /* TODO: tracepoints */
1559 }
1560 
1561 static void khugepaged_scan_shmem(struct mm_struct *mm,
1562         struct address_space *mapping,
1563         pgoff_t start, struct page **hpage)
1564 {
1565     struct page *page = NULL;
1566     struct radix_tree_iter iter;
1567     void **slot;
1568     int present, swap;
1569     int node = NUMA_NO_NODE;
1570     int result = SCAN_SUCCEED;
1571 
1572     present = 0;
1573     swap = 0;
1574     memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1575     rcu_read_lock();
1576     radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
1577         if (iter.index >= start + HPAGE_PMD_NR)
1578             break;
1579 
1580         page = radix_tree_deref_slot(slot);
1581         if (radix_tree_deref_retry(page)) {
1582             slot = radix_tree_iter_retry(&iter);
1583             continue;
1584         }
1585 
1586         if (radix_tree_exception(page)) {
1587             if (++swap > khugepaged_max_ptes_swap) {
1588                 result = SCAN_EXCEED_SWAP_PTE;
1589                 break;
1590             }
1591             continue;
1592         }
1593 
1594         if (PageTransCompound(page)) {
1595             result = SCAN_PAGE_COMPOUND;
1596             break;
1597         }
1598 
1599         node = page_to_nid(page);
1600         if (khugepaged_scan_abort(node)) {
1601             result = SCAN_SCAN_ABORT;
1602             break;
1603         }
1604         khugepaged_node_load[node]++;
1605 
1606         if (!PageLRU(page)) {
1607             result = SCAN_PAGE_LRU;
1608             break;
1609         }
1610 
1611         if (page_count(page) != 1 + page_mapcount(page)) {
1612             result = SCAN_PAGE_COUNT;
1613             break;
1614         }
1615 
1616         /*
1617          * We probably should check if the page is referenced here, but
1618          * nobody would transfer pte_young() to PageReferenced() for us.
1619          * And rmap walk here is just too costly...
1620          */
1621 
1622         present++;
1623 
1624         if (need_resched()) {
1625             slot = radix_tree_iter_resume(slot, &iter);
1626             cond_resched_rcu();
1627         }
1628     }
1629     rcu_read_unlock();
1630 
1631     if (result == SCAN_SUCCEED) {
1632         if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
1633             result = SCAN_EXCEED_NONE_PTE;
1634         } else {
1635             node = khugepaged_find_target_node();
1636             collapse_shmem(mm, mapping, start, hpage, node);
1637         }
1638     }
1639 
1640     /* TODO: tracepoints */
1641 }
1642 #else
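/*
 * Without shmem huge page support compiled in, khugepaged never queues
 * file-backed ranges, so this stub should be unreachable; BUILD_BUG()
 * makes the build fail if a call to it survives optimization.
 */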
1643 static void khugepaged_scan_shmem(struct mm_struct *mm,
1644         struct address_space *mapping,
1645         pgoff_t start, struct page **hpage)
1646 {
1647     BUILD_BUG();
1648 }
1649 #endif
1650 
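/*
 * khugepaged_scan_mm_slot - scan up to @pages pte entries of one mm
 *
 * Resumes from the cursor stored in khugepaged_scan, walks the mm's VMAs
 * in PMD-sized steps and calls khugepaged_scan_pmd() (or the shmem
 * variant for file-backed VMAs), breaking out early whenever a scan
 * helper has dropped mmap_sem.  The cursor is advanced to the next mm
 * once this one is exiting or fully scanned.  Called and returns with
 * khugepaged_mm_lock held.
 */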
1651 static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
1652                         struct page **hpage)
1653     __releases(&khugepaged_mm_lock)
1654     __acquires(&khugepaged_mm_lock)
1655 {
1656     struct mm_slot *mm_slot;
1657     struct mm_struct *mm;
1658     struct vm_area_struct *vma;
1659     int progress = 0;
1660 
1661     VM_BUG_ON(!pages);
1662     VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
1663 
1664     if (khugepaged_scan.mm_slot)
1665         mm_slot = khugepaged_scan.mm_slot;
1666     else {
1667         mm_slot = list_entry(khugepaged_scan.mm_head.next,
1668                      struct mm_slot, mm_node);
1669         khugepaged_scan.address = 0;
1670         khugepaged_scan.mm_slot = mm_slot;
1671     }
1672     spin_unlock(&khugepaged_mm_lock);
1673 
1674     mm = mm_slot->mm;
1675     down_read(&mm->mmap_sem);
1676     if (unlikely(khugepaged_test_exit(mm)))
1677         vma = NULL;
1678     else
1679         vma = find_vma(mm, khugepaged_scan.address);
1680 
1681     progress++;
1682     for (; vma; vma = vma->vm_next) {
1683         unsigned long hstart, hend;
1684 
1685         cond_resched();
1686         if (unlikely(khugepaged_test_exit(mm))) {
1687             progress++;
1688             break;
1689         }
1690         if (!hugepage_vma_check(vma)) {
1691 skip:
1692             progress++;
1693             continue;
1694         }
1695         hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1696         hend = vma->vm_end & HPAGE_PMD_MASK;
1697         if (hstart >= hend)
1698             goto skip;
1699         if (khugepaged_scan.address > hend)
1700             goto skip;
1701         if (khugepaged_scan.address < hstart)
1702             khugepaged_scan.address = hstart;
1703         VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
1704 
1705         while (khugepaged_scan.address < hend) {
1706             int ret;
1707             cond_resched();
1708             if (unlikely(khugepaged_test_exit(mm)))
1709                 goto breakouterloop;
1710 
1711             VM_BUG_ON(khugepaged_scan.address < hstart ||
1712                   khugepaged_scan.address + HPAGE_PMD_SIZE >
1713                   hend);
1714             if (shmem_file(vma->vm_file)) {
1715                 struct file *file;
1716                 pgoff_t pgoff = linear_page_index(vma,
1717                         khugepaged_scan.address);
1718                 if (!shmem_huge_enabled(vma))
1719                     goto skip;
1720                 file = get_file(vma->vm_file);
1721                 up_read(&mm->mmap_sem);
1722                 ret = 1;
1723                 khugepaged_scan_shmem(mm, file->f_mapping,
1724                         pgoff, hpage);
1725                 fput(file);
1726             } else {
1727                 ret = khugepaged_scan_pmd(mm, vma,
1728                         khugepaged_scan.address,
1729                         hpage);
1730             }
1731             /* move to next address */
1732             khugepaged_scan.address += HPAGE_PMD_SIZE;
1733             progress += HPAGE_PMD_NR;
1734             if (ret)
1735             /* we released mmap_sem, so break out of the loop */
1736                 goto breakouterloop_mmap_sem;
1737             if (progress >= pages)
1738                 goto breakouterloop;
1739         }
1740     }
1741 breakouterloop:
1742     up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
1743 breakouterloop_mmap_sem:
1744 
1745     spin_lock(&khugepaged_mm_lock);
1746     VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
1747     /*
1748      * Release the current mm_slot if this mm is about to die, or
1749      * if we scanned all vmas of this mm.
1750      */
1751     if (khugepaged_test_exit(mm) || !vma) {
1752         /*
1753          * Make sure that if mm_users is reaching zero while
1754          * khugepaged runs here, khugepaged_exit will find
1755          * mm_slot not pointing to the exiting mm.
1756          */
1757         if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
1758             khugepaged_scan.mm_slot = list_entry(
1759                 mm_slot->mm_node.next,
1760                 struct mm_slot, mm_node);
1761             khugepaged_scan.address = 0;
1762         } else {
1763             khugepaged_scan.mm_slot = NULL;
1764             khugepaged_full_scans++;
1765         }
1766 
1767         collect_mm_slot(mm_slot);
1768     }
1769 
1770     return progress;
1771 }
1772 
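/* There is work to do only if some mm is queued and khugepaged is enabled. */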
1773 static int khugepaged_has_work(void)
1774 {
1775     return !list_empty(&khugepaged_scan.mm_head) &&
1776         khugepaged_enabled();
1777 }
1778 
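/* Wake-up condition for the idle wait: new work was queued or we must stop. */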
1779 static int khugepaged_wait_event(void)
1780 {
1781     return !list_empty(&khugepaged_scan.mm_head) ||
1782         kthread_should_stop();
1783 }
1784 
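/*
 * khugepaged_do_scan - one scan pass of up to khugepaged_pages_to_scan ptes
 *
 * Repeatedly takes khugepaged_mm_lock, scans the current mm_slot and
 * accumulates progress until the quota is reached, the whole mm list has
 * been walked, a huge page cannot be preallocated, or the thread is asked
 * to stop or freeze.
 */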
1785 static void khugepaged_do_scan(void)
1786 {
1787     struct page *hpage = NULL;
1788     unsigned int progress = 0, pass_through_head = 0;
1789     unsigned int pages = khugepaged_pages_to_scan;
1790     bool wait = true;
1791 
1792     barrier(); /* snapshot khugepaged_pages_to_scan into the local 'pages' */
1793 
1794     while (progress < pages) {
1795         if (!khugepaged_prealloc_page(&hpage, &wait))
1796             break;
1797 
1798         cond_resched();
1799 
1800         if (unlikely(kthread_should_stop() || try_to_freeze()))
1801             break;
1802 
1803         spin_lock(&khugepaged_mm_lock);
1804         if (!khugepaged_scan.mm_slot)
1805             pass_through_head++;
1806         if (khugepaged_has_work() &&
1807             pass_through_head < 2)
1808             progress += khugepaged_scan_mm_slot(pages - progress,
1809                                 &hpage);
1810         else
1811             progress = pages;
1812         spin_unlock(&khugepaged_mm_lock);
1813     }
1814 
1815     if (!IS_ERR_OR_NULL(hpage))
1816         put_page(hpage);
1817 }
1818 
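/* True once the scan sleep has expired or the thread should stop. */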
1819 static bool khugepaged_should_wakeup(void)
1820 {
1821     return kthread_should_stop() ||
1822            time_after_eq(jiffies, khugepaged_sleep_expire);
1823 }
1824 
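/*
 * Sleep between scan passes: a freezable timed wait of
 * khugepaged_scan_sleep_millisecs while there is pending work, or an
 * indefinite freezable wait for new work while khugepaged stays enabled.
 */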
1825 static void khugepaged_wait_work(void)
1826 {
1827     if (khugepaged_has_work()) {
1828         const unsigned long scan_sleep_jiffies =
1829             msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
1830 
1831         if (!scan_sleep_jiffies)
1832             return;
1833 
1834         khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
1835         wait_event_freezable_timeout(khugepaged_wait,
1836                          khugepaged_should_wakeup(),
1837                          scan_sleep_jiffies);
1838         return;
1839     }
1840 
1841     if (khugepaged_enabled())
1842         wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
1843 }
1844 
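/*
 * khugepaged - main loop of the khugepaged kernel thread
 *
 * Runs at minimum priority, alternating scan passes with sleeps until
 * kthread_stop() is called, then drops any mm_slot the scan cursor was
 * still holding.
 */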
1845 static int khugepaged(void *none)
1846 {
1847     struct mm_slot *mm_slot;
1848 
1849     set_freezable();
1850     set_user_nice(current, MAX_NICE);
1851 
1852     while (!kthread_should_stop()) {
1853         khugepaged_do_scan();
1854         khugepaged_wait_work();
1855     }
1856 
1857     spin_lock(&khugepaged_mm_lock);
1858     mm_slot = khugepaged_scan.mm_slot;
1859     khugepaged_scan.mm_slot = NULL;
1860     if (mm_slot)
1861         collect_mm_slot(mm_slot);
1862     spin_unlock(&khugepaged_mm_lock);
1863     return 0;
1864 }
1865 
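/*
 * set_recommended_min_free_kbytes - raise min_free_kbytes for THP
 *
 * Computes a per-zone free-page reserve that keeps enough whole pageblocks
 * available for the anti-fragmentation heuristics, capped at roughly 5% of
 * lowmem, raises min_free_kbytes if needed (it is never lowered), and then
 * recomputes the zone watermarks.
 */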
1866 static void set_recommended_min_free_kbytes(void)
1867 {
1868     struct zone *zone;
1869     int nr_zones = 0;
1870     unsigned long recommended_min;
1871 
1872     for_each_populated_zone(zone)
1873         nr_zones++;
1874 
1875     /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
1876     recommended_min = pageblock_nr_pages * nr_zones * 2;
1877 
1878     /*
1879      * Make sure that on average at least two pageblocks are almost free
1880      * of another type, one for a migratetype to fall back to and a
1881      * second to avoid subsequent fallbacks of other types. There are 3
1882      * MIGRATE_TYPES we care about.
1883      */
1884     recommended_min += pageblock_nr_pages * nr_zones *
1885                MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
1886 
1887     /* don't ever allow reserving more than 5% of the lowmem */
1888     recommended_min = min(recommended_min,
1889                   (unsigned long) nr_free_buffer_pages() / 20);
1890     recommended_min <<= (PAGE_SHIFT-10);
1891 
1892     if (recommended_min > min_free_kbytes) {
1893         if (user_min_free_kbytes >= 0)
1894             pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
1895                 min_free_kbytes, recommended_min);
1896 
1897         min_free_kbytes = recommended_min;
1898     }
1899     setup_per_zone_wmarks();
1900 }
1901 
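/*
 * start_stop_khugepaged - start or stop the khugepaged thread
 *
 * Called when transparent hugepages are enabled or disabled (e.g. via
 * sysfs): starts the kthread and adjusts min_free_kbytes when enabling,
 * stops the kthread when disabling.  Serialized by khugepaged_mutex.
 */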
1902 int start_stop_khugepaged(void)
1903 {
1904     static struct task_struct *khugepaged_thread __read_mostly;
1905     static DEFINE_MUTEX(khugepaged_mutex);
1906     int err = 0;
1907 
1908     mutex_lock(&khugepaged_mutex);
1909     if (khugepaged_enabled()) {
1910         if (!khugepaged_thread)
1911             khugepaged_thread = kthread_run(khugepaged, NULL,
1912                             "khugepaged");
1913         if (IS_ERR(khugepaged_thread)) {
1914             pr_err("khugepaged: kthread_run(khugepaged) failed\n");
1915             err = PTR_ERR(khugepaged_thread);
1916             khugepaged_thread = NULL;
1917             goto fail;
1918         }
1919 
1920         if (!list_empty(&khugepaged_scan.mm_head))
1921             wake_up_interruptible(&khugepaged_wait);
1922 
1923         set_recommended_min_free_kbytes();
1924     } else if (khugepaged_thread) {
1925         kthread_stop(khugepaged_thread);
1926         khugepaged_thread = NULL;
1927     }
1928 fail:
1929     mutex_unlock(&khugepaged_mutex);
1930     return err;
1931 }