/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_KSM_H
#define __LINUX_KSM_H
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 */
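
/*
 * Illustrative sketch, not part of this header: userspace opts a range
 * into merging with madvise(2), which ksm_madvise() below backs.  The
 * names (buf, len) are hypothetical and error handling is trimmed; ksmd
 * only scans when enabled via /sys/kernel/mm/ksm/run.
 *
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	madvise(buf, len, MADV_MERGEABLE);
 */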

#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/sched.h>
#include <linux/sched/coredump.h>

struct stable_node;
struct mem_cgroup;

#ifdef CONFIG_KSM
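/*
 * Back end for madvise(MADV_MERGEABLE/MADV_UNMERGEABLE): called from
 * mm/madvise.c to set or clear VM_MERGEABLE in *vm_flags for this vma,
 * registering the mm with ksmd on first use.
 */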
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags);
int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);

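/*
 * Called from dup_mmap() at fork(): if the parent mm is registered with
 * ksmd (MMF_VM_MERGEABLE set), register the child mm as well.
 */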
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
		return __ksm_enter(mm);
	return 0;
}

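/*
 * Called when the last user of the mm goes away: unhook the mm from
 * ksmd so its pages are no longer scanned.
 */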
static inline void ksm_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
		__ksm_exit(mm);
}

/*
 * When do_swap_page() first faults in from swap what used to be a KSM page,
 * no problem, it will be assigned to this vma's anon_vma; but thereafter,
 * it might be faulted into a different anon_vma (or perhaps to a different
 * offset in the same anon_vma).  do_swap_page() cannot do all the locking
 * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
 * a copy, and leave remerging the pages to a later pass of ksmd.
 *
 * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
 * but what if the vma was unmerged while the page was swapped out?
 */
struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address);

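/*
 * Sketch of the expected call pattern on the swap-in path (simplified
 * from do_swap_page(); error handling trimmed, and a NULL return means
 * the copy could not be allocated):
 *
 *	page = ksm_might_need_to_copy(page, vma, vmf->address);
 *	if (!page)
 *		return VM_FAULT_OOM;
 */

/*
 * rmap_walk_ksm() visits every anon_vma mapping of a KSM folio via the
 * rmap_items hanging off its stable node; folio_migrate_ksm() moves
 * that stable-node linkage from the old folio to its migration target.
 */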
void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);

#else  /* !CONFIG_KSM */

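/*
 * With CONFIG_KSM disabled, these no-op stubs let core mm code call the
 * KSM hooks unconditionally, without #ifdefs at each call site.
 */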
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
}

#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	return 0;
}

static inline struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	return page;
}

static inline void rmap_walk_ksm(struct folio *folio,
			struct rmap_walk_control *rwc)
{
}

static inline void folio_migrate_ksm(struct folio *newfolio, struct folio *old)
{
}
#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */

#endif /* __LINUX_KSM_H */