0001
0002 #ifndef _LINUX_MIGRATE_H
0003 #define _LINUX_MIGRATE_H
0004
0005 #include <linux/mm.h>
0006 #include <linux/mempolicy.h>
0007 #include <linux/migrate_mode.h>
0008 #include <linux/hugetlb.h>
0009
/*
 * Callback types used by migrate_pages(): new_page_t allocates a
 * destination page for @page, free_page_t releases an unused destination
 * page.  @private is the caller's opaque cookie passed through
 * migrate_pages() unchanged.
 */
typedef struct page *new_page_t(struct page *page, unsigned long private);
typedef void free_page_t(struct page *page, unsigned long private);

struct migration_target_control;

/*
 * Return values from address_space_operations.migratepage():
 * - negative errno on page migration failure;
 * - zero on page migration success;
 */
#define MIGRATEPAGE_SUCCESS 0
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
/**
 * struct movable_operations - Driver page migration
 * @isolate_page:
 * The VM calls this function to prepare the page to be moved.  The page
 * is locked and the driver should not unlock it.  The driver should
 * return ``true`` if the page is movable and ``false`` if it is not
 * currently movable.
 * @migrate_page:
 * After isolation, the VM calls this function with the isolated
 * @src page.  The driver should copy the contents of the
 * @src page to the @dst page and set up the fields of @dst page.
 * If migration is successful, the driver should return
 * MIGRATEPAGE_SUCCESS; a negative errno otherwise.
 * @putback_page:
 * If migration fails, the VM calls this function to give the isolated
 * @src page back to the driver.
 */
struct movable_operations {
	bool (*isolate_page)(struct page *, isolate_mode_t);
	int (*migrate_page)(struct page *dst, struct page *src,
			enum migrate_mode);
	void (*putback_page)(struct page *);
};

/* Human-readable names for enum migrate_reason; defined in mm/debug.c. */
extern const char *migrate_reason_names[MR_TYPES];
0061
#ifdef CONFIG_MIGRATION

extern void putback_movable_pages(struct list_head *l);
/* Common migratepage implementation for mapping-backed folios. */
int migrate_folio(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode);
/*
 * Migrate the pages on list @l using @new/@free callbacks; @reason is a
 * MR_* value for tracing, and *@ret_succeeded (may be NULL) receives the
 * count of successfully migrated pages.
 */
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
		unsigned long private, enum migrate_mode mode, int reason,
		unsigned int *ret_succeeded);
/* Default new_page_t: @private points to a struct migration_target_control. */
extern struct page *alloc_migration_target(struct page *page, unsigned long private);
extern int isolate_movable_page(struct page *page, isolate_mode_t mode);

int migrate_huge_page_move_mapping(struct address_space *mapping,
		struct folio *dst, struct folio *src);
void migration_entry_wait_on_locked(swp_entry_t entry, pte_t *ptep,
		spinlock_t *ptl);
void folio_migrate_flags(struct folio *newfolio, struct folio *folio);
void folio_migrate_copy(struct folio *newfolio, struct folio *folio);
int folio_migrate_mapping(struct address_space *mapping,
		struct folio *newfolio, struct folio *folio, int extra_count);

#else
/* Stubs so callers still compile when page migration is configured out. */

static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t new,
		free_page_t free, unsigned long private, enum migrate_mode mode,
		int reason, unsigned int *ret_succeeded)
	{ return -ENOSYS; }
static inline struct page *alloc_migration_target(struct page *page,
		unsigned long private)
	{ return NULL; }
static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
	{ return -EBUSY; }

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct folio *dst, struct folio *src)
{
	return -ENOSYS;
}

#endif /* CONFIG_MIGRATION */
0102
/* NUMA memory-tiering demotion support; needs both migration and NUMA. */
#if defined(CONFIG_MIGRATION) && defined(CONFIG_NUMA)
extern void set_migration_target_nodes(void);
extern void migrate_on_reclaim_init(void);
extern bool numa_demotion_enabled;
extern int next_demotion_node(int node);
#else
static inline void set_migration_target_nodes(void) {}
static inline void migrate_on_reclaim_init(void) {}
/* Without demotion support there is never a target node to demote to. */
static inline int next_demotion_node(int node)
{
	return NUMA_NO_NODE;
}
#define numa_demotion_enabled false
#endif
0117
/* Non-lru movable page support (see struct movable_operations above). */
#ifdef CONFIG_COMPACTION
bool PageMovable(struct page *page);
void __SetPageMovable(struct page *page, const struct movable_operations *ops);
void __ClearPageMovable(struct page *page);
#else
/* Compaction disabled: no page is ever marked movable. */
static inline bool PageMovable(struct page *page) { return false; }
static inline void __SetPageMovable(struct page *page,
		const struct movable_operations *ops)
{
}
static inline void __ClearPageMovable(struct page *page)
{
}
#endif
0132
0133 static inline bool folio_test_movable(struct folio *folio)
0134 {
0135 return PageMovable(&folio->page);
0136 }
0137
0138 static inline
0139 const struct movable_operations *page_movable_ops(struct page *page)
0140 {
0141 VM_BUG_ON(!__PageMovable(page));
0142
0143 return (const struct movable_operations *)
0144 ((unsigned long)page->mapping - PAGE_MAPPING_MOVABLE);
0145 }
0146
#ifdef CONFIG_NUMA_BALANCING
/* Attempt to move @page to NUMA @node; used by automatic NUMA balancing. */
extern int migrate_misplaced_page(struct page *page,
				  struct vm_area_struct *vma, int node);
#else
static inline int migrate_misplaced_page(struct page *page,
					 struct vm_area_struct *vma, int node)
{
	/* NUMA balancing disabled: report transient failure to the caller. */
	return -EAGAIN;
}
#endif /* CONFIG_NUMA_BALANCING */
0157
#ifdef CONFIG_MIGRATION

/*
 * Entries of the migrate_vma src/dst arrays encode a pfn shifted left by
 * MIGRATE_PFN_SHIFT together with per-page state flags in the low bits.
 * Note the gaps at bits 2 and 4-5 — presumably reserved upstream; do not
 * reuse them without checking the current kernel definitions.
 */
#define MIGRATE_PFN_VALID (1UL << 0)
#define MIGRATE_PFN_MIGRATE (1UL << 1)
#define MIGRATE_PFN_WRITE (1UL << 3)
#define MIGRATE_PFN_SHIFT 6

/* Decode an array entry back to its page; NULL if the entry is not valid. */
static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
{
	if (!(mpfn & MIGRATE_PFN_VALID))
		return NULL;
	return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
}

/* Encode @pfn as a valid array entry (flags other than VALID start clear). */
static inline unsigned long migrate_pfn(unsigned long pfn)
{
	return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
}
0181
/*
 * Bitmask (stored in migrate_vma.flags) selecting which kinds of source
 * pages a migrate_vma operation may pick up.
 */
enum migrate_vma_direction {
	MIGRATE_VMA_SELECT_SYSTEM = 1 << 0,		/* normal system RAM */
	MIGRATE_VMA_SELECT_DEVICE_PRIVATE = 1 << 1,	/* device-private memory */
	MIGRATE_VMA_SELECT_DEVICE_COHERENT = 1 << 2,	/* device-coherent memory */
};
0187
struct migrate_vma {
	struct vm_area_struct	*vma;
	/*
	 * Both src and dst array must be big enough for
	 * (end - start) >> PAGE_SHIFT entries.
	 *
	 * The src array must not be modified by the caller after
	 * migrate_vma_setup(), and must not change the dst array after
	 * migrate_vma_pages() returns.
	 *
	 * Entries are MIGRATE_PFN_* encoded pfns (see migrate_pfn() above).
	 */
	unsigned long *dst;
	unsigned long *src;
	unsigned long cpages;	/* pages collected for migration */
	unsigned long npages;	/* total pages in [start, end) */
	unsigned long start;	/* range start address, inclusive */
	unsigned long end;	/* range end address, exclusive */

	/*
	 * Set to the owner value also stored in page->pgmap->owner for
	 * migrating out of device private memory.  The flags also need to
	 * be set to MIGRATE_VMA_SELECT_DEVICE_PRIVATE.
	 * The caller should always set this field when using mmu notifier
	 * callbacks to avoid device MMU invalidations for device private
	 * pages that are not being migrated.
	 */
	void *pgmap_owner;
	unsigned long flags;	/* mask of enum migrate_vma_direction bits */
};
0216
/*
 * Device-memory migration entry points: setup collects and isolates the
 * source pages, pages performs the copy/mapping replacement, and finalize
 * releases everything.  Callers invoke them in that order.
 */
int migrate_vma_setup(struct migrate_vma *args);
void migrate_vma_pages(struct migrate_vma *migrate);
void migrate_vma_finalize(struct migrate_vma *migrate);
#endif /* CONFIG_MIGRATION */

#endif /* _LINUX_MIGRATE_H */