Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 #ifndef _LINUX_MIGRATE_H
0003 #define _LINUX_MIGRATE_H
0004 
0005 #include <linux/mm.h>
0006 #include <linux/mempolicy.h>
0007 #include <linux/migrate_mode.h>
0008 #include <linux/hugetlb.h>
0009 
/* Allocation callback for migrate_pages(): return a destination page for
 * @page; @private is an opaque cookie passed through from the caller. */
typedef struct page *new_page_t(struct page *page, unsigned long private);
/* Matching release callback for destination pages that went unused. */
typedef void free_page_t(struct page *page, unsigned long private);

struct migration_target_control;
0014 
/*
 * Return values from address_space_operations.migratepage():
 * - negative errno on page migration failure;
 * - zero on page migration success;
 */
#define MIGRATEPAGE_SUCCESS     0
0021 
0022 /**
0023  * struct movable_operations - Driver page migration
0024  * @isolate_page:
0025  * The VM calls this function to prepare the page to be moved.  The page
0026  * is locked and the driver should not unlock it.  The driver should
0027  * return ``true`` if the page is movable and ``false`` if it is not
0028  * currently movable.  After this function returns, the VM uses the
0029  * page->lru field, so the driver must preserve any information which
0030  * is usually stored here.
0031  *
0032  * @migrate_page:
0033  * After isolation, the VM calls this function with the isolated
0034  * @src page.  The driver should copy the contents of the
0035  * @src page to the @dst page and set up the fields of @dst page.
0036  * Both pages are locked.
0037  * If page migration is successful, the driver should call
0038  * __ClearPageMovable(@src) and return MIGRATEPAGE_SUCCESS.
0039  * If the driver cannot migrate the page at the moment, it can return
0040  * -EAGAIN.  The VM interprets this as a temporary migration failure and
0041  * will retry it later.  Any other error value is a permanent migration
0042  * failure and migration will not be retried.
0043  * The driver shouldn't touch the @src->lru field while in the
0044  * migrate_page() function.  It may write to @dst->lru.
0045  *
0046  * @putback_page:
0047  * If migration fails on the isolated page, the VM informs the driver
0048  * that the page is no longer a candidate for migration by calling
0049  * this function.  The driver should put the isolated page back into
0050  * its own data structure.
0051  */
/* Driver hooks for migrating non-LRU movable pages; contract documented in
 * the kerneldoc comment above. */
struct movable_operations {
    /* Prepare a locked page for migration; false if not currently movable. */
    bool (*isolate_page)(struct page *, isolate_mode_t);
    /* Copy @src into @dst; returns MIGRATEPAGE_SUCCESS, -EAGAIN to retry
     * later, or another negative errno for a permanent failure. */
    int (*migrate_page)(struct page *dst, struct page *src,
            enum migrate_mode);
    /* Hand an isolated page back to the driver after a failed migration. */
    void (*putback_page)(struct page *);
};
0058 
/* Defined in mm/debug.c: human-readable names for the MR_* reason codes
 * passed to migrate_pages(). */
extern const char *migrate_reason_names[MR_TYPES];
0061 
0062 #ifdef CONFIG_MIGRATION
0063 
/* Return pages that were isolated for migration back where they belong. */
extern void putback_movable_pages(struct list_head *l);
/* Common folio-migration implementation for simple mappings -- NOTE(review):
 * description inferred from name; confirm against mm/migrate.c. */
int migrate_folio(struct address_space *mapping, struct folio *dst,
        struct folio *src, enum migrate_mode mode);
/*
 * Migrate the pages on list @l.  Destination pages come from @new; unused
 * destinations are handed to @free; @private is forwarded to both callbacks
 * (see new_page_t/free_page_t above).  @reason is an MR_* code (see
 * migrate_reason_names).  NOTE(review): *ret_succeeded presumably reports the
 * number of pages migrated when non-NULL -- confirm in mm/migrate.c.
 */
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
        unsigned long private, enum migrate_mode mode, int reason,
        unsigned int *ret_succeeded);
/* Matches the new_page_t signature, so usable as the @new callback above. */
extern struct page *alloc_migration_target(struct page *page, unsigned long private);
extern int isolate_movable_page(struct page *page, isolate_mode_t mode);

int migrate_huge_page_move_mapping(struct address_space *mapping,
        struct folio *dst, struct folio *src);
/* Wait on a migration entry; @ptl is the lock covering @ptep. */
void migration_entry_wait_on_locked(swp_entry_t entry, pte_t *ptep,
                spinlock_t *ptl);
void folio_migrate_flags(struct folio *newfolio, struct folio *folio);
void folio_migrate_copy(struct folio *newfolio, struct folio *folio);
int folio_migrate_mapping(struct address_space *mapping,
        struct folio *newfolio, struct folio *folio, int extra_count);
0081 
0082 #else
0083 
/* !CONFIG_MIGRATION: inert stubs so generic callers compile unchanged. */
static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t new,
        free_page_t free, unsigned long private, enum migrate_mode mode,
        int reason, unsigned int *ret_succeeded)
    { return -ENOSYS; } /* migration not compiled in */
static inline struct page *alloc_migration_target(struct page *page,
        unsigned long private)
    { return NULL; }
static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
    { return -EBUSY; } /* nothing can ever be isolated for migration */

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
                  struct folio *dst, struct folio *src)
{
    return -ENOSYS;
}
0100 
0101 #endif /* CONFIG_MIGRATION */
0102 
0103 #if defined(CONFIG_MIGRATION) && defined(CONFIG_NUMA)
/* Memory-tiering demotion support; real implementations need both
 * CONFIG_MIGRATION and CONFIG_NUMA (see the #if above). */
extern void set_migration_target_nodes(void);
extern void migrate_on_reclaim_init(void);
extern bool numa_demotion_enabled;
/* Next node to demote cold pages to, or NUMA_NO_NODE if none. */
extern int next_demotion_node(int node);
0108 #else
/* No-op stubs: without migration + NUMA there is no demotion target. */
static inline void set_migration_target_nodes(void) {}
static inline void migrate_on_reclaim_init(void) {}
static inline int next_demotion_node(int node)
{
        return NUMA_NO_NODE;
}
/* Constant-folds demotion checks away in this configuration. */
#define numa_demotion_enabled  false
0116 #endif
0117 
0118 #ifdef CONFIG_COMPACTION
/* Non-LRU movable page tracking; only available with CONFIG_COMPACTION. */
bool PageMovable(struct page *page);
/* Mark @page movable and attach its driver @ops (see page_movable_ops()). */
void __SetPageMovable(struct page *page, const struct movable_operations *ops);
void __ClearPageMovable(struct page *page);
0122 #else
/* !CONFIG_COMPACTION: no page is ever considered (non-LRU) movable. */
static inline bool PageMovable(struct page *page) { return false; }
static inline void __SetPageMovable(struct page *page,
        const struct movable_operations *ops)
{
}
static inline void __ClearPageMovable(struct page *page)
{
}
0131 #endif
0132 
/* Folio wrapper around PageMovable(), applied to the folio's first page. */
static inline bool folio_test_movable(struct folio *folio)
{
    return PageMovable(&folio->page);
}
0137 
/* Recover the driver's movable_operations from a movable page.  Caller must
 * only use this on pages for which __PageMovable() is true. */
static inline
const struct movable_operations *page_movable_ops(struct page *page)
{
    VM_BUG_ON(!__PageMovable(page));

    /*
     * For movable pages, page->mapping holds the ops pointer with the
     * PAGE_MAPPING_MOVABLE bit set as a type tag; strip the tag to get
     * back the pointer stored by __SetPageMovable().
     */
    return (const struct movable_operations *)
        ((unsigned long)page->mapping - PAGE_MAPPING_MOVABLE);
}
0146 
0147 #ifdef CONFIG_NUMA_BALANCING
/* NUMA balancing: try to move a misplaced @page toward @node -- NOTE(review):
 * semantics inferred from name/config; confirm against mm/migrate.c. */
extern int migrate_misplaced_page(struct page *page,
                  struct vm_area_struct *vma, int node);
0150 #else
/* !CONFIG_NUMA_BALANCING: report a retryable failure, never migrate. */
static inline int migrate_misplaced_page(struct page *page,
                     struct vm_area_struct *vma, int node)
{
    return -EAGAIN; /* can't migrate now */
}
0156 #endif /* CONFIG_NUMA_BALANCING */
0157 
0158 #ifdef CONFIG_MIGRATION
0159 
/*
 * Watch out for PAE architectures, where an unsigned long might not have
 * enough bits to store both the full physical address and the flags below.
 * So far we have enough room for all our flags.
 */
#define MIGRATE_PFN_VALID   (1UL << 0)  /* entry carries a valid pfn */
#define MIGRATE_PFN_MIGRATE (1UL << 1)  /* entry selected for migration */
#define MIGRATE_PFN_WRITE   (1UL << 3)  /* writable (per name -- confirm) */
/* pfn lives above the flag bits; note bit 2 is currently unused here. */
#define MIGRATE_PFN_SHIFT   6
0169 
/* Decode an entry produced by migrate_pfn(); NULL if the entry is invalid. */
static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
{
    if (!(mpfn & MIGRATE_PFN_VALID))
        return NULL;
    return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
}
0176 
0177 static inline unsigned long migrate_pfn(unsigned long pfn)
0178 {
0179     return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
0180 }
0181 
/* Source-page selection flags for struct migrate_vma::flags. */
enum migrate_vma_direction {
    MIGRATE_VMA_SELECT_SYSTEM = 1 << 0,             /* ordinary system memory */
    MIGRATE_VMA_SELECT_DEVICE_PRIVATE = 1 << 1,     /* device-private pages */
    MIGRATE_VMA_SELECT_DEVICE_COHERENT = 1 << 2,    /* device-coherent pages */
};
0187 
/* Caller-provided state for the migrate_vma_setup()/pages()/finalize() API. */
struct migrate_vma {
    struct vm_area_struct   *vma;
    /*
     * Both src and dst array must be big enough for
     * (end - start) >> PAGE_SHIFT entries.
     *
     * The src array must not be modified by the caller after
     * migrate_vma_setup(), and must not change the dst array after
     * migrate_vma_pages() returns.
     */
    unsigned long       *dst;   /* destination entries (migrate_pfn() format) */
    unsigned long       *src;   /* source entries (migrate_pfn() format) */
    unsigned long       cpages; /* NOTE(review): presumably pages collected -- confirm */
    unsigned long       npages; /* NOTE(review): presumably (end - start) >> PAGE_SHIFT -- confirm */
    unsigned long       start;  /* range start address within @vma */
    unsigned long       end;    /* range end address */

    /*
     * Set to the owner value also stored in page->pgmap->owner for
     * migrating out of device private memory. The flags also need to
     * be set to MIGRATE_VMA_SELECT_DEVICE_PRIVATE.
     * The caller should always set this field when using mmu notifier
     * callbacks to avoid device MMU invalidations for device private
     * pages that are not being migrated.
     */
    void            *pgmap_owner;
    unsigned long       flags;  /* MIGRATE_VMA_SELECT_* mask */
};
0216 
/* Device-driven migration, called in order: setup, pages, finalize.
 * NOTE(review): phase semantics inferred from names; confirm in mm/. */
int migrate_vma_setup(struct migrate_vma *args);
void migrate_vma_pages(struct migrate_vma *migrate);
void migrate_vma_finalize(struct migrate_vma *migrate);
0220 #endif /* CONFIG_MIGRATION */
0221 
0222 #endif /* _LINUX_MIGRATE_H */