// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/balloon_compaction.c
 *
 * Common interface for making balloon pages movable by compaction.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/balloon_compaction.h>

static void balloon_page_enqueue_one(struct balloon_dev_info *b_dev_info,
				     struct page *page)
{
	/*
	 * Block others from accessing the 'page' when we get around to
	 * establishing additional references. We should be the only one
	 * holding a reference to the 'page' at this point. If we are not,
	 * then memory corruption is possible and we should stop execution.
	 */
	BUG_ON(!trylock_page(page));
	balloon_page_insert(b_dev_info, page);
	unlock_page(page);
	__count_vm_event(BALLOON_INFLATE);
}

/**
 * balloon_page_list_enqueue() - inserts a list of pages into the balloon page
 *				 list.
 * @b_dev_info: balloon device descriptor where we will insert new pages
 * @pages: pages to enqueue - allocated using balloon_page_alloc.
 *
 * Driver must call this function to properly enqueue balloon pages before
 * definitively removing them from the guest system.
 *
 * Return: number of pages that were enqueued.
 */
size_t balloon_page_list_enqueue(struct balloon_dev_info *b_dev_info,
				 struct list_head *pages)
{
	struct page *page, *tmp;
	unsigned long flags;
	size_t n_pages = 0;

	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	list_for_each_entry_safe(page, tmp, pages, lru) {
		list_del(&page->lru);
		balloon_page_enqueue_one(b_dev_info, page);
		n_pages++;
	}
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
	return n_pages;
}
EXPORT_SYMBOL_GPL(balloon_page_list_enqueue);

/**
 * balloon_page_list_dequeue() - removes pages from balloon's page list and
 *				 returns a list of the pages.
 * @b_dev_info: balloon device descriptor where we will grab pages from.
 * @pages: pointer to the list of pages that will be returned to the caller.
 * @n_req_pages: number of requested pages.
 *
 * Driver must call this function to properly de-allocate previously enlisted
 * balloon pages before definitively releasing them back to the guest system.
 * This function tries to remove @n_req_pages from the ballooned pages and
 * return them to the caller in the @pages list.
 *
 * Note that this function may fail to dequeue some pages even if the balloon
 * isn't empty - since the page list can be temporarily empty due to the
 * compaction of isolated pages.
 *
 * Return: number of pages that were added to the @pages list.
 */
size_t balloon_page_list_dequeue(struct balloon_dev_info *b_dev_info,
				 struct list_head *pages, size_t n_req_pages)
{
	struct page *page, *tmp;
	unsigned long flags;
	size_t n_pages = 0;

	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
		if (n_pages == n_req_pages)
			break;

		/*
		 * Block others from accessing the 'page' while we get around
		 * to establishing additional references and preparing the
		 * 'page' to be released by the driver.
		 */
		if (!trylock_page(page))
			continue;

		if (IS_ENABLED(CONFIG_BALLOON_COMPACTION) &&
		    PageIsolated(page)) {
			/* raced with isolation */
			unlock_page(page);
			continue;
		}
		balloon_page_delete(page);
		__count_vm_event(BALLOON_DEFLATE);
		list_add(&page->lru, pages);
		unlock_page(page);
		n_pages++;
	}
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);

	return n_pages;
}
EXPORT_SYMBOL_GPL(balloon_page_list_dequeue);
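
/*
 * Example (illustrative only; "vb" and "nr" are hypothetical driver state,
 * not part of this file): a driver that inflates and deflates in batches
 * would typically pair the two list helpers above:
 *
 *	LIST_HEAD(pages);
 *	struct page *page, *tmp;
 *	size_t i, n;
 *
 *	for (i = 0; i < nr; i++) {
 *		page = balloon_page_alloc();
 *		if (!page)
 *			break;
 *		balloon_page_push(&pages, page);
 *	}
 *	n = balloon_page_list_enqueue(&vb->b_dev_info, &pages);
 *	...
 *	n = balloon_page_list_dequeue(&vb->b_dev_info, &pages, nr);
 *	list_for_each_entry_safe(page, tmp, &pages, lru) {
 *		list_del(&page->lru);
 *		__free_page(page);
 *	}
 */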

/*
 * balloon_page_alloc - allocates a new page for insertion into the balloon
 *			page list.
 *
 * Driver must call this function to properly allocate a new balloon page.
 * Driver must call balloon_page_enqueue before definitively removing the page
 * from the guest system.
 *
 * Return: struct page for the allocated page or NULL on allocation failure.
 */
struct page *balloon_page_alloc(void)
{
	struct page *page = alloc_page(balloon_mapping_gfp_mask() |
				       __GFP_NOMEMALLOC | __GFP_NORETRY |
				       __GFP_NOWARN);
	return page;
}
EXPORT_SYMBOL_GPL(balloon_page_alloc);

/*
 * balloon_page_enqueue - inserts a new page into the balloon page list.
 *
 * @b_dev_info: balloon device descriptor where we will insert a new page
 * @page: new page to enqueue - allocated using balloon_page_alloc.
 *
 * Drivers must call this function to properly enqueue a newly allocated
 * balloon page before definitively removing the page from the guest system.
 *
 * Drivers must not call balloon_page_enqueue on pages that have been pushed
 * to a list with balloon_page_push before removing them with
 * balloon_page_pop. To enqueue a list of pages, use
 * balloon_page_list_enqueue instead.
 */
void balloon_page_enqueue(struct balloon_dev_info *b_dev_info,
			  struct page *page)
{
	unsigned long flags;

	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	balloon_page_enqueue_one(b_dev_info, page);
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
}
EXPORT_SYMBOL_GPL(balloon_page_enqueue);

/*
 * balloon_page_dequeue - removes a page from balloon's page list and returns
 *			  its address to allow the driver to release the page.
 * @b_dev_info: balloon device descriptor where we will grab a page from.
 *
 * Driver must call this function to properly dequeue a previously enqueued
 * page before definitively releasing it back to the guest system.
 *
 * Caller must perform its own accounting to ensure that this function is
 * called only if some pages are actually enqueued.
 *
 * Note that this function may fail to dequeue some pages even if there are
 * some enqueued pages - since the page list can be temporarily empty due to
 * the compaction of isolated pages.
 *
 * Return: struct page for the dequeued page, or NULL if no page was dequeued.
 */
struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
{
	unsigned long flags;
	LIST_HEAD(pages);
	int n_pages;

	n_pages = balloon_page_list_dequeue(b_dev_info, &pages, 1);

	if (n_pages != 1) {
		/*
		 * If we are unable to dequeue a balloon page because the page
		 * list is empty and there are no isolated pages, then
		 * something went out of track and some balloon pages are
		 * lost. BUG() here, otherwise the balloon driver may get
		 * stuck in an infinite loop while attempting to release all
		 * its pages.
		 */
		spin_lock_irqsave(&b_dev_info->pages_lock, flags);
		if (unlikely(list_empty(&b_dev_info->pages) &&
			     !b_dev_info->isolated_pages))
			BUG();
		spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
		return NULL;
	}
	return list_first_entry(&pages, struct page, lru);
}
EXPORT_SYMBOL_GPL(balloon_page_dequeue);
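
/*
 * Example (illustrative only; "vb" is a hypothetical driver structure
 * embedding a struct balloon_dev_info): the single-page inflate and deflate
 * paths typically look like:
 *
 *	struct page *page = balloon_page_alloc();
 *
 *	if (page)
 *		balloon_page_enqueue(&vb->b_dev_info, page);
 *	...
 *	page = balloon_page_dequeue(&vb->b_dev_info);
 *	if (page)
 *		__free_page(page);
 */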

#ifdef CONFIG_BALLOON_COMPACTION

static bool balloon_page_isolate(struct page *page, isolate_mode_t mode)
{
	struct balloon_dev_info *b_dev_info = balloon_page_device(page);
	unsigned long flags;

	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	list_del(&page->lru);
	b_dev_info->isolated_pages++;
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);

	return true;
}

static void balloon_page_putback(struct page *page)
{
	struct balloon_dev_info *b_dev_info = balloon_page_device(page);
	unsigned long flags;

	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	list_add(&page->lru, &b_dev_info->pages);
	b_dev_info->isolated_pages--;
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
}

/* Migration core callback: migrate an isolated balloon page to @newpage. */
static int balloon_page_migrate(struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	struct balloon_dev_info *balloon = balloon_page_device(page);

	/*
	 * No-copy migration is not easily supported for balloon pages, and
	 * it is unlikely to be requested for them, so simply reject it.
	 */
	if (mode == MIGRATE_SYNC_NO_COPY)
		return -EINVAL;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	return balloon->migratepage(balloon, newpage, page, mode);
}

const struct movable_operations balloon_mops = {
	.migrate_page = balloon_page_migrate,
	.isolate_page = balloon_page_isolate,
	.putback_page = balloon_page_putback,
};
EXPORT_SYMBOL_GPL(balloon_mops);
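
/*
 * Note: a balloon driver opts into page migration by setting the
 * b_dev_info->migratepage callback, which balloon_page_migrate() above
 * forwards to; balloon_mops itself is attached to each page when it is
 * inserted into the balloon (see balloon_page_insert()).
 */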

#endif /* CONFIG_BALLOON_COMPACTION */