/*
 * mm/balloon_compaction.c
 *
 * Common interface for making balloon pages movable by compaction.
 *
 * Copyright (C) 2012, Red Hat, Inc.  Rafael Aquini <aquini@redhat.com>
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/balloon_compaction.h>

/*
 * balloon_page_enqueue - allocates a new page and inserts it into the balloon
 *            page list.
 * @b_dev_info: balloon device descriptor where we will insert the new page
 *
 * The driver must call this function to properly allocate a new enlisted
 * balloon page before definitively removing it from the guest system.
 * This function returns the address of the freshly enqueued page or
 * NULL in case we fail to allocate a new page this turn.
 */
struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info)
{
    unsigned long flags;
    struct page *page = alloc_page(balloon_mapping_gfp_mask() |
                    __GFP_NOMEMALLOC | __GFP_NORETRY);
    if (!page)
        return NULL;

    /*
     * Block others from accessing the 'page' when we get around to
     * establishing additional references. We should be the only one
     * holding a reference to the 'page' at this point.
     */
    BUG_ON(!trylock_page(page));
    spin_lock_irqsave(&b_dev_info->pages_lock, flags);
    balloon_page_insert(b_dev_info, page);
    __count_vm_event(BALLOON_INFLATE);
    spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
    unlock_page(page);
    return page;
}
EXPORT_SYMBOL_GPL(balloon_page_enqueue);
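
/*
 * Illustrative usage sketch (not part of the original file): how a
 * hypothetical balloon driver might inflate by 'nr_pages' using the
 * interface above. The notify_host_of_page() step is an assumed,
 * hypervisor-specific hook and not an API provided by this file.
 */
static size_t example_inflate(struct balloon_dev_info *b_dev_info,
                              size_t nr_pages)
{
    size_t done = 0;

    while (done < nr_pages) {
        struct page *page = balloon_page_enqueue(b_dev_info);

        if (!page)
            break;  /* allocation failed this turn; caller may retry later */

        /* notify_host_of_page(b_dev_info, page); -- hypervisor specific */
        done++;
    }
    return done;
}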

/*
 * balloon_page_dequeue - removes a page from the balloon's page list and
 *            returns its address to allow the driver to release the page.
 * @b_dev_info: balloon device descriptor where we will grab a page from.
 *
 * The driver must call this function to properly deallocate a previously
 * enlisted balloon page before definitively releasing it back to the guest
 * system.
 * This function returns the address of the freshly dequeued page or
 * NULL in case we find the balloon's page list temporarily empty because
 * compaction has isolated its pages.
 */
struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
{
    struct page *page, *tmp;
    unsigned long flags;
    bool dequeued_page;

    dequeued_page = false;
    spin_lock_irqsave(&b_dev_info->pages_lock, flags);
    list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
        /*
         * Block others from accessing the 'page' while we get around to
         * establishing additional references and preparing the 'page'
         * to be released by the balloon driver.
         */
        if (trylock_page(page)) {
#ifdef CONFIG_BALLOON_COMPACTION
            if (PageIsolated(page)) {
                /* raced with isolation */
                unlock_page(page);
                continue;
            }
#endif
            balloon_page_delete(page);
            __count_vm_event(BALLOON_DEFLATE);
            unlock_page(page);
            dequeued_page = true;
            break;
        }
    }
    spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);

    if (!dequeued_page) {
        /*
         * If we are unable to dequeue a balloon page because the page
         * list is empty and there are no isolated pages, then something
         * went wrong and some balloon pages were lost.
         * BUG() here, otherwise the balloon driver may get stuck in
         * an infinite loop while attempting to release all its pages.
         */
        spin_lock_irqsave(&b_dev_info->pages_lock, flags);
        if (unlikely(list_empty(&b_dev_info->pages) &&
                 !b_dev_info->isolated_pages))
            BUG();
        spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
        page = NULL;
    }
    return page;
}
EXPORT_SYMBOL_GPL(balloon_page_dequeue);
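
/*
 * Illustrative usage sketch (not part of the original file): how a
 * hypothetical balloon driver might deflate by 'nr_pages' using the
 * interface above. A NULL return only means the list is temporarily
 * drained by compaction, so the caller simply stops for this round.
 * Telling the host the frame is usable again is driver specific and
 * only hinted at in the comment below.
 */
static size_t example_deflate(struct balloon_dev_info *b_dev_info,
                              size_t nr_pages)
{
    size_t done = 0;

    while (done < nr_pages) {
        struct page *page = balloon_page_dequeue(b_dev_info);

        if (!page)
            break;  /* list temporarily empty; try again later */

        /* tell the host the frame is back, then drop the balloon's ref */
        put_page(page);
        done++;
    }
    return done;
}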

#ifdef CONFIG_BALLOON_COMPACTION

/* isolate_page() counterpart: take the page off the balloon's page list */
bool balloon_page_isolate(struct page *page, isolate_mode_t mode)
{
    struct balloon_dev_info *b_dev_info = balloon_page_device(page);
    unsigned long flags;

    spin_lock_irqsave(&b_dev_info->pages_lock, flags);
    list_del(&page->lru);
    b_dev_info->isolated_pages++;
    spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);

    return true;
}

/* putback_page() counterpart: return an isolated page to the balloon's list */
void balloon_page_putback(struct page *page)
{
    struct balloon_dev_info *b_dev_info = balloon_page_device(page);
    unsigned long flags;

    spin_lock_irqsave(&b_dev_info->pages_lock, flags);
    list_add(&page->lru, &b_dev_info->pages);
    b_dev_info->isolated_pages--;
    spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
}

/* move_to_new_page() counterpart for a ballooned page */
int balloon_page_migrate(struct address_space *mapping,
        struct page *newpage, struct page *page,
        enum migrate_mode mode)
{
    struct balloon_dev_info *balloon = balloon_page_device(page);

    VM_BUG_ON_PAGE(!PageLocked(page), page);
    VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

    return balloon->migratepage(balloon, newpage, page, mode);
}

const struct address_space_operations balloon_aops = {
    .migratepage = balloon_page_migrate,
    .isolate_page = balloon_page_isolate,
    .putback_page = balloon_page_putback,
};
EXPORT_SYMBOL_GPL(balloon_aops);
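
/*
 * Illustrative sketch (not part of the original file): how a hypothetical
 * driver could hook itself up to the compaction callbacks above. The names
 * example_migratepage()/example_balloon_init() and the way 'inode' is
 * obtained are assumptions; the host notification steps are only hinted at
 * in comments because they are hypervisor specific.
 */
static int example_migratepage(struct balloon_dev_info *b_dev_info,
                               struct page *newpage, struct page *page,
                               enum migrate_mode mode)
{
    unsigned long flags;

    /* account the new page as the ballooned one */
    spin_lock_irqsave(&b_dev_info->pages_lock, flags);
    balloon_page_insert(b_dev_info, newpage);
    b_dev_info->isolated_pages--;
    spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);

    /* tell the host 'newpage' is now ballooned and 'page' is released */

    /* drop the old page: clear its balloon linkage and free our reference */
    balloon_page_delete(page);
    put_page(page);

    return MIGRATEPAGE_SUCCESS;
}

static void example_balloon_init(struct balloon_dev_info *b_dev_info,
                                 struct inode *inode)
{
    balloon_devinfo_init(b_dev_info);
    b_dev_info->migratepage = example_migratepage;
    /* assumption: 'inode' is a driver-owned pseudo inode for balloon pages */
    b_dev_info->inode = inode;
    inode->i_mapping->a_ops = &balloon_aops;
}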

#endif /* CONFIG_BALLOON_COMPACTION */