// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <asm/kvm_hyp.h>
#include <nvhe/gfp.h>

u64 __hyp_vmemmap;

/*
 * Index the hyp_vmemmap to find a potential buddy page, but make no assumption
 * about its current state.
 *
 * Example buddy-tree for a 4-page physically contiguous pool:
 *
 *                 o : Page 3
 *                /
 *               o-o : Page 2
 *              /
 *             /   o : Page 1
 *            /   /
 *           o---o-o : Page 0
 *    Order  2   1 0
 *
 * Example requests on this pool:
 *   __find_buddy_nocheck(pool, page 0, order 0) => page 1
 *   __find_buddy_nocheck(pool, page 0, order 1) => page 2
 *   __find_buddy_nocheck(pool, page 1, order 0) => page 0
 *   __find_buddy_nocheck(pool, page 2, order 0) => page 3
 */
static struct hyp_page *__find_buddy_nocheck(struct hyp_pool *pool,
                                             struct hyp_page *p,
                                             unsigned short order)
{
    phys_addr_t addr = hyp_page_to_phys(p);

    addr ^= (PAGE_SIZE << order);

    /*
     * Don't return a page outside the pool range -- it belongs to
     * something else and may not be mapped in hyp_vmemmap.
     */
    if (addr < pool->range_start || addr >= pool->range_end)
        return NULL;

    return hyp_phys_to_page(addr);
}
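
/*
 * The buddy computation above is a single XOR: flipping address bit
 * (PAGE_SHIFT + order) toggles between the two halves of an order + 1
 * block. Worked example, assuming 4KiB pages and a pool starting at
 * physical address 0x0:
 *
 *   page 0, order 0: 0x0000 ^ (0x1000 << 0) = 0x1000 => page 1
 *   page 0, order 1: 0x0000 ^ (0x1000 << 1) = 0x2000 => page 2
 *   page 1, order 0: 0x1000 ^ (0x1000 << 0) = 0x0000 => page 0
 *   page 2, order 0: 0x2000 ^ (0x1000 << 0) = 0x3000 => page 3
 *
 * which matches the tree pictured above; candidates falling outside
 * [range_start, range_end) are rejected by the check in the function.
 */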

/* Find a buddy page currently available for allocation */
static struct hyp_page *__find_buddy_avail(struct hyp_pool *pool,
                                           struct hyp_page *p,
                                           unsigned short order)
{
    struct hyp_page *buddy = __find_buddy_nocheck(pool, p, order);

    if (!buddy || buddy->order != order || buddy->refcount)
        return NULL;

    return buddy;
}

/*
 * Pages that are available for allocation are tracked in free-lists, so we use
 * the pages themselves to store the list nodes to avoid wasting space. As the
 * allocator always returns zeroed pages (which are zeroed on the hyp_put_page()
 * path to optimize allocation speed), we also need to clean up the list node in
 * each page when we take it out of the list.
 */
static inline void page_remove_from_list(struct hyp_page *p)
{
    struct list_head *node = hyp_page_to_virt(p);

    __list_del_entry(node);
    memset(node, 0, sizeof(*node));
}

static inline void page_add_to_list(struct hyp_page *p, struct list_head *head)
{
    struct list_head *node = hyp_page_to_virt(p);

    INIT_LIST_HEAD(node);
    list_add_tail(node, head);
}

static inline struct hyp_page *node_to_page(struct list_head *node)
{
    return hyp_virt_to_page(node);
}
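
/*
 * Round trip of the intrusive free-list scheme, as a minimal sketch
 * (hypothetical snippet, assuming @pool holds a free order-0 page):
 *
 *   struct list_head *node = pool->free_area[0].next;
 *   struct hyp_page *p = node_to_page(node);  // vmemmap entry
 *   void *va = hyp_page_to_virt(p);           // va == node
 *
 * The node lives in the free page itself, so hyp_page_to_virt() and
 * hyp_virt_to_page() convert between the two views at no extra cost.
 */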

static void __hyp_attach_page(struct hyp_pool *pool,
                              struct hyp_page *p)
{
    unsigned short order = p->order;
    struct hyp_page *buddy;

    memset(hyp_page_to_virt(p), 0, PAGE_SIZE << p->order);

    /*
     * Only the first struct hyp_page of a high-order page (otherwise known
     * as the 'head') should have p->order set. The non-head pages should
     * have p->order = HYP_NO_ORDER. Here @p may no longer be the head
     * after coalescing, so make sure to mark it HYP_NO_ORDER proactively.
     */
    p->order = HYP_NO_ORDER;
    for (; (order + 1) < pool->max_order; order++) {
        buddy = __find_buddy_avail(pool, p, order);
        if (!buddy)
            break;

        /* Take the buddy out of its list, and coalesce with @p */
        page_remove_from_list(buddy);
        buddy->order = HYP_NO_ORDER;
        p = min(p, buddy);
    }

    /* Mark the new head, and insert it */
    p->order = order;
    page_add_to_list(p, &pool->free_area[order]);
}
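
/*
 * Coalescing walk-through for the 4-page pool pictured earlier, assuming
 * page 1 is free at order 0, pages 2-3 form a free order-1 block, and
 * page 0 is now being attached at order 0:
 *
 *   order 0: buddy is page 1, free at order 0 -> coalesce, the head
 *            stays page 0 (the lower of the pair)
 *   order 1: buddy is page 2, free at order 1 -> coalesce again
 *
 * The loop then reaches the pool's max order, so page 0 is inserted as
 * an order-2 head covering the whole pool.
 */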

static struct hyp_page *__hyp_extract_page(struct hyp_pool *pool,
                                           struct hyp_page *p,
                                           unsigned short order)
{
    struct hyp_page *buddy;

    page_remove_from_list(p);
    while (p->order > order) {
        /*
         * The buddy of order n - 1 currently has HYP_NO_ORDER as it
         * is covered by a higher-level page (whose head is @p). Use
         * __find_buddy_nocheck() to find it and inject it in the
         * free_area[n - 1] list, effectively splitting @p in half.
         */
        p->order--;
        buddy = __find_buddy_nocheck(pool, p, p->order);
        buddy->order = p->order;
        page_add_to_list(buddy, &pool->free_area[buddy->order]);
    }

    return p;
}
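
/*
 * Split walk-through, assuming the order-2 block built above (head page
 * 0) is extracted for an order-0 request:
 *
 *   p->order 2 -> 1: buddy at order 1 is page 2; it gets order 1 and
 *                    goes on free_area[1]
 *   p->order 1 -> 0: buddy at order 0 is page 1; it gets order 0 and
 *                    goes on free_area[0]
 *
 * Page 0 is returned as an order-0 page, and the unused halves stay in
 * the free-lists for later requests.
 */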

static inline void hyp_page_ref_inc(struct hyp_page *p)
{
    BUG_ON(p->refcount == USHRT_MAX);
    p->refcount++;
}

static inline int hyp_page_ref_dec_and_test(struct hyp_page *p)
{
    BUG_ON(!p->refcount);
    p->refcount--;
    return (p->refcount == 0);
}

static inline void hyp_set_page_refcounted(struct hyp_page *p)
{
    BUG_ON(p->refcount);
    p->refcount = 1;
}

static void __hyp_put_page(struct hyp_pool *pool, struct hyp_page *p)
{
    if (hyp_page_ref_dec_and_test(p))
        __hyp_attach_page(pool, p);
}

/*
 * Changes to the buddy tree and page refcounts must be done with the hyp_pool
 * lock held. If a refcount change requires an update to the buddy tree (e.g.
 * hyp_put_page()), both operations must be done within the same critical
 * section to guarantee transient states (e.g. a page with null refcount but
 * not yet attached to a free list) can't be observed by well-behaved readers.
 */
void hyp_put_page(struct hyp_pool *pool, void *addr)
{
    struct hyp_page *p = hyp_virt_to_page(addr);

    hyp_spin_lock(&pool->lock);
    __hyp_put_page(pool, p);
    hyp_spin_unlock(&pool->lock);
}

void hyp_get_page(struct hyp_pool *pool, void *addr)
{
    struct hyp_page *p = hyp_virt_to_page(addr);

    hyp_spin_lock(&pool->lock);
    hyp_page_ref_inc(p);
    hyp_spin_unlock(&pool->lock);
}
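
/*
 * hyp_get_page()/hyp_put_page() let several owners share a page. A
 * minimal sketch (hypothetical caller and pool):
 *
 *   void *va = hyp_alloc_pages(&pool, 0);  // refcount == 1
 *   hyp_get_page(&pool, va);               // refcount == 2
 *   hyp_put_page(&pool, va);               // refcount == 1, still owned
 *   hyp_put_page(&pool, va);               // 0 -> back into the buddy tree
 *
 * Only the final put re-attaches (and zeroes) the page; the earlier one
 * merely drops a reference.
 */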

void hyp_split_page(struct hyp_page *p)
{
    unsigned short order = p->order;
    unsigned int i;

    p->order = 0;
    for (i = 1; i < (1 << order); i++) {
        struct hyp_page *tail = p + i;

        tail->order = 0;
        hyp_set_page_refcounted(tail);
    }
}
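
/*
 * After a split, each constituent page stands alone. A minimal sketch
 * (hypothetical caller, order-1 block):
 *
 *   void *va = hyp_alloc_pages(&pool, 1);    // one order-1 block
 *   hyp_split_page(hyp_virt_to_page(va));    // two order-0 pages
 *   hyp_put_page(&pool, va);                 // release the first half
 *   hyp_put_page(&pool, va + PAGE_SIZE);     // release the second half
 *
 * Each order-0 page now carries its own refcount, so the halves can be
 * released independently and re-coalesced by __hyp_attach_page() later.
 */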

void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order)
{
    unsigned short i = order;
    struct hyp_page *p;

    hyp_spin_lock(&pool->lock);

    /* Look for a high-enough-order page */
    while (i < pool->max_order && list_empty(&pool->free_area[i]))
        i++;
    if (i >= pool->max_order) {
        hyp_spin_unlock(&pool->lock);
        return NULL;
    }

    /* Extract it from the tree at the right order */
    p = node_to_page(pool->free_area[i].next);
    p = __hyp_extract_page(pool, p, order);

    hyp_set_page_refcounted(p);
    hyp_spin_unlock(&pool->lock);

    return hyp_page_to_virt(p);
}
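
/*
 * E.g. requesting order 0 from the fully free 4-page pool: free_area[0]
 * and free_area[1] are empty, so @i settles on 2; __hyp_extract_page()
 * then splits the order-2 block down as in the walk-through above, and
 * page 0 comes back zeroed with a refcount of 1.
 */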

int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages,
                  unsigned int reserved_pages)
{
    phys_addr_t phys = hyp_pfn_to_phys(pfn);
    struct hyp_page *p;
    int i;

    hyp_spin_lock_init(&pool->lock);
    pool->max_order = min(MAX_ORDER, get_order((nr_pages + 1) << PAGE_SHIFT));
    for (i = 0; i < pool->max_order; i++)
        INIT_LIST_HEAD(&pool->free_area[i]);
    pool->range_start = phys;
    pool->range_end = phys + (nr_pages << PAGE_SHIFT);

    /* Init the vmemmap portion */
    p = hyp_phys_to_page(phys);
    for (i = 0; i < nr_pages; i++) {
        p[i].order = 0;
        hyp_set_page_refcounted(&p[i]);
    }

    /* Attach the unused pages to the buddy tree */
    for (i = reserved_pages; i < nr_pages; i++)
        __hyp_put_page(pool, &p[i]);

    return 0;
}
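
/*
 * A minimal initialization sketch (hypothetical values): hand the pool
 * 16 pages starting at @base_pfn and keep the first 2 reserved:
 *
 *   struct hyp_pool pool;
 *
 *   hyp_pool_init(&pool, base_pfn, 16, 2);
 *
 * Pages 2..15 now sit in the buddy tree, coalesced where alignment
 * allows; pages 0..1 keep their refcount of 1 and stay out of the tree
 * until a later hyp_put_page() hands them over.
 */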