0001
0002
0003
0004
0005
0006
0007 #include <asm/kvm_hyp.h>
0008 #include <nvhe/gfp.h>
0009
0010 u64 __hyp_vmemmap;
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033 static struct hyp_page *__find_buddy_nocheck(struct hyp_pool *pool,
0034 struct hyp_page *p,
0035 unsigned short order)
0036 {
0037 phys_addr_t addr = hyp_page_to_phys(p);
0038
0039 addr ^= (PAGE_SIZE << order);
0040
0041
0042
0043
0044
0045 if (addr < pool->range_start || addr >= pool->range_end)
0046 return NULL;
0047
0048 return hyp_phys_to_page(addr);
0049 }
0050
0051
0052 static struct hyp_page *__find_buddy_avail(struct hyp_pool *pool,
0053 struct hyp_page *p,
0054 unsigned short order)
0055 {
0056 struct hyp_page *buddy = __find_buddy_nocheck(pool, p, order);
0057
0058 if (!buddy || buddy->order != order || buddy->refcount)
0059 return NULL;
0060
0061 return buddy;
0062
0063 }
0064
0065
0066
0067
0068
0069
0070
0071
0072 static inline void page_remove_from_list(struct hyp_page *p)
0073 {
0074 struct list_head *node = hyp_page_to_virt(p);
0075
0076 __list_del_entry(node);
0077 memset(node, 0, sizeof(*node));
0078 }
0079
/*
 * Append @p to the free list @head. The list node is embedded in the
 * free page's own memory, so it must be (re)initialized first.
 */
static inline void page_add_to_list(struct hyp_page *p, struct list_head *head)
{
	struct list_head *elem = hyp_page_to_virt(p);

	INIT_LIST_HEAD(elem);
	list_add_tail(elem, head);
}
0087
/*
 * Recover the hyp_page tracking a free-list node; the node lives inside
 * the free page itself, so this is just a virt-to-page conversion.
 */
static inline struct hyp_page *node_to_page(struct list_head *node)
{
	return hyp_virt_to_page(node);
}
0092
/*
 * Return page @p to the pool, coalescing it with any free buddies into
 * the largest possible block before inserting it into a free list.
 * Caller is expected to hold the pool lock (all callers here do).
 */
static void __hyp_attach_page(struct hyp_pool *pool,
			      struct hyp_page *p)
{
	unsigned short order = p->order;
	struct hyp_page *buddy;

	/* Scrub the freed block's contents before making it allocatable. */
	memset(hyp_page_to_virt(p), 0, PAGE_SIZE << p->order);

	/*
	 * Mark @p as "not in a free list" while merging; __find_buddy_avail()
	 * only matches buddies whose order equals the one being probed, so
	 * HYP_NO_ORDER keeps in-flight pages from being treated as free.
	 */
	p->order = HYP_NO_ORDER;
	for (; (order + 1) < pool->max_order; order++) {
		buddy = __find_buddy_avail(pool, p, order);
		if (!buddy)
			break;

		/* Detach the buddy and merge: the lower of the two leads. */
		page_remove_from_list(buddy);
		buddy->order = HYP_NO_ORDER;
		p = min(p, buddy);
	}

	/* Record the final (possibly grown) order and insert the block. */
	p->order = order;
	page_add_to_list(p, &pool->free_area[order]);
}
0123
/*
 * Take the free block @p off its free list and split it in half
 * repeatedly until a block of exactly @order remains; the upper half
 * carved off at each step is returned to the appropriate free list.
 * Callers pick @p from a list of order >= @order and hold the pool lock.
 */
static struct hyp_page *__hyp_extract_page(struct hyp_pool *pool,
					   struct hyp_page *p,
					   unsigned short order)
{
	struct hyp_page *buddy;

	page_remove_from_list(p);
	while (p->order > order) {
		/*
		 * Halve the block: after dropping p->order, the buddy at the
		 * new order is the upper half, which goes back on its list.
		 */
		p->order--;
		buddy = __find_buddy_nocheck(pool, p, p->order);
		buddy->order = p->order;
		page_add_to_list(buddy, &pool->free_area[buddy->order]);
	}

	return p;
}
0146
0147 static inline void hyp_page_ref_inc(struct hyp_page *p)
0148 {
0149 BUG_ON(p->refcount == USHRT_MAX);
0150 p->refcount++;
0151 }
0152
0153 static inline int hyp_page_ref_dec_and_test(struct hyp_page *p)
0154 {
0155 BUG_ON(!p->refcount);
0156 p->refcount--;
0157 return (p->refcount == 0);
0158 }
0159
0160 static inline void hyp_set_page_refcounted(struct hyp_page *p)
0161 {
0162 BUG_ON(p->refcount);
0163 p->refcount = 1;
0164 }
0165
/*
 * Drop a reference on @p and, if it was the last one, hand the page
 * back to the buddy allocator. Caller holds the pool lock.
 */
static void __hyp_put_page(struct hyp_pool *pool, struct hyp_page *p)
{
	if (!hyp_page_ref_dec_and_test(p))
		return;

	__hyp_attach_page(pool, p);
}
0171
0172
0173
0174
0175
0176
0177
0178
0179 void hyp_put_page(struct hyp_pool *pool, void *addr)
0180 {
0181 struct hyp_page *p = hyp_virt_to_page(addr);
0182
0183 hyp_spin_lock(&pool->lock);
0184 __hyp_put_page(pool, p);
0185 hyp_spin_unlock(&pool->lock);
0186 }
0187
0188 void hyp_get_page(struct hyp_pool *pool, void *addr)
0189 {
0190 struct hyp_page *p = hyp_virt_to_page(addr);
0191
0192 hyp_spin_lock(&pool->lock);
0193 hyp_page_ref_inc(p);
0194 hyp_spin_unlock(&pool->lock);
0195 }
0196
0197 void hyp_split_page(struct hyp_page *p)
0198 {
0199 unsigned short order = p->order;
0200 unsigned int i;
0201
0202 p->order = 0;
0203 for (i = 1; i < (1 << order); i++) {
0204 struct hyp_page *tail = p + i;
0205
0206 tail->order = 0;
0207 hyp_set_page_refcounted(tail);
0208 }
0209 }
0210
0211 void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order)
0212 {
0213 unsigned short i = order;
0214 struct hyp_page *p;
0215
0216 hyp_spin_lock(&pool->lock);
0217
0218
0219 while (i < pool->max_order && list_empty(&pool->free_area[i]))
0220 i++;
0221 if (i >= pool->max_order) {
0222 hyp_spin_unlock(&pool->lock);
0223 return NULL;
0224 }
0225
0226
0227 p = node_to_page(pool->free_area[i].next);
0228 p = __hyp_extract_page(pool, p, order);
0229
0230 hyp_set_page_refcounted(p);
0231 hyp_spin_unlock(&pool->lock);
0232
0233 return hyp_page_to_virt(p);
0234 }
0235
0236 int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages,
0237 unsigned int reserved_pages)
0238 {
0239 phys_addr_t phys = hyp_pfn_to_phys(pfn);
0240 struct hyp_page *p;
0241 int i;
0242
0243 hyp_spin_lock_init(&pool->lock);
0244 pool->max_order = min(MAX_ORDER, get_order((nr_pages + 1) << PAGE_SHIFT));
0245 for (i = 0; i < pool->max_order; i++)
0246 INIT_LIST_HEAD(&pool->free_area[i]);
0247 pool->range_start = phys;
0248 pool->range_end = phys + (nr_pages << PAGE_SHIFT);
0249
0250
0251 p = hyp_phys_to_page(phys);
0252 for (i = 0; i < nr_pages; i++) {
0253 p[i].order = 0;
0254 hyp_set_page_refcounted(&p[i]);
0255 }
0256
0257
0258 for (i = reserved_pages; i < nr_pages; i++)
0259 __hyp_put_page(pool, &p[i]);
0260
0261 return 0;
0262 }