// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/slab.h>

#include <asm/page.h>

#include <xen/balloon.h>
#include <xen/page.h>
#include <xen/xen.h>

/* Protects the free-page list and its counter below. */
static DEFINE_MUTEX(list_lock);
/* Singly linked free list of unpopulated pages, chained through page->zone_device_data. */
static struct page *page_list;
static unsigned int list_count;

/* Resource that unpopulated regions are allocated from (set at early init). */
static struct resource *target_resource;

/*
 * If the arch is not happy with the system-wide "iomem_resource" being used
 * for the region allocation, it can provide its own view by creating a
 * specific Xen resource with unused regions of the guest physical address
 * space provided by the hypervisor.
 */
int __weak __init arch_xen_unpopulated_init(struct resource **res)
{
	*res = &iomem_resource;

	return 0;
}

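/*
 * A minimal sketch of what an arch-specific override of the weak function
 * above might look like, assuming the arch can discover a safe unused guest
 * physical range. The "xen_sketch_region" name and the hard-coded range are
 * hypothetical, for illustration only:
 *
 *	static struct resource xen_sketch_region = {
 *		.name  = "Xen unused space",
 *		.flags = IORESOURCE_MEM,
 *		.start = 0x100000000ULL,	// hypothetical placeholder range
 *		.end   = 0x1ffffffffULL,
 *	};
 *
 *	int __init arch_xen_unpopulated_init(struct resource **res)
 *	{
 *		*res = &xen_sketch_region;
 *		return 0;
 *	}
 */
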
static int fill_list(unsigned int nr_pages)
{
	struct dev_pagemap *pgmap;
	struct resource *res, *tmp_res = NULL;
	void *vaddr;
	unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION);
	struct range mhp_range;
	int ret;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	res->name = "Xen scratch";
	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

	/* Restrict the search to the physical range that memory hotplug can map. */
	mhp_range = mhp_get_pluggable_range(true);

	/* Carve a section-aligned region out of the target resource. */
	ret = allocate_resource(target_resource, res,
				alloc_pages * PAGE_SIZE, mhp_range.start,
				mhp_range.end, PAGES_PER_SECTION * PAGE_SIZE,
				NULL, NULL);
	if (ret < 0) {
		pr_err("Cannot allocate new IOMEM resource\n");
		goto err_resource;
	}

	/*
	 * Reserve the region previously allocated from the Xen resource to
	 * avoid it being reused by someone else.
	 */
	if (target_resource != &iomem_resource) {
		tmp_res = kzalloc(sizeof(*tmp_res), GFP_KERNEL);
		if (!tmp_res) {
			ret = -ENOMEM;
			goto err_insert;
		}

		tmp_res->name = res->name;
		tmp_res->start = res->start;
		tmp_res->end = res->end;
		tmp_res->flags = res->flags;

		ret = request_resource(&iomem_resource, tmp_res);
		if (ret < 0) {
			pr_err("Cannot request resource %pR (%d)\n", tmp_res, ret);
			kfree(tmp_res);
			goto err_insert;
		}
	}

	pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
	if (!pgmap) {
		ret = -ENOMEM;
		goto err_pgmap;
	}

	/* Describe the new region as generic ZONE_DEVICE memory. */
	pgmap->type = MEMORY_DEVICE_GENERIC;
	pgmap->range = (struct range) {
		.start = res->start,
		.end = res->end,
	};
	pgmap->nr_range = 1;
	pgmap->owner = res;

#ifdef CONFIG_XEN_HAVE_PVMMU
	/*
	 * memremap() will build page tables for the new memory, so the p2m
	 * must contain invalid entries so that the correct non-present PTEs
	 * will be written.
	 *
	 * If a failure occurs, the mappings are torn down and removed again,
	 * which is safe as INVALID_P2M_ENTRY is used here.
	 */
	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		xen_pfn_t pfn = PFN_DOWN(res->start);

		for (i = 0; i < alloc_pages; i++) {
			if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
				pr_warn("set_phys_to_machine() failed, no memory added\n");
				ret = -ENOMEM;
				goto err_memremap;
			}
		}
	}
#endif

	vaddr = memremap_pages(pgmap, NUMA_NO_NODE);
	if (IS_ERR(vaddr)) {
		pr_err("Cannot remap memory range\n");
		ret = PTR_ERR(vaddr);
		goto err_memremap;
	}

	/* Push each freshly remapped page onto the free list. */
	for (i = 0; i < alloc_pages; i++) {
		struct page *pg = virt_to_page(vaddr + PAGE_SIZE * i);

		pg->zone_device_data = page_list;
		page_list = pg;
		list_count++;
	}

	return 0;

err_memremap:
	kfree(pgmap);
err_pgmap:
	if (tmp_res) {
		release_resource(tmp_res);
		kfree(tmp_res);
	}
err_insert:
	release_resource(res);
err_resource:
	kfree(res);
	return ret;
}

/**
 * xen_alloc_unpopulated_pages - alloc unpopulated pages
 * @nr_pages: Number of pages
 * @pages: pages returned
 * @return 0 on success, error otherwise
 */
int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	int ret = 0;

	/*
	 * Fall back to default behavior if we do not have any suitable
	 * resource to allocate the required region from and, as a result,
	 * won't be able to construct pages.
	 */
	if (!target_resource)
		return xen_alloc_ballooned_pages(nr_pages, pages);

	mutex_lock(&list_lock);
	if (list_count < nr_pages) {
		ret = fill_list(nr_pages - list_count);
		if (ret)
			goto out;
	}

	for (i = 0; i < nr_pages; i++) {
		struct page *pg = page_list;

		BUG_ON(!pg);
		page_list = pg->zone_device_data;
		list_count--;
		pages[i] = pg;

#ifdef CONFIG_XEN_HAVE_PVMMU
		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			ret = xen_alloc_p2m_entry(page_to_pfn(pg));
			if (ret < 0) {
				unsigned int j;

				/* Roll back: return already-taken pages to the free list. */
				for (j = 0; j <= i; j++) {
					pages[j]->zone_device_data = page_list;
					page_list = pages[j];
					list_count++;
				}
				goto out;
			}
		}
#endif
	}

out:
	mutex_unlock(&list_lock);
	return ret;
}
EXPORT_SYMBOL(xen_alloc_unpopulated_pages);

/**
 * xen_free_unpopulated_pages - return unpopulated pages
 * @nr_pages: Number of pages
 * @pages: pages to return
 */
void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
{
	unsigned int i;

	if (!target_resource) {
		xen_free_ballooned_pages(nr_pages, pages);
		return;
	}

	mutex_lock(&list_lock);
	for (i = 0; i < nr_pages; i++) {
		pages[i]->zone_device_data = page_list;
		page_list = pages[i];
		list_count++;
	}
	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(xen_free_unpopulated_pages);

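/*
 * A minimal usage sketch, not part of this file: a hypothetical caller
 * allocates a batch of unpopulated pages, uses them as backing for foreign
 * mappings, and hands them back. "NR_FRAMES" is a made-up constant used
 * only for illustration:
 *
 *	struct page *pages[NR_FRAMES];
 *	int rc;
 *
 *	rc = xen_alloc_unpopulated_pages(NR_FRAMES, pages);
 *	if (rc)
 *		return rc;
 *
 *	// ... map grant frames or foreign memory onto these pages ...
 *
 *	xen_free_unpopulated_pages(NR_FRAMES, pages);
 */
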
static int __init unpopulated_init(void)
{
	int ret;

	if (!xen_domain())
		return -ENODEV;

	ret = arch_xen_unpopulated_init(&target_resource);
	if (ret) {
		pr_err("xen:unpopulated: Cannot initialize target resource\n");
		/* Leave target_resource NULL so callers fall back to ballooned pages. */
		target_resource = NULL;
	}

	return ret;
}
early_initcall(unpopulated_init);