0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030 #include <linux/kernel.h>
0031 #include <linux/mm.h>
0032 #include <linux/slab.h>
0033 #include <linux/vmalloc.h>
0034
0035 #include <asm/xen/hypercall.h>
0036 #include <asm/xen/hypervisor.h>
0037
0038 #include <xen/xen.h>
0039 #include <xen/xen-ops.h>
0040 #include <xen/page.h>
0041 #include <xen/interface/xen.h>
0042 #include <xen/interface/memory.h>
0043 #include <xen/balloon.h>
0044
0045 typedef void (*xen_gfn_fn_t)(unsigned long gfn, void *data);
0046
0047
0048 static void xen_for_each_gfn(struct page **pages, unsigned nr_gfn,
0049 xen_gfn_fn_t fn, void *data)
0050 {
0051 unsigned long xen_pfn = 0;
0052 struct page *page;
0053 int i;
0054
0055 for (i = 0; i < nr_gfn; i++) {
0056 if ((i % XEN_PFN_PER_PAGE) == 0) {
0057 page = pages[i / XEN_PFN_PER_PAGE];
0058 xen_pfn = page_to_xen_pfn(page);
0059 }
0060 fn(pfn_to_gfn(xen_pfn++), data);
0061 }
0062 }
0063
/* State shared across remap_pte_fn() calls while remapping foreign gfns. */
struct remap_data {
	xen_pfn_t *fgfn;		/* next foreign gfn to map */
	int nr_fgfn;			/* foreign gfns remaining */
	pgprot_t prot;			/* page protection for the new ptes */
	domid_t domid;			/* domain owning the foreign frames */
	struct vm_area_struct *vma;	/* vma being populated */
	int index;			/* next entry of pages[] to consume */
	struct page **pages;		/* local backing pages */
	struct xen_remap_gfn_info *info;
	int *err_ptr;			/* per-gfn error output array */
	int mapped;			/* count of successfully mapped gfns */

	/* Hypercall parameter buffers for one batch (at most one kernel
	 * page worth of Xen frames per XENMEM_add_to_physmap_range call). */
	int h_errs[XEN_PFN_PER_PAGE];
	xen_ulong_t h_idxs[XEN_PFN_PER_PAGE];
	xen_pfn_t h_gpfns[XEN_PFN_PER_PAGE];

	int h_iter;			/* entries filled in the h_* arrays */
};
0083
0084 static void setup_hparams(unsigned long gfn, void *data)
0085 {
0086 struct remap_data *info = data;
0087
0088 info->h_idxs[info->h_iter] = *info->fgfn;
0089 info->h_gpfns[info->h_iter] = gfn;
0090 info->h_errs[info->h_iter] = 0;
0091
0092 info->h_iter++;
0093 info->fgfn++;
0094 }
0095
/*
 * apply_to_page_range() callback: map up to one kernel page worth of
 * foreign gfns (XEN_PFN_PER_PAGE of them) into the physmap via a single
 * XENMEM_add_to_physmap_range hypercall, then install the pte for the
 * local backing page.
 *
 * Per-gfn results are written through info->err_ptr; info->mapped counts
 * the successes. Always returns 0 so the page-range walk continues —
 * callers learn about failures from the err_ptr array.
 */
static int remap_pte_fn(pte_t *ptep, unsigned long addr, void *data)
{
	struct remap_data *info = data;
	struct page *page = info->pages[info->index++];
	/* pte for the local page that will back this foreign mapping. */
	pte_t pte = pte_mkspecial(pfn_pte(page_to_pfn(page), info->prot));
	int rc, nr_gfn;
	uint32_t i;
	struct xen_add_to_physmap_range xatp = {
		.domid = DOMID_SELF,
		.foreign_domid = info->domid,
		.space = XENMAPSPACE_gmfn_foreign,
	};

	/* Batch size: at most one kernel page of Xen frames per call. */
	nr_gfn = min_t(typeof(info->nr_fgfn), XEN_PFN_PER_PAGE, info->nr_fgfn);
	info->nr_fgfn -= nr_gfn;

	/* Fill h_idxs/h_gpfns/h_errs for this batch via setup_hparams(). */
	info->h_iter = 0;
	xen_for_each_gfn(&page, nr_gfn, setup_hparams, info);
	BUG_ON(info->h_iter != nr_gfn);

	set_xen_guest_handle(xatp.idxs, info->h_idxs);
	set_xen_guest_handle(xatp.gpfns, info->h_gpfns);
	set_xen_guest_handle(xatp.errs, info->h_errs);
	xatp.size = nr_gfn;

	rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);

	/* Propagate per-gfn results: a failed hypercall (rc < 0) taints
	 * every entry of the batch, otherwise use Xen's per-entry errs. */
	for (i = 0; i < nr_gfn; i++) {
		int err = (rc < 0) ? rc : info->h_errs[i];

		*(info->err_ptr++) = err;
		if (!err)
			info->mapped++;
	}

	/* Only install the pte when the hypercall itself succeeded;
	 * individual entries may still have failed (see err_ptr above). */
	if (!rc)
		set_pte_at(info->vma->vm_mm, addr, ptep, pte);

	return 0;
}
0142
0143 int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
0144 unsigned long addr,
0145 xen_pfn_t *gfn, int nr,
0146 int *err_ptr, pgprot_t prot,
0147 unsigned domid,
0148 struct page **pages)
0149 {
0150 int err;
0151 struct remap_data data;
0152 unsigned long range = DIV_ROUND_UP(nr, XEN_PFN_PER_PAGE) << PAGE_SHIFT;
0153
0154
0155
0156 BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
0157
0158 data.fgfn = gfn;
0159 data.nr_fgfn = nr;
0160 data.prot = prot;
0161 data.domid = domid;
0162 data.vma = vma;
0163 data.pages = pages;
0164 data.index = 0;
0165 data.err_ptr = err_ptr;
0166 data.mapped = 0;
0167
0168 err = apply_to_page_range(vma->vm_mm, addr, range,
0169 remap_pte_fn, &data);
0170 return err < 0 ? err : data.mapped;
0171 }
0172 EXPORT_SYMBOL_GPL(xen_xlate_remap_gfn_array);
0173
0174 static void unmap_gfn(unsigned long gfn, void *data)
0175 {
0176 struct xen_remove_from_physmap xrp;
0177
0178 xrp.domid = DOMID_SELF;
0179 xrp.gpfn = gfn;
0180 (void)HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp);
0181 }
0182
/*
 * Remove @nr previously mapped foreign gfns (backed by @pages) from the
 * physmap. @vma is unused here; kept for interface symmetry with
 * xen_xlate_remap_gfn_array(). Always returns 0 — unmap is best effort.
 */
int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
			      int nr, struct page **pages)
{
	xen_for_each_gfn(pages, nr, unmap_gfn, NULL);

	return 0;
}
EXPORT_SYMBOL_GPL(xen_xlate_unmap_gfn_range);
0191
/* Cursor for collecting gfns of ballooned pages into an array. */
struct map_balloon_pages {
	xen_pfn_t *pfns;	/* output array of gfns */
	unsigned int idx;	/* next slot to fill */
};
0196
0197 static void setup_balloon_gfn(unsigned long gfn, void *data)
0198 {
0199 struct map_balloon_pages *info = data;
0200
0201 info->pfns[info->idx++] = gfn;
0202 }
0203
0204
0205
0206
0207
0208
0209
0210
0211
0212
0213
0214 int __init xen_xlate_map_ballooned_pages(xen_pfn_t **gfns, void **virt,
0215 unsigned long nr_grant_frames)
0216 {
0217 struct page **pages;
0218 xen_pfn_t *pfns;
0219 void *vaddr;
0220 struct map_balloon_pages data;
0221 int rc;
0222 unsigned long nr_pages;
0223
0224 BUG_ON(nr_grant_frames == 0);
0225 nr_pages = DIV_ROUND_UP(nr_grant_frames, XEN_PFN_PER_PAGE);
0226 pages = kcalloc(nr_pages, sizeof(pages[0]), GFP_KERNEL);
0227 if (!pages)
0228 return -ENOMEM;
0229
0230 pfns = kcalloc(nr_grant_frames, sizeof(pfns[0]), GFP_KERNEL);
0231 if (!pfns) {
0232 kfree(pages);
0233 return -ENOMEM;
0234 }
0235 rc = xen_alloc_unpopulated_pages(nr_pages, pages);
0236 if (rc) {
0237 pr_warn("%s Couldn't balloon alloc %ld pages rc:%d\n", __func__,
0238 nr_pages, rc);
0239 kfree(pages);
0240 kfree(pfns);
0241 return rc;
0242 }
0243
0244 data.pfns = pfns;
0245 data.idx = 0;
0246 xen_for_each_gfn(pages, nr_grant_frames, setup_balloon_gfn, &data);
0247
0248 vaddr = vmap(pages, nr_pages, 0, PAGE_KERNEL);
0249 if (!vaddr) {
0250 pr_warn("%s Couldn't map %ld pages rc:%d\n", __func__,
0251 nr_pages, rc);
0252 xen_free_unpopulated_pages(nr_pages, pages);
0253 kfree(pages);
0254 kfree(pfns);
0255 return -ENOMEM;
0256 }
0257 kfree(pages);
0258
0259 *gfns = pfns;
0260 *virt = vaddr;
0261
0262 return 0;
0263 }
0264
/* State for remap_pfn_fn() while installing ptes over a vma range. */
struct remap_pfn {
	struct mm_struct *mm;	/* address space receiving the ptes */
	struct page **pages;	/* backing pages, consumed in order */
	pgprot_t prot;		/* protection bits for each pte */
	unsigned long i;	/* index of the next page to install */
};
0271
0272 static int remap_pfn_fn(pte_t *ptep, unsigned long addr, void *data)
0273 {
0274 struct remap_pfn *r = data;
0275 struct page *page = r->pages[r->i];
0276 pte_t pte = pte_mkspecial(pfn_pte(page_to_pfn(page), r->prot));
0277
0278 set_pte_at(r->mm, addr, ptep, pte);
0279 r->i++;
0280
0281 return 0;
0282 }
0283
0284
0285 int xen_remap_vma_range(struct vm_area_struct *vma, unsigned long addr, unsigned long len)
0286 {
0287 struct remap_pfn r = {
0288 .mm = vma->vm_mm,
0289 .pages = vma->vm_private_data,
0290 .prot = vma->vm_page_prot,
0291 };
0292
0293 return apply_to_page_range(vma->vm_mm, addr, len, remap_pfn_fn, &r);
0294 }
0295 EXPORT_SYMBOL_GPL(xen_remap_vma_range);