Back to home page

OSCL-LXR

 
 

    


0001 /*
0002  * MMU operations common to all auto-translated physmap guests.
0003  *
0004  * Copyright (C) 2015 Citrix Systems R&D Ltd.
0005  *
0006  * This program is free software; you can redistribute it and/or
0007  * modify it under the terms of the GNU General Public License version 2
0008  * as published by the Free Software Foundation; or, when distributed
0009  * separately from the Linux kernel or incorporated into other
0010  * software packages, subject to the following license:
0011  *
0012  * Permission is hereby granted, free of charge, to any person obtaining a copy
0013  * of this source file (the "Software"), to deal in the Software without
0014  * restriction, including without limitation the rights to use, copy, modify,
0015  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
0016  * and to permit persons to whom the Software is furnished to do so, subject to
0017  * the following conditions:
0018  *
0019  * The above copyright notice and this permission notice shall be included in
0020  * all copies or substantial portions of the Software.
0021  *
0022  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
0023  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
0024  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
0025  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
0026  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
0027  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
0028  * IN THE SOFTWARE.
0029  */
0030 #include <linux/kernel.h>
0031 #include <linux/mm.h>
0032 #include <linux/slab.h>
0033 #include <linux/vmalloc.h>
0034 
0035 #include <asm/xen/hypercall.h>
0036 #include <asm/xen/hypervisor.h>
0037 
0038 #include <xen/xen.h>
0039 #include <xen/xen-ops.h>
0040 #include <xen/page.h>
0041 #include <xen/interface/xen.h>
0042 #include <xen/interface/memory.h>
0043 #include <xen/balloon.h>
0044 
0045 typedef void (*xen_gfn_fn_t)(unsigned long gfn, void *data);
0046 
0047 /* Break down the pages in 4KB chunk and call fn for each gfn */
0048 static void xen_for_each_gfn(struct page **pages, unsigned nr_gfn,
0049                  xen_gfn_fn_t fn, void *data)
0050 {
0051     unsigned long xen_pfn = 0;
0052     struct page *page;
0053     int i;
0054 
0055     for (i = 0; i < nr_gfn; i++) {
0056         if ((i % XEN_PFN_PER_PAGE) == 0) {
0057             page = pages[i / XEN_PFN_PER_PAGE];
0058             xen_pfn = page_to_xen_pfn(page);
0059         }
0060         fn(pfn_to_gfn(xen_pfn++), data);
0061     }
0062 }
0063 
/*
 * Per-call state shared between xen_xlate_remap_gfn_array() and the
 * remap_pte_fn()/setup_hparams() callbacks it drives.
 */
struct remap_data {
    xen_pfn_t *fgfn; /* foreign domain's gfn */
    int nr_fgfn; /* Number of foreign gfn left to map */
    pgprot_t prot; /* protection applied to each new PTE */
    domid_t  domid; /* foreign domain to map frames from */
    struct vm_area_struct *vma;
    int index; /* next entry of @pages to consume */
    struct page **pages; /* local pages backing the mapping */
    struct xen_remap_gfn_info *info;
    int *err_ptr; /* one error slot written per Xen PFN */
    int mapped; /* running count of successfully mapped frames */

    /* Hypercall parameters */
    int h_errs[XEN_PFN_PER_PAGE];
    xen_ulong_t h_idxs[XEN_PFN_PER_PAGE];
    xen_pfn_t h_gpfns[XEN_PFN_PER_PAGE];

    int h_iter; /* Iterator */
};
0083 
0084 static void setup_hparams(unsigned long gfn, void *data)
0085 {
0086     struct remap_data *info = data;
0087 
0088     info->h_idxs[info->h_iter] = *info->fgfn;
0089     info->h_gpfns[info->h_iter] = gfn;
0090     info->h_errs[info->h_iter] = 0;
0091 
0092     info->h_iter++;
0093     info->fgfn++;
0094 }
0095 
/*
 * apply_to_page_range() callback: map up to XEN_PFN_PER_PAGE foreign
 * gfns onto the 4KB page backing this PTE with a single
 * XENMEM_add_to_physmap_range hypercall, and record a per-frame error
 * status through info->err_ptr.
 */
static int remap_pte_fn(pte_t *ptep, unsigned long addr, void *data)
{
    struct remap_data *info = data;
    struct page *page = info->pages[info->index++];
    /* The PTE always points at the local backing page. */
    pte_t pte = pte_mkspecial(pfn_pte(page_to_pfn(page), info->prot));
    int rc, nr_gfn;
    uint32_t i;
    struct xen_add_to_physmap_range xatp = {
        .domid = DOMID_SELF,
        .foreign_domid = info->domid,
        .space = XENMAPSPACE_gmfn_foreign,
    };

    /* A single kernel page holds at most XEN_PFN_PER_PAGE Xen frames. */
    nr_gfn = min_t(typeof(info->nr_fgfn), XEN_PFN_PER_PAGE, info->nr_fgfn);
    info->nr_fgfn -= nr_gfn;

    /* Fill h_idxs/h_gpfns/h_errs for this batch via setup_hparams(). */
    info->h_iter = 0;
    xen_for_each_gfn(&page, nr_gfn, setup_hparams, info);
    BUG_ON(info->h_iter != nr_gfn);

    set_xen_guest_handle(xatp.idxs, info->h_idxs);
    set_xen_guest_handle(xatp.gpfns, info->h_gpfns);
    set_xen_guest_handle(xatp.errs, info->h_errs);
    xatp.size = nr_gfn;

    rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);

    /* info->err_ptr expects one error status per Xen PFN. */
    for (i = 0; i < nr_gfn; i++) {
        /* A failed hypercall overrides any per-frame status. */
        int err = (rc < 0) ? rc : info->h_errs[i];

        *(info->err_ptr++) = err;
        if (!err)
            info->mapped++;
    }

    /*
     * Note: the hypercall will return 0 in most cases even if not all
     * of the foreign gfns were mapped.  We still have to update the
     * pte as the userspace may decide to continue.
     */
    if (!rc)
        set_pte_at(info->vma->vm_mm, addr, ptep, pte);

    return 0;
}
0142 
0143 int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
0144                   unsigned long addr,
0145                   xen_pfn_t *gfn, int nr,
0146                   int *err_ptr, pgprot_t prot,
0147                   unsigned domid,
0148                   struct page **pages)
0149 {
0150     int err;
0151     struct remap_data data;
0152     unsigned long range = DIV_ROUND_UP(nr, XEN_PFN_PER_PAGE) << PAGE_SHIFT;
0153 
0154     /* Kept here for the purpose of making sure code doesn't break
0155        x86 PVOPS */
0156     BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
0157 
0158     data.fgfn = gfn;
0159     data.nr_fgfn = nr;
0160     data.prot  = prot;
0161     data.domid = domid;
0162     data.vma   = vma;
0163     data.pages = pages;
0164     data.index = 0;
0165     data.err_ptr = err_ptr;
0166     data.mapped = 0;
0167 
0168     err = apply_to_page_range(vma->vm_mm, addr, range,
0169                   remap_pte_fn, &data);
0170     return err < 0 ? err : data.mapped;
0171 }
0172 EXPORT_SYMBOL_GPL(xen_xlate_remap_gfn_array);
0173 
0174 static void unmap_gfn(unsigned long gfn, void *data)
0175 {
0176     struct xen_remove_from_physmap xrp;
0177 
0178     xrp.domid = DOMID_SELF;
0179     xrp.gpfn = gfn;
0180     (void)HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp);
0181 }
0182 
0183 int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
0184                   int nr, struct page **pages)
0185 {
0186     xen_for_each_gfn(pages, nr, unmap_gfn, NULL);
0187 
0188     return 0;
0189 }
0190 EXPORT_SYMBOL_GPL(xen_xlate_unmap_gfn_range);
0191 
/* State for setup_balloon_gfn(): collects the gfn of each ballooned frame. */
struct map_balloon_pages {
    xen_pfn_t *pfns; /* output array, one entry per gfn */
    unsigned int idx; /* next free slot in @pfns */
};
0196 
0197 static void setup_balloon_gfn(unsigned long gfn, void *data)
0198 {
0199     struct map_balloon_pages *info = data;
0200 
0201     info->pfns[info->idx++] = gfn;
0202 }
0203 
0204 /**
0205  * xen_xlate_map_ballooned_pages - map a new set of ballooned pages
0206  * @gfns: returns the array of corresponding GFNs
0207  * @virt: returns the virtual address of the mapped region
0208  * @nr_grant_frames: number of GFNs
0209  * @return 0 on success, error otherwise
0210  *
0211  * This allocates a set of ballooned pages and maps them into the
0212  * kernel's address space.
0213  */
0214 int __init xen_xlate_map_ballooned_pages(xen_pfn_t **gfns, void **virt,
0215                      unsigned long nr_grant_frames)
0216 {
0217     struct page **pages;
0218     xen_pfn_t *pfns;
0219     void *vaddr;
0220     struct map_balloon_pages data;
0221     int rc;
0222     unsigned long nr_pages;
0223 
0224     BUG_ON(nr_grant_frames == 0);
0225     nr_pages = DIV_ROUND_UP(nr_grant_frames, XEN_PFN_PER_PAGE);
0226     pages = kcalloc(nr_pages, sizeof(pages[0]), GFP_KERNEL);
0227     if (!pages)
0228         return -ENOMEM;
0229 
0230     pfns = kcalloc(nr_grant_frames, sizeof(pfns[0]), GFP_KERNEL);
0231     if (!pfns) {
0232         kfree(pages);
0233         return -ENOMEM;
0234     }
0235     rc = xen_alloc_unpopulated_pages(nr_pages, pages);
0236     if (rc) {
0237         pr_warn("%s Couldn't balloon alloc %ld pages rc:%d\n", __func__,
0238             nr_pages, rc);
0239         kfree(pages);
0240         kfree(pfns);
0241         return rc;
0242     }
0243 
0244     data.pfns = pfns;
0245     data.idx = 0;
0246     xen_for_each_gfn(pages, nr_grant_frames, setup_balloon_gfn, &data);
0247 
0248     vaddr = vmap(pages, nr_pages, 0, PAGE_KERNEL);
0249     if (!vaddr) {
0250         pr_warn("%s Couldn't map %ld pages rc:%d\n", __func__,
0251             nr_pages, rc);
0252         xen_free_unpopulated_pages(nr_pages, pages);
0253         kfree(pages);
0254         kfree(pfns);
0255         return -ENOMEM;
0256     }
0257     kfree(pages);
0258 
0259     *gfns = pfns;
0260     *virt = vaddr;
0261 
0262     return 0;
0263 }
0264 
/* State for remap_pfn_fn(): walks @pages while installing PTEs. */
struct remap_pfn {
    struct mm_struct *mm; /* address space receiving the PTEs */
    struct page **pages; /* pages to map, consumed in order */
    pgprot_t prot; /* protection for each new PTE */
    unsigned long i; /* index of the next page to install */
};
0271 
0272 static int remap_pfn_fn(pte_t *ptep, unsigned long addr, void *data)
0273 {
0274     struct remap_pfn *r = data;
0275     struct page *page = r->pages[r->i];
0276     pte_t pte = pte_mkspecial(pfn_pte(page_to_pfn(page), r->prot));
0277 
0278     set_pte_at(r->mm, addr, ptep, pte);
0279     r->i++;
0280 
0281     return 0;
0282 }
0283 
0284 /* Used by the privcmd module, but has to be built-in on ARM */
0285 int xen_remap_vma_range(struct vm_area_struct *vma, unsigned long addr, unsigned long len)
0286 {
0287     struct remap_pfn r = {
0288         .mm = vma->vm_mm,
0289         .pages = vma->vm_private_data,
0290         .prot = vma->vm_page_prot,
0291     };
0292 
0293     return apply_to_page_range(vma->vm_mm, addr, len, remap_pfn_fn, &r);
0294 }
0295 EXPORT_SYMBOL_GPL(xen_remap_vma_range);