0001 /*
0002  * Xen mmu operations
0003  *
0004  * This file contains the various mmu fetch and update operations.
0005  * The most important job they must perform is the mapping between the
0006  * domain's pfn and the overall machine mfns.
0007  *
0008  * Xen allows guests to directly update the pagetable, in a controlled
0009  * fashion.  In other words, the guest modifies the same pagetable
0010  * that the CPU actually uses, which eliminates the overhead of having
0011  * a separate shadow pagetable.
0012  *
0013  * In order to allow this, it falls on the guest domain to map its
0014  * notion of a "physical" pfn - which is just a domain-local linear
0015  * address - into a real "machine address" which the CPU's MMU can
0016  * use.
0017  *
0018  * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
0019  * inserted directly into the pagetable.  When creating a new
0020  * pte/pmd/pgd, it converts the passed pfn into an mfn.  Conversely,
0021  * when reading the content back with __(pgd|pmd|pte)_val, it converts
0022  * the mfn back into a pfn.
0023  *
0024  * The other constraint is that all pages which make up a pagetable
0025  * must be mapped read-only in the guest.  This prevents uncontrolled
0026  * guest updates to the pagetable.  Xen strictly enforces this, and
0027  * will disallow any pagetable update which will end up mapping a
0028  * pagetable page RW, and will disallow using any writable page as a
0029  * pagetable.
0030  *
0031  * Naively, when loading %cr3 with the base of a new pagetable, Xen
0032  * would need to validate the whole pagetable before going on.
0033  * Naturally, this is quite slow.  The solution is to "pin" a
0034  * pagetable, which enforces all the constraints on the pagetable even
0035  * when it is not actively in use.  This means that Xen can be assured
0036  * that it is still valid when you do load it into %cr3, and doesn't
0037  * need to revalidate it.
0038  *
0039  * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
0040  */
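/*
 * Editor's note: a minimal illustrative sketch (not part of the original
 * file) of the pfn -> mfn direction described above, assuming the
 * pfn_to_mfn() and mfn_pte() helpers from <asm/xen/page.h>:
 *
 *	static pte_t example_kernel_pte(unsigned long pfn)
 *	{
 *		unsigned long mfn = pfn_to_mfn(pfn);	// domain pfn -> machine frame
 *
 *		return mfn_pte(mfn, PAGE_KERNEL);	// mfn-based pte Xen can validate
 *	}
 *
 * Reading such an entry back with pte_val() performs the inverse
 * mfn -> pfn lookup, which xen_pte_val() below implements.
 */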
0041 #include <linux/sched.h>
0042 #include <linux/highmem.h>
0043 #include <linux/debugfs.h>
0044 #include <linux/bug.h>
0045 #include <linux/vmalloc.h>
0046 #include <linux/export.h>
0047 #include <linux/init.h>
0048 #include <linux/gfp.h>
0049 #include <linux/memblock.h>
0050 #include <linux/seq_file.h>
0051 #include <linux/crash_dump.h>
0052 
0053 #include <trace/events/xen.h>
0054 
0055 #include <asm/pgtable.h>
0056 #include <asm/tlbflush.h>
0057 #include <asm/fixmap.h>
0058 #include <asm/mmu_context.h>
0059 #include <asm/setup.h>
0060 #include <asm/paravirt.h>
0061 #include <asm/e820.h>
0062 #include <asm/linkage.h>
0063 #include <asm/page.h>
0064 #include <asm/init.h>
0065 #include <asm/pat.h>
0066 #include <asm/smp.h>
0067 
0068 #include <asm/xen/hypercall.h>
0069 #include <asm/xen/hypervisor.h>
0070 
0071 #include <xen/xen.h>
0072 #include <xen/page.h>
0073 #include <xen/interface/xen.h>
0074 #include <xen/interface/hvm/hvm_op.h>
0075 #include <xen/interface/version.h>
0076 #include <xen/interface/memory.h>
0077 #include <xen/hvc-console.h>
0078 
0079 #include "multicalls.h"
0080 #include "mmu.h"
0081 #include "debugfs.h"
0082 
0083 /*
0084  * Protects atomic reservation decrease/increase against concurrent increases.
0085  * Also protects non-atomic updates of current_pages and balloon lists.
0086  */
0087 DEFINE_SPINLOCK(xen_reservation_lock);
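/*
 * Editor's note: a hedged usage sketch (not part of the original file) of
 * the locking pattern described above; callers in the Xen memory code
 * typically take this lock with interrupts disabled:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&xen_reservation_lock, flags);
 *	// ... adjust the reservation / balloon bookkeeping ...
 *	spin_unlock_irqrestore(&xen_reservation_lock, flags);
 */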
0088 
0089 #ifdef CONFIG_X86_32
0090 /*
0091  * Identity map, in addition to plain kernel map.  This needs to be
0092  * large enough to allocate page table pages to allocate the rest.
0093  * Each page can map 2MB.
0094  */
0095 #define LEVEL1_IDENT_ENTRIES    (PTRS_PER_PTE * 4)
0096 static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);
0097 #endif
0098 #ifdef CONFIG_X86_64
0099 /* l3 pud for userspace vsyscall mapping */
0100 static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
0101 #endif /* CONFIG_X86_64 */
0102 
0103 /*
0104  * Note about cr3 (pagetable base) values:
0105  *
0106  * xen_cr3 contains the current logical cr3 value; it contains the
0107  * last set cr3.  This may not be the current effective cr3, because
0108  * its update may have been lazily deferred.  However, a vcpu looking
0109  * at its own cr3 can use this value knowing that everything will
0110  * be self-consistent.
0111  *
0112  * xen_current_cr3 contains the actual vcpu cr3; it is set once the
0113  * hypercall to set the vcpu cr3 is complete (so it may be a little
0114  * out of date, but it will never be set early).  If one vcpu is
0115  * looking at another vcpu's cr3 value, it should use this variable.
0116  */
0117 DEFINE_PER_CPU(unsigned long, xen_cr3);  /* cr3 stored as physaddr */
0118 DEFINE_PER_CPU(unsigned long, xen_current_cr3);  /* actual vcpu cr3 */
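/*
 * Editor's note: an illustrative sketch (not part of the original file) of
 * how the two per-cpu values above are intended to be read, per the
 * comment preceding them:
 *
 *	// a vcpu looking at its own (possibly still lazily-deferred) cr3
 *	unsigned long mine = this_cpu_read(xen_cr3);
 *
 *	// a vcpu looking at another vcpu's last-committed cr3
 *	unsigned long theirs = per_cpu(xen_current_cr3, cpu);
 */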
0119 
0120 static phys_addr_t xen_pt_base, xen_pt_size __initdata;
0121 
0122 /*
0123  * Just beyond the highest usermode address.  STACK_TOP_MAX has a
0124  * redzone above it, so round it up to a PGD boundary.
0125  */
0126 #define USER_LIMIT  ((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)
0127 
0128 unsigned long arbitrary_virt_to_mfn(void *vaddr)
0129 {
0130     xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);
0131 
0132     return PFN_DOWN(maddr.maddr);
0133 }
0134 
0135 xmaddr_t arbitrary_virt_to_machine(void *vaddr)
0136 {
0137     unsigned long address = (unsigned long)vaddr;
0138     unsigned int level;
0139     pte_t *pte;
0140     unsigned offset;
0141 
0142     /*
0143      * if the PFN is in the linear mapped vaddr range, we can just use
0144      * the (quick) virt_to_machine() p2m lookup
0145      */
0146     if (virt_addr_valid(vaddr))
0147         return virt_to_machine(vaddr);
0148 
0149     /* otherwise we have to do a (slower) full page-table walk */
0150 
0151     pte = lookup_address(address, &level);
0152     BUG_ON(pte == NULL);
0153     offset = address & ~PAGE_MASK;
0154     return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
0155 }
0156 EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
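/*
 * Editor's note: a hedged usage sketch (not part of the original file).
 * For a lowmem address the quick p2m lookup is taken; for e.g. a vmalloc
 * or fixmap address the slower pagetable walk above is used:
 *
 *	xmaddr_t maddr    = arbitrary_virt_to_machine(ptr);	// full machine address
 *	unsigned long mfn = arbitrary_virt_to_mfn(ptr);		// machine frame only
 */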
0157 
0158 void make_lowmem_page_readonly(void *vaddr)
0159 {
0160     pte_t *pte, ptev;
0161     unsigned long address = (unsigned long)vaddr;
0162     unsigned int level;
0163 
0164     pte = lookup_address(address, &level);
0165     if (pte == NULL)
0166         return;     /* vaddr missing */
0167 
0168     ptev = pte_wrprotect(*pte);
0169 
0170     if (HYPERVISOR_update_va_mapping(address, ptev, 0))
0171         BUG();
0172 }
0173 
0174 void make_lowmem_page_readwrite(void *vaddr)
0175 {
0176     pte_t *pte, ptev;
0177     unsigned long address = (unsigned long)vaddr;
0178     unsigned int level;
0179 
0180     pte = lookup_address(address, &level);
0181     if (pte == NULL)
0182         return;     /* vaddr missing */
0183 
0184     ptev = pte_mkwrite(*pte);
0185 
0186     if (HYPERVISOR_update_va_mapping(address, ptev, 0))
0187         BUG();
0188 }
0189 
0190 
0191 static bool xen_page_pinned(void *ptr)
0192 {
0193     struct page *page = virt_to_page(ptr);
0194 
0195     return PagePinned(page);
0196 }
0197 
0198 void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
0199 {
0200     struct multicall_space mcs;
0201     struct mmu_update *u;
0202 
0203     trace_xen_mmu_set_domain_pte(ptep, pteval, domid);
0204 
0205     mcs = xen_mc_entry(sizeof(*u));
0206     u = mcs.args;
0207 
0208     /* ptep might be kmapped when using 32-bit HIGHPTE */
0209     u->ptr = virt_to_machine(ptep).maddr;
0210     u->val = pte_val_ma(pteval);
0211 
0212     MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);
0213 
0214     xen_mc_issue(PARAVIRT_LAZY_MMU);
0215 }
0216 EXPORT_SYMBOL_GPL(xen_set_domain_pte);
0217 
0218 static void xen_extend_mmu_update(const struct mmu_update *update)
0219 {
0220     struct multicall_space mcs;
0221     struct mmu_update *u;
0222 
0223     mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));
0224 
0225     if (mcs.mc != NULL) {
0226         mcs.mc->args[1]++;
0227     } else {
0228         mcs = __xen_mc_entry(sizeof(*u));
0229         MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
0230     }
0231 
0232     u = mcs.args;
0233     *u = *update;
0234 }
0235 
0236 static void xen_extend_mmuext_op(const struct mmuext_op *op)
0237 {
0238     struct multicall_space mcs;
0239     struct mmuext_op *u;
0240 
0241     mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));
0242 
0243     if (mcs.mc != NULL) {
0244         mcs.mc->args[1]++;
0245     } else {
0246         mcs = __xen_mc_entry(sizeof(*u));
0247         MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
0248     }
0249 
0250     u = mcs.args;
0251     *u = *op;
0252 }
0253 
0254 static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
0255 {
0256     struct mmu_update u;
0257 
0258     preempt_disable();
0259 
0260     xen_mc_batch();
0261 
0262     /* ptr may be ioremapped for 64-bit pagetable setup */
0263     u.ptr = arbitrary_virt_to_machine(ptr).maddr;
0264     u.val = pmd_val_ma(val);
0265     xen_extend_mmu_update(&u);
0266 
0267     xen_mc_issue(PARAVIRT_LAZY_MMU);
0268 
0269     preempt_enable();
0270 }
0271 
0272 static void xen_set_pmd(pmd_t *ptr, pmd_t val)
0273 {
0274     trace_xen_mmu_set_pmd(ptr, val);
0275 
0276     /* If page is not pinned, we can just update the entry
0277        directly */
0278     if (!xen_page_pinned(ptr)) {
0279         *ptr = val;
0280         return;
0281     }
0282 
0283     xen_set_pmd_hyper(ptr, val);
0284 }
0285 
0286 /*
0287  * Associate a virtual page frame with a given physical page frame
0288  * and protection flags for that frame.
0289  */
0290 void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
0291 {
0292     set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
0293 }
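/*
 * Editor's note: a hedged usage sketch (not part of the original file);
 * mapping machine frame 'mfn' read-write at kernel virtual address 'vaddr':
 *
 *	set_pte_mfn(vaddr, mfn, PAGE_KERNEL);
 */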
0294 
0295 static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
0296 {
0297     struct mmu_update u;
0298 
0299     if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
0300         return false;
0301 
0302     xen_mc_batch();
0303 
0304     u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
0305     u.val = pte_val_ma(pteval);
0306     xen_extend_mmu_update(&u);
0307 
0308     xen_mc_issue(PARAVIRT_LAZY_MMU);
0309 
0310     return true;
0311 }
0312 
0313 static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
0314 {
0315     if (!xen_batched_set_pte(ptep, pteval)) {
0316         /*
0317          * Could call native_set_pte() here and trap and
0318          * emulate the PTE write but with 32-bit guests this
0319          * needs two traps (one for each of the two 32-bit
0320          * words in the PTE) so do one hypercall directly
0321          * instead.
0322          */
0323         struct mmu_update u;
0324 
0325         u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
0326         u.val = pte_val_ma(pteval);
0327         HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
0328     }
0329 }
0330 
0331 static void xen_set_pte(pte_t *ptep, pte_t pteval)
0332 {
0333     trace_xen_mmu_set_pte(ptep, pteval);
0334     __xen_set_pte(ptep, pteval);
0335 }
0336 
0337 static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
0338             pte_t *ptep, pte_t pteval)
0339 {
0340     trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
0341     __xen_set_pte(ptep, pteval);
0342 }
0343 
0344 pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
0345                  unsigned long addr, pte_t *ptep)
0346 {
0347     /* Just return the pte as-is.  We preserve the bits on commit */
0348     trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);
0349     return *ptep;
0350 }
0351 
0352 void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
0353                  pte_t *ptep, pte_t pte)
0354 {
0355     struct mmu_update u;
0356 
0357     trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
0358     xen_mc_batch();
0359 
0360     u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
0361     u.val = pte_val_ma(pte);
0362     xen_extend_mmu_update(&u);
0363 
0364     xen_mc_issue(PARAVIRT_LAZY_MMU);
0365 }
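/*
 * Editor's note: an illustrative sketch (not part of the original file) of
 * the start/commit protocol these two hooks back.  Generic code uses the
 * ptep_modify_prot_start()/ptep_modify_prot_commit() wrappers roughly as:
 *
 *	pte_t pte = ptep_modify_prot_start(mm, addr, ptep);
 *	pte = pte_wrprotect(pte);			// adjust the flags
 *	ptep_modify_prot_commit(mm, addr, ptep, pte);	// one batched, A/D-preserving update
 */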
0366 
0367 /* Assume pteval_t is equivalent to all the other *val_t types. */
0368 static pteval_t pte_mfn_to_pfn(pteval_t val)
0369 {
0370     if (val & _PAGE_PRESENT) {
0371         unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
0372         unsigned long pfn = mfn_to_pfn(mfn);
0373 
0374         pteval_t flags = val & PTE_FLAGS_MASK;
0375         if (unlikely(pfn == ~0))
0376             val = flags & ~_PAGE_PRESENT;
0377         else
0378             val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
0379     }
0380 
0381     return val;
0382 }
0383 
0384 static pteval_t pte_pfn_to_mfn(pteval_t val)
0385 {
0386     if (val & _PAGE_PRESENT) {
0387         unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
0388         pteval_t flags = val & PTE_FLAGS_MASK;
0389         unsigned long mfn;
0390 
0391         if (!xen_feature(XENFEAT_auto_translated_physmap))
0392             mfn = __pfn_to_mfn(pfn);
0393         else
0394             mfn = pfn;
0395         /*
0396          * If there's no mfn for the pfn, then just create an
0397          * empty non-present pte.  Unfortunately this loses
0398          * information about the original pfn, so
0399          * pte_mfn_to_pfn is asymmetric.
0400          */
0401         if (unlikely(mfn == INVALID_P2M_ENTRY)) {
0402             mfn = 0;
0403             flags = 0;
0404         } else
0405             mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);
0406         val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
0407     }
0408 
0409     return val;
0410 }
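/*
 * Editor's note: a small worked example (not part of the original file) of
 * the asymmetry noted in the comment above.  For a present pte whose pfn
 * has no backing mfn (INVALID_P2M_ENTRY):
 *
 *	pte_pfn_to_mfn(val)  returns 0		// empty, non-present pte
 *	pte_mfn_to_pfn(0)    returns 0		// the original pfn is not recovered
 *
 * so a pfn -> mfn -> pfn round trip loses the original pfn.
 */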
0411 
0412 __visible pteval_t xen_pte_val(pte_t pte)
0413 {
0414     pteval_t pteval = pte.pte;
0415 
0416     return pte_mfn_to_pfn(pteval);
0417 }
0418 PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
0419 
0420 __visible pgdval_t xen_pgd_val(pgd_t pgd)
0421 {
0422     return pte_mfn_to_pfn(pgd.pgd);
0423 }
0424 PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);
0425 
0426 __visible pte_t xen_make_pte(pteval_t pte)
0427 {
0428     pte = pte_pfn_to_mfn(pte);
0429 
0430     return native_make_pte(pte);
0431 }
0432 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
0433 
0434 __visible pgd_t xen_make_pgd(pgdval_t pgd)
0435 {
0436     pgd = pte_pfn_to_mfn(pgd);
0437     return native_make_pgd(pgd);
0438 }
0439 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);
0440 
0441 __visible pmdval_t xen_pmd_val(pmd_t pmd)
0442 {
0443     return pte_mfn_to_pfn(pmd.pmd);
0444 }
0445 PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);
0446 
0447 static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
0448 {
0449     struct mmu_update u;
0450 
0451     preempt_disable();
0452 
0453     xen_mc_batch();
0454 
0455     /* ptr may be ioremapped for 64-bit pagetable setup */
0456     u.ptr = arbitrary_virt_to_machine(ptr).maddr;
0457     u.val = pud_val_ma(val);
0458     xen_extend_mmu_update(&u);
0459 
0460     xen_mc_issue(PARAVIRT_LAZY_MMU);
0461 
0462     preempt_enable();
0463 }
0464 
0465 static void xen_set_pud(pud_t *ptr, pud_t val)
0466 {
0467     trace_xen_mmu_set_pud(ptr, val);
0468 
0469     /* If page is not pinned, we can just update the entry
0470        directly */
0471     if (!xen_page_pinned(ptr)) {
0472         *ptr = val;
0473         return;
0474     }
0475 
0476     xen_set_pud_hyper(ptr, val);
0477 }
0478 
0479 #ifdef CONFIG_X86_PAE
0480 static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
0481 {
0482     trace_xen_mmu_set_pte_atomic(ptep, pte);
0483     set_64bit((u64 *)ptep, native_pte_val(pte));
0484 }
0485 
0486 static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
0487 {
0488     trace_xen_mmu_pte_clear(mm, addr, ptep);
0489     if (!xen_batched_set_pte(ptep, native_make_pte(0)))
0490         native_pte_clear(mm, addr, ptep);
0491 }
0492 
0493 static void xen_pmd_clear(pmd_t *pmdp)
0494 {
0495     trace_xen_mmu_pmd_clear(pmdp);
0496     set_pmd(pmdp, __pmd(0));
0497 }
0498 #endif  /* CONFIG_X86_PAE */
0499 
0500 __visible pmd_t xen_make_pmd(pmdval_t pmd)
0501 {
0502     pmd = pte_pfn_to_mfn(pmd);
0503     return native_make_pmd(pmd);
0504 }
0505 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
0506 
0507 #if CONFIG_PGTABLE_LEVELS == 4
0508 __visible pudval_t xen_pud_val(pud_t pud)
0509 {
0510     return pte_mfn_to_pfn(pud.pud);
0511 }
0512 PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);
0513 
0514 __visible pud_t xen_make_pud(pudval_t pud)
0515 {
0516     pud = pte_pfn_to_mfn(pud);
0517 
0518     return native_make_pud(pud);
0519 }
0520 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);
0521 
0522 static pgd_t *xen_get_user_pgd(pgd_t *pgd)
0523 {
0524     pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
0525     unsigned offset = pgd - pgd_page;
0526     pgd_t *user_ptr = NULL;
0527 
0528     if (offset < pgd_index(USER_LIMIT)) {
0529         struct page *page = virt_to_page(pgd_page);
0530         user_ptr = (pgd_t *)page->private;
0531         if (user_ptr)
0532             user_ptr += offset;
0533     }
0534 
0535     return user_ptr;
0536 }
0537 
0538 static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
0539 {
0540     struct mmu_update u;
0541 
0542     u.ptr = virt_to_machine(ptr).maddr;
0543     u.val = pgd_val_ma(val);
0544     xen_extend_mmu_update(&u);
0545 }
0546 
0547 /*
0548  * Raw hypercall-based set_pgd, intended for use in early boot before
0549  * there's a page structure.  This implies:
0550  *  1. The only existing pagetable is the kernel's
0551  *  2. It is always pinned
0552  *  3. It has no user pagetable attached to it
0553  */
0554 static void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
0555 {
0556     preempt_disable();
0557 
0558     xen_mc_batch();
0559 
0560     __xen_set_pgd_hyper(ptr, val);
0561 
0562     xen_mc_issue(PARAVIRT_LAZY_MMU);
0563 
0564     preempt_enable();
0565 }
0566 
0567 static void xen_set_pgd(pgd_t *ptr, pgd_t val)
0568 {
0569     pgd_t *user_ptr = xen_get_user_pgd(ptr);
0570 
0571     trace_xen_mmu_set_pgd(ptr, user_ptr, val);
0572 
0573     /* If page is not pinned, we can just update the entry
0574        directly */
0575     if (!xen_page_pinned(ptr)) {
0576         *ptr = val;
0577         if (user_ptr) {
0578             WARN_ON(xen_page_pinned(user_ptr));
0579             *user_ptr = val;
0580         }
0581         return;
0582     }
0583 
0584     /* If it's pinned, then we can at least batch the kernel and
0585        user updates together. */
0586     xen_mc_batch();
0587 
0588     __xen_set_pgd_hyper(ptr, val);
0589     if (user_ptr)
0590         __xen_set_pgd_hyper(user_ptr, val);
0591 
0592     xen_mc_issue(PARAVIRT_LAZY_MMU);
0593 }
0594 #endif  /* CONFIG_PGTABLE_LEVELS == 4 */
0595 
0596 /*
0597  * (Yet another) pagetable walker.  This one is intended for pinning a
0598  * pagetable.  This means that it walks a pagetable and calls the
0599  * callback function on each page it finds making up the page table,
0600  * at every level.  It walks the entire pagetable, but it only bothers
0601  * pinning pte pages which are below limit.  In the normal case this
0602  * will be STACK_TOP_MAX, but at boot we need to pin up to
0603  * FIXADDR_TOP.
0604  *
0605  * For 32-bit the important bit is that we don't pin beyond there,
0606  * because then we start getting into Xen's ptes.
0607  *
0608  * For 64-bit, we must skip the Xen hole in the middle of the address
0609  * space, just after the big x86-64 virtual hole.
0610  */
0611 static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
0612               int (*func)(struct mm_struct *mm, struct page *,
0613                       enum pt_level),
0614               unsigned long limit)
0615 {
0616     int flush = 0;
0617     unsigned hole_low, hole_high;
0618     unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
0619     unsigned pgdidx, pudidx, pmdidx;
0620 
0621     /* The limit is the last byte to be touched */
0622     limit--;
0623     BUG_ON(limit >= FIXADDR_TOP);
0624 
0625     if (xen_feature(XENFEAT_auto_translated_physmap))
0626         return 0;
0627 
0628     /*
0629      * 64-bit has a great big hole in the middle of the address
0630      * space, which contains the Xen mappings.  On 32-bit these
0631      * will end up making a zero-sized hole, so this is a no-op.
0632      */
0633     hole_low = pgd_index(USER_LIMIT);
0634     hole_high = pgd_index(PAGE_OFFSET);
0635 
0636     pgdidx_limit = pgd_index(limit);
0637 #if PTRS_PER_PUD > 1
0638     pudidx_limit = pud_index(limit);
0639 #else
0640     pudidx_limit = 0;
0641 #endif
0642 #if PTRS_PER_PMD > 1
0643     pmdidx_limit = pmd_index(limit);
0644 #else
0645     pmdidx_limit = 0;
0646 #endif
0647 
0648     for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) {
0649         pud_t *pud;
0650 
0651         if (pgdidx >= hole_low && pgdidx < hole_high)
0652             continue;
0653 
0654         if (!pgd_val(pgd[pgdidx]))
0655             continue;
0656 
0657         pud = pud_offset(&pgd[pgdidx], 0);
0658 
0659         if (PTRS_PER_PUD > 1) /* not folded */
0660             flush |= (*func)(mm, virt_to_page(pud), PT_PUD);
0661 
0662         for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) {
0663             pmd_t *pmd;
0664 
0665             if (pgdidx == pgdidx_limit &&
0666                 pudidx > pudidx_limit)
0667                 goto out;
0668 
0669             if (pud_none(pud[pudidx]))
0670                 continue;
0671 
0672             pmd = pmd_offset(&pud[pudidx], 0);
0673 
0674             if (PTRS_PER_PMD > 1) /* not folded */
0675                 flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);
0676 
0677             for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) {
0678                 struct page *pte;
0679 
0680                 if (pgdidx == pgdidx_limit &&
0681                     pudidx == pudidx_limit &&
0682                     pmdidx > pmdidx_limit)
0683                     goto out;
0684 
0685                 if (pmd_none(pmd[pmdidx]))
0686                     continue;
0687 
0688                 pte = pmd_page(pmd[pmdidx]);
0689                 flush |= (*func)(mm, pte, PT_PTE);
0690             }
0691         }
0692     }
0693 
0694 out:
0695     /* Do the top level last, so that the callbacks can use it as
0696        a cue to do final things like tlb flushes. */
0697     flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);
0698 
0699     return flush;
0700 }
0701 
0702 static int xen_pgd_walk(struct mm_struct *mm,
0703             int (*func)(struct mm_struct *mm, struct page *,
0704                     enum pt_level),
0705             unsigned long limit)
0706 {
0707     return __xen_pgd_walk(mm, mm->pgd, func, limit);
0708 }
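/*
 * Editor's note: a hedged usage sketch (not part of the original file) of
 * the walker above.  The callback is invoked once per pagetable page; the
 * OR of its return values is passed back as a "needs flush" hint (see
 * __xen_pgd_pin() below):
 *
 *	static int example_noop_cb(struct mm_struct *mm, struct page *page,
 *				   enum pt_level level)
 *	{
 *		return 0;		// nothing needs flushing
 *	}
 *
 *	...
 *	xen_pgd_walk(mm, example_noop_cb, STACK_TOP_MAX);
 */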
0709 
0710 /* If we're using split pte locks, then take the page's lock and
0711    return a pointer to it.  Otherwise return NULL. */
0712 static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
0713 {
0714     spinlock_t *ptl = NULL;
0715 
0716 #if USE_SPLIT_PTE_PTLOCKS
0717     ptl = ptlock_ptr(page);
0718     spin_lock_nest_lock(ptl, &mm->page_table_lock);
0719 #endif
0720 
0721     return ptl;
0722 }
0723 
0724 static void xen_pte_unlock(void *v)
0725 {
0726     spinlock_t *ptl = v;
0727     spin_unlock(ptl);
0728 }
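/*
 * Editor's note: an illustrative sketch (not part of the original file) of
 * how this lock/unlock pair is combined with the multicall machinery in
 * xen_pin_page() below: the unlock is deferred until the batch doing the
 * pin has actually been issued:
 *
 *	spinlock_t *ptl = xen_pte_lock(page, mm);
 *
 *	// ... queue the read-only remap and MMUEXT_PIN_L1_TABLE ops ...
 *
 *	if (ptl)
 *		xen_mc_callback(xen_pte_unlock, ptl);	// unlock when the batch completes
 */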
0729 
0730 static void xen_do_pin(unsigned level, unsigned long pfn)
0731 {
0732     struct mmuext_op op;
0733 
0734     op.cmd = level;
0735     op.arg1.mfn = pfn_to_mfn(pfn);
0736 
0737     xen_extend_mmuext_op(&op);
0738 }
0739 
0740 static int xen_pin_page(struct mm_struct *mm, struct page *page,
0741             enum pt_level level)
0742 {
0743     unsigned pgfl = TestSetPagePinned(page);
0744     int flush;
0745 
0746     if (pgfl)
0747         flush = 0;      /* already pinned */
0748     else if (PageHighMem(page))
0749         /* kmaps need flushing if we found an unpinned
0750            highpage */
0751         flush = 1;
0752     else {
0753         void *pt = lowmem_page_address(page);
0754         unsigned long pfn = page_to_pfn(page);
0755         struct multicall_space mcs = __xen_mc_entry(0);
0756         spinlock_t *ptl;
0757 
0758         flush = 0;
0759 
0760         /*
0761          * We need to hold the pagetable lock between the time
0762          * we make the pagetable RO and when we actually pin
0763          * it.  If we don't, then other users may come in and
0764          * attempt to update the pagetable by writing it,
0765          * which will fail because the memory is RO but not
0766          * pinned, so Xen won't do the trap'n'emulate.
0767          *
0768          * If we're using split pte locks, we can't hold the
0769          * entire pagetable's worth of locks during the
0770          * traverse, because we may wrap the preempt count (8
0771          * bits).  The solution is to mark RO and pin each PTE
0772          * page while holding the lock.  This means the number
0773          * of locks we end up holding is never more than a
0774          * batch size (~32 entries, at present).
0775          *
0776          * If we're not using split pte locks, we needn't pin
0777          * the PTE pages independently, because we're
0778          * protected by the overall pagetable lock.
0779          */
0780         ptl = NULL;
0781         if (level == PT_PTE)
0782             ptl = xen_pte_lock(page, mm);
0783 
0784         MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
0785                     pfn_pte(pfn, PAGE_KERNEL_RO),
0786                     level == PT_PGD ? UVMF_TLB_FLUSH : 0);
0787 
0788         if (ptl) {
0789             xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);
0790 
0791             /* Queue a deferred unlock for when this batch
0792                is completed. */
0793             xen_mc_callback(xen_pte_unlock, ptl);
0794         }
0795     }
0796 
0797     return flush;
0798 }
0799 
0800 /* This is called just after a mm has been created, but it has not
0801    been used yet.  We need to make sure that its pagetable is all
0802    read-only, and can be pinned. */
0803 static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
0804 {
0805     trace_xen_mmu_pgd_pin(mm, pgd);
0806 
0807     xen_mc_batch();
0808 
0809     if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
0810         /* re-enable interrupts for flushing */
0811         xen_mc_issue(0);
0812 
0813         kmap_flush_unused();
0814 
0815         xen_mc_batch();
0816     }
0817 
0818 #ifdef CONFIG_X86_64
0819     {
0820         pgd_t *user_pgd = xen_get_user_pgd(pgd);
0821 
0822         xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));
0823 
0824         if (user_pgd) {
0825             xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
0826             xen_do_pin(MMUEXT_PIN_L4_TABLE,
0827                    PFN_DOWN(__pa(user_pgd)));
0828         }
0829     }
0830 #else /* CONFIG_X86_32 */
0831 #ifdef CONFIG_X86_PAE
0832     /* Need to make sure unshared kernel PMD is pinnable */
0833     xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
0834              PT_PMD);
0835 #endif
0836     xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
0837 #endif /* CONFIG_X86_64 */
0838     xen_mc_issue(0);
0839 }
0840 
0841 static void xen_pgd_pin(struct mm_struct *mm)
0842 {
0843     __xen_pgd_pin(mm, mm->pgd);
0844 }
0845 
0846 /*
0847  * On save, we need to pin all pagetables to make sure they get their
0848  * mfns turned into pfns.  Search the list for any unpinned pgds and pin
0849  * them (unpinned pgds are not currently in use, probably because the
0850  * process is under construction or destruction).
0851  *
0852  * Expected to be called in stop_machine() ("equivalent to taking
0853  * every spinlock in the system"), so the locking doesn't really
0854  * matter all that much.
0855  */
0856 void xen_mm_pin_all(void)
0857 {
0858     struct page *page;
0859 
0860     spin_lock(&pgd_lock);
0861 
0862     list_for_each_entry(page, &pgd_list, lru) {
0863         if (!PagePinned(page)) {
0864             __xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
0865             SetPageSavePinned(page);
0866         }
0867     }
0868 
0869     spin_unlock(&pgd_lock);
0870 }
0871 
0872 /*
0873  * The init_mm pagetable is really pinned as soon as it's created, but
0874  * that's before we have page structures to store the bits.  So do all
0875  * the book-keeping now.
0876  */
0877 static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
0878                   enum pt_level level)
0879 {
0880     SetPagePinned(page);
0881     return 0;
0882 }
0883 
0884 static void __init xen_mark_init_mm_pinned(void)
0885 {
0886     xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
0887 }
0888 
0889 static int xen_unpin_page(struct mm_struct *mm, struct page *page,
0890               enum pt_level level)
0891 {
0892     unsigned pgfl = TestClearPagePinned(page);
0893 
0894     if (pgfl && !PageHighMem(page)) {
0895         void *pt = lowmem_page_address(page);
0896         unsigned long pfn = page_to_pfn(page);
0897         spinlock_t *ptl = NULL;
0898         struct multicall_space mcs;
0899 
0900         /*
0901          * Do the converse to pin_page.  If we're using split
0902          * pte locks, we must be holding the lock while
0903          * the pte page is unpinned but still RO to prevent
0904          * concurrent updates from seeing it in this
0905          * partially-pinned state.
0906          */
0907         if (level == PT_PTE) {
0908             ptl = xen_pte_lock(page, mm);
0909 
0910             if (ptl)
0911                 xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
0912         }
0913 
0914         mcs = __xen_mc_entry(0);
0915 
0916         MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
0917                     pfn_pte(pfn, PAGE_KERNEL),
0918                     level == PT_PGD ? UVMF_TLB_FLUSH : 0);
0919 
0920         if (ptl) {
0921             /* unlock when batch completed */
0922             xen_mc_callback(xen_pte_unlock, ptl);
0923         }
0924     }
0925 
0926     return 0;       /* never need to flush on unpin */
0927 }
0928 
0929 /* Release a pagetable's pages back as normal RW */
0930 static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
0931 {
0932     trace_xen_mmu_pgd_unpin(mm, pgd);
0933 
0934     xen_mc_batch();
0935 
0936     xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
0937 
0938 #ifdef CONFIG_X86_64
0939     {
0940         pgd_t *user_pgd = xen_get_user_pgd(pgd);
0941 
0942         if (user_pgd) {
0943             xen_do_pin(MMUEXT_UNPIN_TABLE,
0944                    PFN_DOWN(__pa(user_pgd)));
0945             xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
0946         }
0947     }
0948 #endif
0949 
0950 #ifdef CONFIG_X86_PAE
0951     /* Need to make sure unshared kernel PMD is unpinned */
0952     xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
0953                PT_PMD);
0954 #endif
0955 
0956     __xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);
0957 
0958     xen_mc_issue(0);
0959 }
0960 
0961 static void xen_pgd_unpin(struct mm_struct *mm)
0962 {
0963     __xen_pgd_unpin(mm, mm->pgd);
0964 }
0965 
0966 /*
0967  * On resume, undo any pinning done at save, so that the rest of the
0968  * kernel doesn't see any unexpected pinned pagetables.
0969  */
0970 void xen_mm_unpin_all(void)
0971 {
0972     struct page *page;
0973 
0974     spin_lock(&pgd_lock);
0975 
0976     list_for_each_entry(page, &pgd_list, lru) {
0977         if (PageSavePinned(page)) {
0978             BUG_ON(!PagePinned(page));
0979             __xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
0980             ClearPageSavePinned(page);
0981         }
0982     }
0983 
0984     spin_unlock(&pgd_lock);
0985 }
0986 
0987 static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
0988 {
0989     spin_lock(&next->page_table_lock);
0990     xen_pgd_pin(next);
0991     spin_unlock(&next->page_table_lock);
0992 }
0993 
0994 static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
0995 {
0996     spin_lock(&mm->page_table_lock);
0997     xen_pgd_pin(mm);
0998     spin_unlock(&mm->page_table_lock);
0999 }
1000 
1001 
1002 #ifdef CONFIG_SMP
1003 /* Another cpu may still have its %cr3 pointing at the pagetable, so
1004    we need to repoint it somewhere else before we can unpin it. */
1005 static void drop_other_mm_ref(void *info)
1006 {
1007     struct mm_struct *mm = info;
1008     struct mm_struct *active_mm;
1009 
1010     active_mm = this_cpu_read(cpu_tlbstate.active_mm);
1011 
1012     if (active_mm == mm && this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK)
1013         leave_mm(smp_processor_id());
1014 
1015     /* If this cpu still has a stale cr3 reference, then make sure
1016        it has been flushed. */
1017     if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))
1018         load_cr3(swapper_pg_dir);
1019 }
1020 
1021 static void xen_drop_mm_ref(struct mm_struct *mm)
1022 {
1023     cpumask_var_t mask;
1024     unsigned cpu;
1025 
1026     if (current->active_mm == mm) {
1027         if (current->mm == mm)
1028             load_cr3(swapper_pg_dir);
1029         else
1030             leave_mm(smp_processor_id());
1031     }
1032 
1033     /* Get the "official" set of cpus referring to our pagetable. */
1034     if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
1035         for_each_online_cpu(cpu) {
1036             if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
1037                 && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
1038                 continue;
1039             smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
1040         }
1041         return;
1042     }
1043     cpumask_copy(mask, mm_cpumask(mm));
1044 
1045     /* It's possible that a vcpu may have a stale reference to our
1046        cr3, because it's in lazy mode and hasn't yet flushed
1047        its set of pending hypercalls.  In this case, we can
1048        look at its actual current cr3 value, and force it to flush
1049        if needed. */
1050     for_each_online_cpu(cpu) {
1051         if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
1052             cpumask_set_cpu(cpu, mask);
1053     }
1054 
1055     if (!cpumask_empty(mask))
1056         smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
1057     free_cpumask_var(mask);
1058 }
1059 #else
1060 static void xen_drop_mm_ref(struct mm_struct *mm)
1061 {
1062     if (current->active_mm == mm)
1063         load_cr3(swapper_pg_dir);
1064 }
1065 #endif
1066 
1067 /*
1068  * While a process runs, Xen pins its pagetables, which means that the
1069  * hypervisor forces it to be read-only, and it controls all updates
1070  * to it.  This means that all pagetable updates have to go via the
1071  * hypervisor, which is moderately expensive.
1072  *
1073  * Since we're pulling the pagetable down, we switch to use init_mm,
1074  * unpin the old process pagetable and mark it all read-write, which
1075  * allows further operations on it to be simple memory accesses.
1076  *
1077  * The only subtle point is that another CPU may be still using the
1078  * pagetable because of lazy tlb flushing.  This means we need to
1079  * switch all CPUs off this pagetable before we can unpin it.
1080  */
1081 static void xen_exit_mmap(struct mm_struct *mm)
1082 {
1083     get_cpu();      /* make sure we don't move around */
1084     xen_drop_mm_ref(mm);
1085     put_cpu();
1086 
1087     spin_lock(&mm->page_table_lock);
1088 
1089     /* pgd may not be pinned in the error exit path of execve */
1090     if (xen_page_pinned(mm->pgd))
1091         xen_pgd_unpin(mm);
1092 
1093     spin_unlock(&mm->page_table_lock);
1094 }
1095 
1096 static void xen_post_allocator_init(void);
1097 
1098 static void __init pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1099 {
1100     struct mmuext_op op;
1101 
1102     op.cmd = cmd;
1103     op.arg1.mfn = pfn_to_mfn(pfn);
1104     if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
1105         BUG();
1106 }
1107 
1108 #ifdef CONFIG_X86_64
1109 static void __init xen_cleanhighmap(unsigned long vaddr,
1110                     unsigned long vaddr_end)
1111 {
1112     unsigned long kernel_end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
1113     pmd_t *pmd = level2_kernel_pgt + pmd_index(vaddr);
1114 
1115     /* NOTE: The loop is more greedy than the cleanup_highmap variant.
1116      * We include the PMD passed in on _both_ boundaries. */
1117     for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD));
1118             pmd++, vaddr += PMD_SIZE) {
1119         if (pmd_none(*pmd))
1120             continue;
1121         if (vaddr < (unsigned long) _text || vaddr > kernel_end)
1122             set_pmd(pmd, __pmd(0));
1123     }
1124     /* In case we did something silly, we should crash in this function
1125      * instead of somewhere later, which would be confusing. */
1126     xen_mc_flush();
1127 }
1128 
1129 /*
1130  * Make a page range writeable and free it.
1131  */
1132 static void __init xen_free_ro_pages(unsigned long paddr, unsigned long size)
1133 {
1134     void *vaddr = __va(paddr);
1135     void *vaddr_end = vaddr + size;
1136 
1137     for (; vaddr < vaddr_end; vaddr += PAGE_SIZE)
1138         make_lowmem_page_readwrite(vaddr);
1139 
1140     memblock_free(paddr, size);
1141 }
1142 
1143 static void __init xen_cleanmfnmap_free_pgtbl(void *pgtbl, bool unpin)
1144 {
1145     unsigned long pa = __pa(pgtbl) & PHYSICAL_PAGE_MASK;
1146 
1147     if (unpin)
1148         pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(pa));
1149     ClearPagePinned(virt_to_page(__va(pa)));
1150     xen_free_ro_pages(pa, PAGE_SIZE);
1151 }
1152 
1153 /*
1154  * Since it is well isolated we can (and since it is perhaps large we should)
1155  * also free the page tables mapping the initial P->M table.
1156  */
1157 static void __init xen_cleanmfnmap(unsigned long vaddr)
1158 {
1159     unsigned long va = vaddr & PMD_MASK;
1160     unsigned long pa;
1161     pgd_t *pgd = pgd_offset_k(va);
1162     pud_t *pud_page = pud_offset(pgd, 0);
1163     pud_t *pud;
1164     pmd_t *pmd;
1165     pte_t *pte;
1166     unsigned int i;
1167     bool unpin;
1168 
1169     unpin = (vaddr == 2 * PGDIR_SIZE);
1170     set_pgd(pgd, __pgd(0));
1171     do {
1172         pud = pud_page + pud_index(va);
1173         if (pud_none(*pud)) {
1174             va += PUD_SIZE;
1175         } else if (pud_large(*pud)) {
1176             pa = pud_val(*pud) & PHYSICAL_PAGE_MASK;
1177             xen_free_ro_pages(pa, PUD_SIZE);
1178             va += PUD_SIZE;
1179         } else {
1180             pmd = pmd_offset(pud, va);
1181             if (pmd_large(*pmd)) {
1182                 pa = pmd_val(*pmd) & PHYSICAL_PAGE_MASK;
1183                 xen_free_ro_pages(pa, PMD_SIZE);
1184             } else if (!pmd_none(*pmd)) {
1185                 pte = pte_offset_kernel(pmd, va);
1186                 set_pmd(pmd, __pmd(0));
1187                 for (i = 0; i < PTRS_PER_PTE; ++i) {
1188                     if (pte_none(pte[i]))
1189                         break;
1190                     pa = pte_pfn(pte[i]) << PAGE_SHIFT;
1191                     xen_free_ro_pages(pa, PAGE_SIZE);
1192                 }
1193                 xen_cleanmfnmap_free_pgtbl(pte, unpin);
1194             }
1195             va += PMD_SIZE;
1196             if (pmd_index(va))
1197                 continue;
1198             set_pud(pud, __pud(0));
1199             xen_cleanmfnmap_free_pgtbl(pmd, unpin);
1200         }
1201 
1202     } while (pud_index(va) || pmd_index(va));
1203     xen_cleanmfnmap_free_pgtbl(pud_page, unpin);
1204 }
1205 
1206 static void __init xen_pagetable_p2m_free(void)
1207 {
1208     unsigned long size;
1209     unsigned long addr;
1210 
1211     size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
1212 
1213     /* No memory or already called. */
1214     if ((unsigned long)xen_p2m_addr == xen_start_info->mfn_list)
1215         return;
1216 
1217     /* using __ka address and sticking INVALID_P2M_ENTRY! */
1218     memset((void *)xen_start_info->mfn_list, 0xff, size);
1219 
1220     addr = xen_start_info->mfn_list;
1221     /*
1222      * We could be in __ka space.
1223      * We round up to the PMD, which means that if anybody at this stage is
1224      * using the __ka address of xen_start_info or
1225      * xen_start_info->shared_info they are going to crash. Fortunately
1226      * we have already revectored in xen_setup_kernel_pagetable and in
1227      * xen_setup_shared_info.
1228      */
1229     size = roundup(size, PMD_SIZE);
1230 
1231     if (addr >= __START_KERNEL_map) {
1232         xen_cleanhighmap(addr, addr + size);
1233         size = PAGE_ALIGN(xen_start_info->nr_pages *
1234                   sizeof(unsigned long));
1235         memblock_free(__pa(addr), size);
1236     } else {
1237         xen_cleanmfnmap(addr);
1238     }
1239 }
1240 
1241 static void __init xen_pagetable_cleanhighmap(void)
1242 {
1243     unsigned long size;
1244     unsigned long addr;
1245 
1246     /* At this stage, cleanup_highmap has already cleaned __ka space
1247      * from _brk_limit way up to the max_pfn_mapped (which is the end of
1248      * the ramdisk). We continue on, erasing PMD entries that point to page
1249      * tables - do note that they are accessible at this stage via __va.
1250      * For good measure we also round up to the PMD - which means that if
1251      * anybody is using a __ka address for the initial boot-stack and tries
1252      * to use it, they are going to crash. The xen_start_info has been
1253      * taken care of already in xen_setup_kernel_pagetable. */
1254     addr = xen_start_info->pt_base;
1255     size = roundup(xen_start_info->nr_pt_frames * PAGE_SIZE, PMD_SIZE);
1256 
1257     xen_cleanhighmap(addr, addr + size);
1258     xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base));
1259 #ifdef DEBUG
1260     /* This is superfluous and not strictly necessary, but let's do it
1261      * anyway. The MODULES_VADDR -> MODULES_END range should be clear of
1262      * anything at this stage. */
1263     xen_cleanhighmap(MODULES_VADDR, roundup(MODULES_VADDR, PUD_SIZE) - 1);
1264 #endif
1265 }
1266 #endif
1267 
1268 static void __init xen_pagetable_p2m_setup(void)
1269 {
1270     if (xen_feature(XENFEAT_auto_translated_physmap))
1271         return;
1272 
1273     xen_vmalloc_p2m_tree();
1274 
1275 #ifdef CONFIG_X86_64
1276     xen_pagetable_p2m_free();
1277 
1278     xen_pagetable_cleanhighmap();
1279 #endif
1280     /* And revector! Bye bye old array */
1281     xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
1282 }
1283 
1284 static void __init xen_pagetable_init(void)
1285 {
1286     paging_init();
1287     xen_post_allocator_init();
1288 
1289     xen_pagetable_p2m_setup();
1290 
1291     /* Allocate and initialize top and mid mfn levels for p2m structure */
1292     xen_build_mfn_list_list();
1293 
1294     /* Remap memory freed due to conflicts with E820 map */
1295     if (!xen_feature(XENFEAT_auto_translated_physmap))
1296         xen_remap_memory();
1297 
1298     xen_setup_shared_info();
1299 }
1300 static void xen_write_cr2(unsigned long cr2)
1301 {
1302     this_cpu_read(xen_vcpu)->arch.cr2 = cr2;
1303 }
1304 
1305 static unsigned long xen_read_cr2(void)
1306 {
1307     return this_cpu_read(xen_vcpu)->arch.cr2;
1308 }
1309 
1310 unsigned long xen_read_cr2_direct(void)
1311 {
1312     return this_cpu_read(xen_vcpu_info.arch.cr2);
1313 }
1314 
1315 void xen_flush_tlb_all(void)
1316 {
1317     struct mmuext_op *op;
1318     struct multicall_space mcs;
1319 
1320     trace_xen_mmu_flush_tlb_all(0);
1321 
1322     preempt_disable();
1323 
1324     mcs = xen_mc_entry(sizeof(*op));
1325 
1326     op = mcs.args;
1327     op->cmd = MMUEXT_TLB_FLUSH_ALL;
1328     MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1329 
1330     xen_mc_issue(PARAVIRT_LAZY_MMU);
1331 
1332     preempt_enable();
1333 }
1334 static void xen_flush_tlb(void)
1335 {
1336     struct mmuext_op *op;
1337     struct multicall_space mcs;
1338 
1339     trace_xen_mmu_flush_tlb(0);
1340 
1341     preempt_disable();
1342 
1343     mcs = xen_mc_entry(sizeof(*op));
1344 
1345     op = mcs.args;
1346     op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
1347     MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1348 
1349     xen_mc_issue(PARAVIRT_LAZY_MMU);
1350 
1351     preempt_enable();
1352 }
1353 
1354 static void xen_flush_tlb_single(unsigned long addr)
1355 {
1356     struct mmuext_op *op;
1357     struct multicall_space mcs;
1358 
1359     trace_xen_mmu_flush_tlb_single(addr);
1360 
1361     preempt_disable();
1362 
1363     mcs = xen_mc_entry(sizeof(*op));
1364     op = mcs.args;
1365     op->cmd = MMUEXT_INVLPG_LOCAL;
1366     op->arg1.linear_addr = addr & PAGE_MASK;
1367     MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1368 
1369     xen_mc_issue(PARAVIRT_LAZY_MMU);
1370 
1371     preempt_enable();
1372 }
1373 
1374 static void xen_flush_tlb_others(const struct cpumask *cpus,
1375                  struct mm_struct *mm, unsigned long start,
1376                  unsigned long end)
1377 {
1378     struct {
1379         struct mmuext_op op;
1380 #ifdef CONFIG_SMP
1381         DECLARE_BITMAP(mask, num_processors);
1382 #else
1383         DECLARE_BITMAP(mask, NR_CPUS);
1384 #endif
1385     } *args;
1386     struct multicall_space mcs;
1387 
1388     trace_xen_mmu_flush_tlb_others(cpus, mm, start, end);
1389 
1390     if (cpumask_empty(cpus))
1391         return;     /* nothing to do */
1392 
1393     mcs = xen_mc_entry(sizeof(*args));
1394     args = mcs.args;
1395     args->op.arg2.vcpumask = to_cpumask(args->mask);
1396 
1397     /* Remove us, and any offline CPUS. */
1398     cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
1399     cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
1400 
1401     args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
1402     if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
1403         args->op.cmd = MMUEXT_INVLPG_MULTI;
1404         args->op.arg1.linear_addr = start;
1405     }
1406 
1407     MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
1408 
1409     xen_mc_issue(PARAVIRT_LAZY_MMU);
1410 }
1411 
1412 static unsigned long xen_read_cr3(void)
1413 {
1414     return this_cpu_read(xen_cr3);
1415 }
1416 
1417 static void set_current_cr3(void *v)
1418 {
1419     this_cpu_write(xen_current_cr3, (unsigned long)v);
1420 }
1421 
1422 static void __xen_write_cr3(bool kernel, unsigned long cr3)
1423 {
1424     struct mmuext_op op;
1425     unsigned long mfn;
1426 
1427     trace_xen_mmu_write_cr3(kernel, cr3);
1428 
1429     if (cr3)
1430         mfn = pfn_to_mfn(PFN_DOWN(cr3));
1431     else
1432         mfn = 0;
1433 
1434     WARN_ON(mfn == 0 && kernel);
1435 
1436     op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
1437     op.arg1.mfn = mfn;
1438 
1439     xen_extend_mmuext_op(&op);
1440 
1441     if (kernel) {
1442         this_cpu_write(xen_cr3, cr3);
1443 
1444         /* Update xen_current_cr3 once the batch has actually
1445            been submitted. */
1446         xen_mc_callback(set_current_cr3, (void *)cr3);
1447     }
1448 }
1449 static void xen_write_cr3(unsigned long cr3)
1450 {
1451     BUG_ON(preemptible());
1452 
1453     xen_mc_batch();  /* disables interrupts */
1454 
1455     /* Update while interrupts are disabled, so it's atomic with
1456        respect to ipis */
1457     this_cpu_write(xen_cr3, cr3);
1458 
1459     __xen_write_cr3(true, cr3);
1460 
1461 #ifdef CONFIG_X86_64
1462     {
1463         pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
1464         if (user_pgd)
1465             __xen_write_cr3(false, __pa(user_pgd));
1466         else
1467             __xen_write_cr3(false, 0);
1468     }
1469 #endif
1470 
1471     xen_mc_issue(PARAVIRT_LAZY_CPU);  /* interrupts restored */
1472 }
1473 
1474 #ifdef CONFIG_X86_64
1475 /*
1476  * At the start of the day - when Xen launches a guest, it has already
1477  * built pagetables for the guest. We diligently look over them
1478  * in xen_setup_kernel_pagetable and graft them as appropriate into the
1479  * init_level4_pgt and its friends. Then when we are happy we load
1480  * the new init_level4_pgt - and continue on.
1481  *
1482  * The generic code starts (start_kernel) and 'init_mem_mapping' sets
1483  * up the rest of the pagetables. When it has completed it loads the cr3.
1484  * N.B. that baremetal would start at 'start_kernel' (and the early
1485  * #PF handler would create bootstrap pagetables) - so we are running
1486  * under the same assumptions about what to do when write_cr3 is executed
1487  * at this point.
1488  *
1489  * Since there are no user-page tables at all, we have two variants
1490  * of xen_write_cr3 - the early bootup (this one), and the late one
1491  * (xen_write_cr3). The reason we have to do that is that in 64-bit
1492  * the Linux kernel and user-space are both in ring 3 while the
1493  * hypervisor is in ring 0.
1494  */
1495 static void __init xen_write_cr3_init(unsigned long cr3)
1496 {
1497     BUG_ON(preemptible());
1498 
1499     xen_mc_batch();  /* disables interrupts */
1500 
1501     /* Update while interrupts are disabled, so it's atomic with
1502        respect to ipis */
1503     this_cpu_write(xen_cr3, cr3);
1504 
1505     __xen_write_cr3(true, cr3);
1506 
1507     xen_mc_issue(PARAVIRT_LAZY_CPU);  /* interrupts restored */
1508 }
1509 #endif
1510 
1511 static int xen_pgd_alloc(struct mm_struct *mm)
1512 {
1513     pgd_t *pgd = mm->pgd;
1514     int ret = 0;
1515 
1516     BUG_ON(PagePinned(virt_to_page(pgd)));
1517 
1518 #ifdef CONFIG_X86_64
1519     {
1520         struct page *page = virt_to_page(pgd);
1521         pgd_t *user_pgd;
1522 
1523         BUG_ON(page->private != 0);
1524 
1525         ret = -ENOMEM;
1526 
1527         user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
1528         page->private = (unsigned long)user_pgd;
1529 
1530         if (user_pgd != NULL) {
1531 #ifdef CONFIG_X86_VSYSCALL_EMULATION
1532             user_pgd[pgd_index(VSYSCALL_ADDR)] =
1533                 __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
1534 #endif
1535             ret = 0;
1536         }
1537 
1538         BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
1539     }
1540 #endif
1541 
1542     return ret;
1543 }
1544 
1545 static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1546 {
1547 #ifdef CONFIG_X86_64
1548     pgd_t *user_pgd = xen_get_user_pgd(pgd);
1549 
1550     if (user_pgd)
1551         free_page((unsigned long)user_pgd);
1552 #endif
1553 }
1554 
1555 /*
1556  * Init-time set_pte while constructing initial pagetables, which
1557  * doesn't allow RO page table pages to be remapped RW.
1558  *
1559  * If there is no MFN for this PFN then this page is initially
1560  * ballooned out so clear the PTE (as in decrease_reservation() in
1561  * drivers/xen/balloon.c).
1562  *
1563  * Many of these PTE updates are done on unpinned and writable pages
1564  * and doing a hypercall for these is unnecessary and expensive.  At
1565  * this point it is not possible to tell if a page is pinned or not,
1566  * so always write the PTE directly and rely on Xen trapping and
1567  * emulating any updates as necessary.
1568  */
1569 __visible pte_t xen_make_pte_init(pteval_t pte)
1570 {
1571 #ifdef CONFIG_X86_64
1572     unsigned long pfn;
1573 
1574     /*
1575      * Pages belonging to the initial p2m list mapped outside the default
1576      * address range must be mapped read-only. This region contains the
1577      * page tables for mapping the p2m list, too, and page tables MUST be
1578      * mapped read-only.
1579      */
1580     pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT;
1581     if (xen_start_info->mfn_list < __START_KERNEL_map &&
1582         pfn >= xen_start_info->first_p2m_pfn &&
1583         pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
1584         pte &= ~_PAGE_RW;
1585 #endif
1586     pte = pte_pfn_to_mfn(pte);
1587     return native_make_pte(pte);
1588 }
1589 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_init);
1590 
1591 static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
1592 {
1593 #ifdef CONFIG_X86_32
1594     /* If there's an existing pte, then don't allow _PAGE_RW to be set */
1595     if (pte_mfn(pte) != INVALID_P2M_ENTRY
1596         && pte_val_ma(*ptep) & _PAGE_PRESENT)
1597         pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
1598                    pte_val_ma(pte));
1599 #endif
1600     native_set_pte(ptep, pte);
1601 }
1602 
1603 /* Early in boot, while setting up the initial pagetable, assume
1604    everything is pinned. */
1605 static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
1606 {
1607 #ifdef CONFIG_FLATMEM
1608     BUG_ON(mem_map);    /* should only be used early */
1609 #endif
1610     make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
1611     pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1612 }
1613 
1614 /* Used for pmd and pud */
1615 static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
1616 {
1617 #ifdef CONFIG_FLATMEM
1618     BUG_ON(mem_map);    /* should only be used early */
1619 #endif
1620     make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
1621 }
1622 
1623 /* Early release_pte assumes that all pts are pinned, since there's
1624    only init_mm and anything attached to that is pinned. */
1625 static void __init xen_release_pte_init(unsigned long pfn)
1626 {
1627     pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1628     make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1629 }
1630 
1631 static void __init xen_release_pmd_init(unsigned long pfn)
1632 {
1633     make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1634 }
1635 
1636 static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1637 {
1638     struct multicall_space mcs;
1639     struct mmuext_op *op;
1640 
1641     mcs = __xen_mc_entry(sizeof(*op));
1642     op = mcs.args;
1643     op->cmd = cmd;
1644     op->arg1.mfn = pfn_to_mfn(pfn);
1645 
1646     MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
1647 }
1648 
1649 static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
1650 {
1651     struct multicall_space mcs;
1652     unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);
1653 
1654     mcs = __xen_mc_entry(0);
1655     MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
1656                 pfn_pte(pfn, prot), 0);
1657 }
1658 
1659 /* This needs to make sure the new pte page is pinned iff it's being
1660    attached to a pinned pagetable. */
1661 static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
1662                     unsigned level)
1663 {
1664     bool pinned = PagePinned(virt_to_page(mm->pgd));
1665 
1666     trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);
1667 
1668     if (pinned) {
1669         struct page *page = pfn_to_page(pfn);
1670 
1671         SetPagePinned(page);
1672 
1673         if (!PageHighMem(page)) {
1674             xen_mc_batch();
1675 
1676             __set_pfn_prot(pfn, PAGE_KERNEL_RO);
1677 
1678             if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
1679                 __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1680 
1681             xen_mc_issue(PARAVIRT_LAZY_MMU);
1682         } else {
1683             /* make sure there are no stray mappings of
1684                this page */
1685             kmap_flush_unused();
1686         }
1687     }
1688 }
1689 
1690 static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
1691 {
1692     xen_alloc_ptpage(mm, pfn, PT_PTE);
1693 }
1694 
1695 static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
1696 {
1697     xen_alloc_ptpage(mm, pfn, PT_PMD);
1698 }
1699 
1700 /* This should never happen until we're OK to use struct page */
1701 static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
1702 {
1703     struct page *page = pfn_to_page(pfn);
1704     bool pinned = PagePinned(page);
1705 
1706     trace_xen_mmu_release_ptpage(pfn, level, pinned);
1707 
1708     if (pinned) {
1709         if (!PageHighMem(page)) {
1710             xen_mc_batch();
1711 
1712             if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
1713                 __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1714 
1715             __set_pfn_prot(pfn, PAGE_KERNEL);
1716 
1717             xen_mc_issue(PARAVIRT_LAZY_MMU);
1718         }
1719         ClearPagePinned(page);
1720     }
1721 }
1722 
1723 static void xen_release_pte(unsigned long pfn)
1724 {
1725     xen_release_ptpage(pfn, PT_PTE);
1726 }
1727 
1728 static void xen_release_pmd(unsigned long pfn)
1729 {
1730     xen_release_ptpage(pfn, PT_PMD);
1731 }
1732 
1733 #if CONFIG_PGTABLE_LEVELS == 4
1734 static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
1735 {
1736     xen_alloc_ptpage(mm, pfn, PT_PUD);
1737 }
1738 
1739 static void xen_release_pud(unsigned long pfn)
1740 {
1741     xen_release_ptpage(pfn, PT_PUD);
1742 }
1743 #endif
1744 
1745 void __init xen_reserve_top(void)
1746 {
1747 #ifdef CONFIG_X86_32
1748     unsigned long top = HYPERVISOR_VIRT_START;
1749     struct xen_platform_parameters pp;
1750 
1751     if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
1752         top = pp.virt_start;
1753 
1754     reserve_top_address(-top);
1755 #endif  /* CONFIG_X86_32 */
1756 }
1757 
1758 /*
1759  * Like __va(), but returns the address in the kernel mapping (which is
1760  * all we have until the physical memory mapping has been set up).
1761  */
1762 static void * __init __ka(phys_addr_t paddr)
1763 {
1764 #ifdef CONFIG_X86_64
1765     return (void *)(paddr + __START_KERNEL_map);
1766 #else
1767     return __va(paddr);
1768 #endif
1769 }
1770 
1771 /* Convert a machine address to physical address */
1772 static unsigned long __init m2p(phys_addr_t maddr)
1773 {
1774     phys_addr_t paddr;
1775 
1776     maddr &= PTE_PFN_MASK;
1777     paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
1778 
1779     return paddr;
1780 }
1781 
1782 /* Convert a machine address to kernel virtual */
1783 static void * __init m2v(phys_addr_t maddr)
1784 {
1785     return __ka(m2p(maddr));
1786 }
1787 
1788 /* Set the page permissions on identity-mapped pages */
1789 static void __init set_page_prot_flags(void *addr, pgprot_t prot,
1790                        unsigned long flags)
1791 {
1792     unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
1793     pte_t pte = pfn_pte(pfn, prot);
1794 
1795     /* For PVH there is no need to set pages R/O or R/W to pin or unpin them. */
1796     if (xen_feature(XENFEAT_auto_translated_physmap))
1797         return;
1798 
1799     if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
1800         BUG();
1801 }
1802 static void __init set_page_prot(void *addr, pgprot_t prot)
1803 {
1804     return set_page_prot_flags(addr, prot, UVMF_NONE);
1805 }
1806 #ifdef CONFIG_X86_32
1807 static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
1808 {
1809     unsigned pmdidx, pteidx;
1810     unsigned ident_pte;
1811     unsigned long pfn;
1812 
1813     level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
1814                       PAGE_SIZE);
1815 
1816     ident_pte = 0;
1817     pfn = 0;
1818     for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
1819         pte_t *pte_page;
1820 
1821         /* Reuse or allocate a page of ptes */
1822         if (pmd_present(pmd[pmdidx]))
1823             pte_page = m2v(pmd[pmdidx].pmd);
1824         else {
1825             /* Check for free pte pages */
1826             if (ident_pte == LEVEL1_IDENT_ENTRIES)
1827                 break;
1828 
1829             pte_page = &level1_ident_pgt[ident_pte];
1830             ident_pte += PTRS_PER_PTE;
1831 
1832             pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
1833         }
1834 
1835         /* Install mappings */
1836         for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
1837             pte_t pte;
1838 
1839             if (pfn > max_pfn_mapped)
1840                 max_pfn_mapped = pfn;
1841 
1842             if (!pte_none(pte_page[pteidx]))
1843                 continue;
1844 
1845             pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
1846             pte_page[pteidx] = pte;
1847         }
1848     }
1849 
1850     for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
1851         set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
1852 
1853     set_page_prot(pmd, PAGE_KERNEL_RO);
1854 }
1855 #endif
1856 void __init xen_setup_machphys_mapping(void)
1857 {
1858     struct xen_machphys_mapping mapping;
1859 
1860     if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
1861         machine_to_phys_mapping = (unsigned long *)mapping.v_start;
1862         machine_to_phys_nr = mapping.max_mfn + 1;
1863     } else {
1864         machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
1865     }
1866 #ifdef CONFIG_X86_32
1867     WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1))
1868         < machine_to_phys_mapping);
1869 #endif
1870 }
1871 
1872 #ifdef CONFIG_X86_64
1873 static void __init convert_pfn_mfn(void *v)
1874 {
1875     pte_t *pte = v;
1876     int i;
1877 
1878     /* All levels are converted the same way, so just treat them
1879        as ptes. */
1880     for (i = 0; i < PTRS_PER_PTE; i++)
1881         pte[i] = xen_make_pte(pte[i].pte);
1882 }
1883 static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
1884                  unsigned long addr)
1885 {
1886     if (*pt_base == PFN_DOWN(__pa(addr))) {
1887         set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
1888         clear_page((void *)addr);
1889         (*pt_base)++;
1890     }
1891     if (*pt_end == PFN_DOWN(__pa(addr))) {
1892         set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
1893         clear_page((void *)addr);
1894         (*pt_end)--;
1895     }
1896 }
1897 /*
1898  * Set up the initial kernel pagetable.
1899  *
1900  * We can construct this by grafting the Xen-provided pagetable into
1901  * head_64.S's preconstructed pagetables.  We copy the Xen L2 tables into
1902  * level2_ident_pgt and level2_kernel_pgt.  This means that only the
1903  * kernel has a physical mapping to start with - but that's enough to
1904  * get __va working.  We need to fill in the rest of the physical
1905  * mapping once some sort of allocator has been set up.  NOTE: for
1906  * PVH, the page tables are native.
1907  */
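     /*
      * Resulting layout (a summary of the per-step comments in the code
      * below, for reference only):
      *
      *   init_level4_pgt[272] -> level3_ident_pgt[0]    -> level2_ident_pgt
      *   init_level4_pgt[511] -> level3_kernel_pgt[510] -> level2_kernel_pgt
      *                           level3_kernel_pgt[511] -> level2_fixmap_pgt
      *                           level2_fixmap_pgt[506] -> level1_fixmap_pgt
      */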
1908 void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
1909 {
1910     pud_t *l3;
1911     pmd_t *l2;
1912     unsigned long addr[3];
1913     unsigned long pt_base, pt_end;
1914     unsigned i;
1915 
1916     /* max_pfn_mapped is the last pfn mapped in the initial memory
1917      * mappings. Since on Xen the pages mapped after the kernel mappings
1918      * include some that don't exist in pfn space, we set max_pfn_mapped
1919      * to the last real pfn mapped. */
1920     if (xen_start_info->mfn_list < __START_KERNEL_map)
1921         max_pfn_mapped = xen_start_info->first_p2m_pfn;
1922     else
1923         max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
1924 
1925     pt_base = PFN_DOWN(__pa(xen_start_info->pt_base));
1926     pt_end = pt_base + xen_start_info->nr_pt_frames;
1927 
1928     /* Zap identity mapping */
1929     init_level4_pgt[0] = __pgd(0);
1930 
1931     if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1932         /* Pre-constructed entries are in pfn, so convert to mfn */
1933         /* L4[272] -> level3_ident_pgt
1934          * L4[511] -> level3_kernel_pgt */
1935         convert_pfn_mfn(init_level4_pgt);
1936 
1937         /* L3_i[0] -> level2_ident_pgt */
1938         convert_pfn_mfn(level3_ident_pgt);
1939         /* L3_k[510] -> level2_kernel_pgt
1940          * L3_k[511] -> level2_fixmap_pgt */
1941         convert_pfn_mfn(level3_kernel_pgt);
1942 
1943         /* L3_k[511][506] -> level1_fixmap_pgt */
1944         convert_pfn_mfn(level2_fixmap_pgt);
1945     }
1946     /* We get [511][510] and have Xen's version of level2_kernel_pgt */
1947     l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
1948     l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
1949 
1950     addr[0] = (unsigned long)pgd;
1951     addr[1] = (unsigned long)l3;
1952     addr[2] = (unsigned long)l2;
1953     /* Graft it onto L4[272][0]. Note that we are creating an aliasing problem:
1954      * both L4[272][0] and L4[511][510] have entries that point to the same
1955      * L2 (PMD) tables.  This means that if you modify it in __va space
1956      * it will also be modified in the __ka space! (But if you just
1957      * modify the PMD table to point to other PTEs or none, then you
1958      * are OK - which is what cleanup_highmap does.) */
1959     copy_page(level2_ident_pgt, l2);
1960     /* Graft it onto L4[511][510] */
1961     copy_page(level2_kernel_pgt, l2);
1962 
1963     /* Copy the initial P->M table mappings if necessary. */
1964     i = pgd_index(xen_start_info->mfn_list);
1965     if (i && i < pgd_index(__START_KERNEL_map))
1966         init_level4_pgt[i] = ((pgd_t *)xen_start_info->pt_base)[i];
1967 
1968     if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1969         /* Make pagetable pieces RO */
1970         set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
1971         set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
1972         set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
1973         set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
1974         set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
1975         set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
1976         set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
1977         set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
1978 
1979         /* Pin down new L4 */
1980         pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
1981                   PFN_DOWN(__pa_symbol(init_level4_pgt)));
1982 
1983         /* Unpin Xen-provided one */
1984         pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1985 
1986         /*
1987          * At this stage there can be no user pgd, and no page
1988          * structure to attach it to, so make sure we just set kernel
1989          * pgd.
1990          */
1991         xen_mc_batch();
1992         __xen_write_cr3(true, __pa(init_level4_pgt));
1993         xen_mc_issue(PARAVIRT_LAZY_CPU);
1994     } else
1995         native_write_cr3(__pa(init_level4_pgt));
1996 
1997     /* We can't easily rip out the L3 and L2 pages, as the Xen pagetables
1998      * are laid out as [L4], [L1], [L2], [L3], [L1], [L1], ... for the
1999      * initial domain, while for guests started by the toolstack they are
2000      * in the order [L4], [L3], [L2], [L1], [L1], ...  So for dom0 we can
2001      * only rip out the [L4] (pgd), but for other guests we shave off three pages.
2002      */
2003     for (i = 0; i < ARRAY_SIZE(addr); i++)
2004         check_pt_base(&pt_base, &pt_end, addr[i]);
2005 
2006     /* The Xen pagetable we are still using, now up to three pages smaller */
2007     xen_pt_base = PFN_PHYS(pt_base);
2008     xen_pt_size = (pt_end - pt_base) * PAGE_SIZE;
2009     memblock_reserve(xen_pt_base, xen_pt_size);
2010 
2011     /* Revector the xen_start_info */
2012     xen_start_info = (struct start_info *)__va(__pa(xen_start_info));
2013 }
2014 
2015 /*
2016  * Read a value from a physical address.
2017  */
2018 static unsigned long __init xen_read_phys_ulong(phys_addr_t addr)
2019 {
2020     unsigned long *vaddr;
2021     unsigned long val;
2022 
2023     vaddr = early_memremap_ro(addr, sizeof(val));
2024     val = *vaddr;
2025     early_memunmap(vaddr, sizeof(val));
2026     return val;
2027 }
2028 
2029 /*
2030  * Translate a virtual address to a physical one without relying on mapped
2031  * page tables.
2032  */
2033 static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
2034 {
2035     phys_addr_t pa;
2036     pgd_t pgd;
2037     pud_t pud;
2038     pmd_t pmd;
2039     pte_t pte;
2040 
2041     pa = read_cr3();
2042     pgd = native_make_pgd(xen_read_phys_ulong(pa + pgd_index(vaddr) *
2043                                sizeof(pgd)));
2044     if (!pgd_present(pgd))
2045         return 0;
2046 
2047     pa = pgd_val(pgd) & PTE_PFN_MASK;
2048     pud = native_make_pud(xen_read_phys_ulong(pa + pud_index(vaddr) *
2049                                sizeof(pud)));
2050     if (!pud_present(pud))
2051         return 0;
2052     pa = pud_pfn(pud) << PAGE_SHIFT;
2053     if (pud_large(pud))
2054         return pa + (vaddr & ~PUD_MASK);
2055 
2056     pmd = native_make_pmd(xen_read_phys_ulong(pa + pmd_index(vaddr) *
2057                                sizeof(pmd)));
2058     if (!pmd_present(pmd))
2059         return 0;
2060     pa = pmd_pfn(pmd) << PAGE_SHIFT;
2061     if (pmd_large(pmd))
2062         return pa + (vaddr & ~PMD_MASK);
2063 
2064     pte = native_make_pte(xen_read_phys_ulong(pa + pte_index(vaddr) *
2065                                sizeof(pte)));
2066     if (!pte_present(pte))
2067         return 0;
2068     pa = pte_pfn(pte) << PAGE_SHIFT;
2069 
2070     return pa | (vaddr & ~PAGE_MASK);
2071 }
2072 
2073 /*
2074  * Find a new area for the hypervisor-supplied p2m list and relocate the p2m to
2075  * this area.
2076  */
2077 void __init xen_relocate_p2m(void)
2078 {
2079     phys_addr_t size, new_area, pt_phys, pmd_phys, pud_phys;
2080     unsigned long p2m_pfn, p2m_pfn_end, n_frames, pfn, pfn_end;
2081     int n_pte, n_pt, n_pmd, n_pud, idx_pte, idx_pt, idx_pmd, idx_pud;
2082     pte_t *pt;
2083     pmd_t *pmd;
2084     pud_t *pud;
2085     pgd_t *pgd;
2086     unsigned long *new_p2m;
2087 
2088     size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
2089     n_pte = roundup(size, PAGE_SIZE) >> PAGE_SHIFT;
2090     n_pt = roundup(size, PMD_SIZE) >> PMD_SHIFT;
2091     n_pmd = roundup(size, PUD_SIZE) >> PUD_SHIFT;
2092     n_pud = roundup(size, PGDIR_SIZE) >> PGDIR_SHIFT;
2093     n_frames = n_pte + n_pt + n_pmd + n_pud;
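         /*
          * Worked example (illustrative only): for a domain with 1M pages
          * (4 GiB), size is 8 MiB, giving n_pte = 2048, n_pt = 4, n_pmd = 1
          * and n_pud = 1, i.e. n_frames = 2054 frames for the new p2m area.
          */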
2094 
2095     new_area = xen_find_free_area(PFN_PHYS(n_frames));
2096     if (!new_area) {
2097         xen_raw_console_write("Can't find new memory area for p2m needed due to E820 map conflict\n");
2098         BUG();
2099     }
2100 
2101     /*
2102      * Set up the page tables for addressing the new p2m list.
2103      * We have asked the hypervisor to map the p2m list at the user address
2104      * PUD_SIZE. It may have done so, or it may have used a kernel space
2105      * address, depending on the Xen version.
2106      * To avoid any possible virtual address collision, just use
2107      * 2 * PGDIR_SIZE for the new area (matching new_p2m below).
2108      */
2109     pud_phys = new_area;
2110     pmd_phys = pud_phys + PFN_PHYS(n_pud);
2111     pt_phys = pmd_phys + PFN_PHYS(n_pmd);
2112     p2m_pfn = PFN_DOWN(pt_phys) + n_pt;
2113 
2114     pgd = __va(read_cr3());
2115     new_p2m = (unsigned long *)(2 * PGDIR_SIZE);
2116     for (idx_pud = 0; idx_pud < n_pud; idx_pud++) {
2117         pud = early_memremap(pud_phys, PAGE_SIZE);
2118         clear_page(pud);
2119         for (idx_pmd = 0; idx_pmd < min(n_pmd, PTRS_PER_PUD);
2120              idx_pmd++) {
2121             pmd = early_memremap(pmd_phys, PAGE_SIZE);
2122             clear_page(pmd);
2123             for (idx_pt = 0; idx_pt < min(n_pt, PTRS_PER_PMD);
2124                  idx_pt++) {
2125                 pt = early_memremap(pt_phys, PAGE_SIZE);
2126                 clear_page(pt);
2127                 for (idx_pte = 0;
2128                      idx_pte < min(n_pte, PTRS_PER_PTE);
2129                      idx_pte++) {
2130                     set_pte(pt + idx_pte,
2131                         pfn_pte(p2m_pfn, PAGE_KERNEL));
2132                     p2m_pfn++;
2133                 }
2134                 n_pte -= PTRS_PER_PTE;
2135                 early_memunmap(pt, PAGE_SIZE);
2136                 make_lowmem_page_readonly(__va(pt_phys));
2137                 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE,
2138                           PFN_DOWN(pt_phys));
2139                 set_pmd(pmd + idx_pt,
2140                     __pmd(_PAGE_TABLE | pt_phys));
2141                 pt_phys += PAGE_SIZE;
2142             }
2143             n_pt -= PTRS_PER_PMD;
2144             early_memunmap(pmd, PAGE_SIZE);
2145             make_lowmem_page_readonly(__va(pmd_phys));
2146             pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE,
2147                       PFN_DOWN(pmd_phys));
2148             set_pud(pud + idx_pmd, __pud(_PAGE_TABLE | pmd_phys));
2149             pmd_phys += PAGE_SIZE;
2150         }
2151         n_pmd -= PTRS_PER_PUD;
2152         early_memunmap(pud, PAGE_SIZE);
2153         make_lowmem_page_readonly(__va(pud_phys));
2154         pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(pud_phys));
2155         set_pgd(pgd + 2 + idx_pud, __pgd(_PAGE_TABLE | pud_phys));
2156         pud_phys += PAGE_SIZE;
2157     }
2158 
2159     /* Now copy the old p2m info to the new area. */
2160     memcpy(new_p2m, xen_p2m_addr, size);
2161     xen_p2m_addr = new_p2m;
2162 
2163     /* Release the old p2m list and set new list info. */
2164     p2m_pfn = PFN_DOWN(xen_early_virt_to_phys(xen_start_info->mfn_list));
2165     BUG_ON(!p2m_pfn);
2166     p2m_pfn_end = p2m_pfn + PFN_DOWN(size);
2167 
2168     if (xen_start_info->mfn_list < __START_KERNEL_map) {
2169         pfn = xen_start_info->first_p2m_pfn;
2170         pfn_end = xen_start_info->first_p2m_pfn +
2171               xen_start_info->nr_p2m_frames;
2172         set_pgd(pgd + 1, __pgd(0));
2173     } else {
2174         pfn = p2m_pfn;
2175         pfn_end = p2m_pfn_end;
2176     }
2177 
2178     memblock_free(PFN_PHYS(pfn), PAGE_SIZE * (pfn_end - pfn));
2179     while (pfn < pfn_end) {
2180         if (pfn == p2m_pfn) {
2181             pfn = p2m_pfn_end;
2182             continue;
2183         }
2184         make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
2185         pfn++;
2186     }
2187 
2188     xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
2189     xen_start_info->first_p2m_pfn = PFN_DOWN(new_area);
2190     xen_start_info->nr_p2m_frames = n_frames;
2191 }
2192 
2193 #else   /* !CONFIG_X86_64 */
2194 static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
2195 static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);
2196 
2197 static void __init xen_write_cr3_init(unsigned long cr3)
2198 {
2199     unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));
2200 
2201     BUG_ON(read_cr3() != __pa(initial_page_table));
2202     BUG_ON(cr3 != __pa(swapper_pg_dir));
2203 
2204     /*
2205      * We are switching to swapper_pg_dir for the first time (from
2206      * initial_page_table) and therefore need to mark that page
2207      * read-only and then pin it.
2208      *
2209      * Xen disallows sharing of kernel PMDs for PAE
2210      * guests. Therefore we must copy the kernel PMD from
2211      * initial_page_table into a new kernel PMD to be used in
2212      * swapper_pg_dir.
2213      */
2214     swapper_kernel_pmd =
2215         extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
2216     copy_page(swapper_kernel_pmd, initial_kernel_pmd);
2217     swapper_pg_dir[KERNEL_PGD_BOUNDARY] =
2218         __pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT);
2219     set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO);
2220 
2221     set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
2222     xen_write_cr3(cr3);
2223     pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn);
2224 
2225     pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
2226               PFN_DOWN(__pa(initial_page_table)));
2227     set_page_prot(initial_page_table, PAGE_KERNEL);
2228     set_page_prot(initial_kernel_pmd, PAGE_KERNEL);
2229 
2230     pv_mmu_ops.write_cr3 = &xen_write_cr3;
2231 }
2232 
2233 /*
2234  * For 32-bit domains xen_start_info->pt_base is the pgd address, which might
2235  * not be the first page table in the page table pool.
2236  * Iterate through the initial page tables to find the real page table base.
2237  */
2238 static phys_addr_t xen_find_pt_base(pmd_t *pmd)
2239 {
2240     phys_addr_t pt_base, paddr;
2241     unsigned pmdidx;
2242 
2243     pt_base = min(__pa(xen_start_info->pt_base), __pa(pmd));
2244 
2245     for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++)
2246         if (pmd_present(pmd[pmdidx]) && !pmd_large(pmd[pmdidx])) {
2247             paddr = m2p(pmd[pmdidx].pmd);
2248             pt_base = min(pt_base, paddr);
2249         }
2250 
2251     return pt_base;
2252 }
2253 
2254 void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
2255 {
2256     pmd_t *kernel_pmd;
2257 
2258     kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
2259 
2260     xen_pt_base = xen_find_pt_base(kernel_pmd);
2261     xen_pt_size = xen_start_info->nr_pt_frames * PAGE_SIZE;
2262 
2263     initial_kernel_pmd =
2264         extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
2265 
2266     max_pfn_mapped = PFN_DOWN(xen_pt_base + xen_pt_size + 512 * 1024);
2267 
2268     copy_page(initial_kernel_pmd, kernel_pmd);
2269 
2270     xen_map_identity_early(initial_kernel_pmd, max_pfn);
2271 
2272     copy_page(initial_page_table, pgd);
2273     initial_page_table[KERNEL_PGD_BOUNDARY] =
2274         __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT);
2275 
2276     set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO);
2277     set_page_prot(initial_page_table, PAGE_KERNEL_RO);
2278     set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
2279 
2280     pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
2281 
2282     pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
2283               PFN_DOWN(__pa(initial_page_table)));
2284     xen_write_cr3(__pa(initial_page_table));
2285 
2286     memblock_reserve(xen_pt_base, xen_pt_size);
2287 }
2288 #endif  /* CONFIG_X86_64 */
2289 
2290 void __init xen_reserve_special_pages(void)
2291 {
2292     phys_addr_t paddr;
2293 
2294     memblock_reserve(__pa(xen_start_info), PAGE_SIZE);
2295     if (xen_start_info->store_mfn) {
2296         paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->store_mfn));
2297         memblock_reserve(paddr, PAGE_SIZE);
2298     }
2299     if (!xen_initial_domain()) {
2300         paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->console.domU.mfn));
2301         memblock_reserve(paddr, PAGE_SIZE);
2302     }
2303 }
2304 
2305 void __init xen_pt_check_e820(void)
2306 {
2307     if (xen_is_e820_reserved(xen_pt_base, xen_pt_size)) {
2308         xen_raw_console_write("Xen hypervisor allocated page table memory conflicts with E820 map\n");
2309         BUG();
2310     }
2311 }
2312 
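     /*
      * Scratch page used to back fixmap slots that must not map real
      * hardware (the local APIC and IO APIC ranges handled in
      * xen_set_fixmap() below); it is filled with 0xff in xen_init_mmu_ops().
      */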
2313 static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
2314 
2315 static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
2316 {
2317     pte_t pte;
2318 
2319     phys >>= PAGE_SHIFT;
2320 
2321     switch (idx) {
2322     case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
2323     case FIX_RO_IDT:
2324 #ifdef CONFIG_X86_32
2325     case FIX_WP_TEST:
2326 # ifdef CONFIG_HIGHMEM
2327     case FIX_KMAP_BEGIN ... FIX_KMAP_END:
2328 # endif
2329 #elif defined(CONFIG_X86_VSYSCALL_EMULATION)
2330     case VSYSCALL_PAGE:
2331 #endif
2332     case FIX_TEXT_POKE0:
2333     case FIX_TEXT_POKE1:
2334         /* All local page mappings */
2335         pte = pfn_pte(phys, prot);
2336         break;
2337 
2338 #ifdef CONFIG_X86_LOCAL_APIC
2339     case FIX_APIC_BASE: /* maps dummy local APIC */
2340         pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
2341         break;
2342 #endif
2343 
2344 #ifdef CONFIG_X86_IO_APIC
2345     case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END:
2346         /*
2347          * We just don't map the IO APIC - all access is via
2348          * hypercalls.  Point the pte at the dummy page instead.
2349          */
2350         pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
2351         break;
2352 #endif
2353 
2354     case FIX_PARAVIRT_BOOTMAP:
2355         /* This is an MFN, but it isn't an IO mapping from the
2356            IO domain */
2357         pte = mfn_pte(phys, prot);
2358         break;
2359 
2360     default:
2361         /* By default, set_fixmap is used for hardware mappings */
2362         pte = mfn_pte(phys, prot);
2363         break;
2364     }
2365 
2366     __native_set_fixmap(idx, pte);
2367 
2368 #ifdef CONFIG_X86_VSYSCALL_EMULATION
2369     /* Replicate changes to map the vsyscall page into the user
2370        pagetable vsyscall mapping. */
2371     if (idx == VSYSCALL_PAGE) {
2372         unsigned long vaddr = __fix_to_virt(idx);
2373         set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
2374     }
2375 #endif
2376 }
2377 
2378 static void __init xen_post_allocator_init(void)
2379 {
2380     if (xen_feature(XENFEAT_auto_translated_physmap))
2381         return;
2382 
2383     pv_mmu_ops.set_pte = xen_set_pte;
2384     pv_mmu_ops.set_pmd = xen_set_pmd;
2385     pv_mmu_ops.set_pud = xen_set_pud;
2386 #if CONFIG_PGTABLE_LEVELS == 4
2387     pv_mmu_ops.set_pgd = xen_set_pgd;
2388 #endif
2389 
2390     /* This will work as long as patching hasn't happened yet
2391        (which it hasn't) */
2392     pv_mmu_ops.alloc_pte = xen_alloc_pte;
2393     pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
2394     pv_mmu_ops.release_pte = xen_release_pte;
2395     pv_mmu_ops.release_pmd = xen_release_pmd;
2396 #if CONFIG_PGTABLE_LEVELS == 4
2397     pv_mmu_ops.alloc_pud = xen_alloc_pud;
2398     pv_mmu_ops.release_pud = xen_release_pud;
2399 #endif
2400     pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte);
2401 
2402 #ifdef CONFIG_X86_64
2403     pv_mmu_ops.write_cr3 = &xen_write_cr3;
2404     SetPagePinned(virt_to_page(level3_user_vsyscall));
2405 #endif
2406     xen_mark_init_mm_pinned();
2407 }
2408 
2409 static void xen_leave_lazy_mmu(void)
2410 {
2411     preempt_disable();
2412     xen_mc_flush();
2413     paravirt_leave_lazy_mmu();
2414     preempt_enable();
2415 }
2416 
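     /*
      * Note: .write_cr3 is initially xen_write_cr3_init and is replaced with
      * xen_write_cr3 once the kernel runs on its final pagetable (done in
      * xen_write_cr3_init() itself on 32-bit, and in xen_post_allocator_init()
      * on 64-bit).
      */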
2417 static const struct pv_mmu_ops xen_mmu_ops __initconst = {
2418     .read_cr2 = xen_read_cr2,
2419     .write_cr2 = xen_write_cr2,
2420 
2421     .read_cr3 = xen_read_cr3,
2422     .write_cr3 = xen_write_cr3_init,
2423 
2424     .flush_tlb_user = xen_flush_tlb,
2425     .flush_tlb_kernel = xen_flush_tlb,
2426     .flush_tlb_single = xen_flush_tlb_single,
2427     .flush_tlb_others = xen_flush_tlb_others,
2428 
2429     .pte_update = paravirt_nop,
2430 
2431     .pgd_alloc = xen_pgd_alloc,
2432     .pgd_free = xen_pgd_free,
2433 
2434     .alloc_pte = xen_alloc_pte_init,
2435     .release_pte = xen_release_pte_init,
2436     .alloc_pmd = xen_alloc_pmd_init,
2437     .release_pmd = xen_release_pmd_init,
2438 
2439     .set_pte = xen_set_pte_init,
2440     .set_pte_at = xen_set_pte_at,
2441     .set_pmd = xen_set_pmd_hyper,
2442 
2443     .ptep_modify_prot_start = __ptep_modify_prot_start,
2444     .ptep_modify_prot_commit = __ptep_modify_prot_commit,
2445 
2446     .pte_val = PV_CALLEE_SAVE(xen_pte_val),
2447     .pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
2448 
2449     .make_pte = PV_CALLEE_SAVE(xen_make_pte_init),
2450     .make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
2451 
2452 #ifdef CONFIG_X86_PAE
2453     .set_pte_atomic = xen_set_pte_atomic,
2454     .pte_clear = xen_pte_clear,
2455     .pmd_clear = xen_pmd_clear,
2456 #endif  /* CONFIG_X86_PAE */
2457     .set_pud = xen_set_pud_hyper,
2458 
2459     .make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
2460     .pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
2461 
2462 #if CONFIG_PGTABLE_LEVELS == 4
2463     .pud_val = PV_CALLEE_SAVE(xen_pud_val),
2464     .make_pud = PV_CALLEE_SAVE(xen_make_pud),
2465     .set_pgd = xen_set_pgd_hyper,
2466 
2467     .alloc_pud = xen_alloc_pmd_init,
2468     .release_pud = xen_release_pmd_init,
2469 #endif  /* CONFIG_PGTABLE_LEVELS == 4 */
2470 
2471     .activate_mm = xen_activate_mm,
2472     .dup_mmap = xen_dup_mmap,
2473     .exit_mmap = xen_exit_mmap,
2474 
2475     .lazy_mode = {
2476         .enter = paravirt_enter_lazy_mmu,
2477         .leave = xen_leave_lazy_mmu,
2478         .flush = paravirt_flush_lazy_mmu,
2479     },
2480 
2481     .set_fixmap = xen_set_fixmap,
2482 };
2483 
2484 void __init xen_init_mmu_ops(void)
2485 {
2486     x86_init.paging.pagetable_init = xen_pagetable_init;
2487 
2488     if (xen_feature(XENFEAT_auto_translated_physmap))
2489         return;
2490 
2491     pv_mmu_ops = xen_mmu_ops;
2492 
2493     memset(dummy_mapping, 0xff, PAGE_SIZE);
2494 }
2495 
2496 /* Protected by xen_reservation_lock. */
2497 #define MAX_CONTIG_ORDER 9 /* 2MB */
2498 static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
2499 
2500 #define VOID_PTE (mfn_pte(0, __pgprot(0)))
2501 static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
2502                 unsigned long *in_frames,
2503                 unsigned long *out_frames)
2504 {
2505     int i;
2506     struct multicall_space mcs;
2507 
2508     xen_mc_batch();
2509     for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
2510         mcs = __xen_mc_entry(0);
2511 
2512         if (in_frames)
2513             in_frames[i] = virt_to_mfn(vaddr);
2514 
2515         MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
2516         __set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
2517 
2518         if (out_frames)
2519             out_frames[i] = virt_to_pfn(vaddr);
2520     }
2521     xen_mc_issue(0);
2522 }
2523 
2524 /*
2525  * Update the pfn-to-mfn mappings for a virtual address range, either to
2526  * point to an array of mfns, or contiguously from a single starting
2527  * mfn.
2528  */
2529 static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
2530                      unsigned long *mfns,
2531                      unsigned long first_mfn)
2532 {
2533     unsigned i, limit;
2534     unsigned long mfn;
2535 
2536     xen_mc_batch();
2537 
2538     limit = 1u << order;
2539     for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
2540         struct multicall_space mcs;
2541         unsigned flags;
2542 
2543         mcs = __xen_mc_entry(0);
2544         if (mfns)
2545             mfn = mfns[i];
2546         else
2547             mfn = first_mfn + i;
2548 
2549         if (i < (limit - 1))
2550             flags = 0;
2551         else {
2552             if (order == 0)
2553                 flags = UVMF_INVLPG | UVMF_ALL;
2554             else
2555                 flags = UVMF_TLB_FLUSH | UVMF_ALL;
2556         }
2557 
2558         MULTI_update_va_mapping(mcs.mc, vaddr,
2559                 mfn_pte(mfn, PAGE_KERNEL), flags);
2560 
2561         set_phys_to_machine(virt_to_pfn(vaddr), mfn);
2562     }
2563 
2564     xen_mc_issue(0);
2565 }
2566 
2567 /*
2568  * Perform the hypercall to exchange a region of our pfns to point to
2569  * memory with the required contiguous alignment.  Takes the pfns as
2570  * input, and populates mfns as output.
2571  *
2572  * Returns a success code indicating whether the hypervisor was able to
2573  * satisfy the request or not.
2574  */
2575 static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
2576                    unsigned long *pfns_in,
2577                    unsigned long extents_out,
2578                    unsigned int order_out,
2579                    unsigned long *mfns_out,
2580                    unsigned int address_bits)
2581 {
2582     long rc;
2583     int success;
2584 
2585     struct xen_memory_exchange exchange = {
2586         .in = {
2587             .nr_extents   = extents_in,
2588             .extent_order = order_in,
2589             .extent_start = pfns_in,
2590             .domid        = DOMID_SELF
2591         },
2592         .out = {
2593             .nr_extents   = extents_out,
2594             .extent_order = order_out,
2595             .extent_start = mfns_out,
2596             .address_bits = address_bits,
2597             .domid        = DOMID_SELF
2598         }
2599     };
2600 
2601     BUG_ON(extents_in << order_in != extents_out << order_out);
2602 
2603     rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
2604     success = (exchange.nr_exchanged == extents_in);
2605 
2606     BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
2607     BUG_ON(success && (rc != 0));
2608 
2609     return success;
2610 }
2611 
2612 int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
2613                  unsigned int address_bits,
2614                  dma_addr_t *dma_handle)
2615 {
2616     unsigned long *in_frames = discontig_frames, out_frame;
2617     unsigned long  flags;
2618     int            success;
2619     unsigned long vstart = (unsigned long)phys_to_virt(pstart);
2620 
2621     /*
2622      * Currently an auto-translated guest will not perform I/O, nor will
2623      * it require PAE page directories below 4GB. Therefore any calls to
2624      * this function are redundant and can be ignored.
2625      */
2626 
2627     if (xen_feature(XENFEAT_auto_translated_physmap))
2628         return 0;
2629 
2630     if (unlikely(order > MAX_CONTIG_ORDER))
2631         return -ENOMEM;
2632 
2633     memset((void *) vstart, 0, PAGE_SIZE << order);
2634 
2635     spin_lock_irqsave(&xen_reservation_lock, flags);
2636 
2637     /* 1. Zap current PTEs, remembering MFNs. */
2638     xen_zap_pfn_range(vstart, order, in_frames, NULL);
2639 
2640     /* 2. Get a new contiguous memory extent. */
2641     out_frame = virt_to_pfn(vstart);
2642     success = xen_exchange_memory(1UL << order, 0, in_frames,
2643                       1, order, &out_frame,
2644                       address_bits);
2645 
2646     /* 3. Map the new extent in place of old pages. */
2647     if (success)
2648         xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
2649     else
2650         xen_remap_exchanged_ptes(vstart, order, in_frames, 0);
2651 
2652     spin_unlock_irqrestore(&xen_reservation_lock, flags);
2653 
2654     *dma_handle = virt_to_machine(vstart).maddr;
2655     return success ? 0 : -ENOMEM;
2656 }
2657 EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
2658 
2659 void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
2660 {
2661     unsigned long *out_frames = discontig_frames, in_frame;
2662     unsigned long  flags;
2663     int success;
2664     unsigned long vstart;
2665 
2666     if (xen_feature(XENFEAT_auto_translated_physmap))
2667         return;
2668 
2669     if (unlikely(order > MAX_CONTIG_ORDER))
2670         return;
2671 
2672     vstart = (unsigned long)phys_to_virt(pstart);
2673     memset((void *) vstart, 0, PAGE_SIZE << order);
2674 
2675     spin_lock_irqsave(&xen_reservation_lock, flags);
2676 
2677     /* 1. Find start MFN of contiguous extent. */
2678     in_frame = virt_to_mfn(vstart);
2679 
2680     /* 2. Zap current PTEs. */
2681     xen_zap_pfn_range(vstart, order, NULL, out_frames);
2682 
2683     /* 3. Do the exchange for non-contiguous MFNs. */
2684     success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
2685                     0, out_frames, 0);
2686 
2687     /* 4. Map new pages in place of old pages. */
2688     if (success)
2689         xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
2690     else
2691         xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);
2692 
2693     spin_unlock_irqrestore(&xen_reservation_lock, flags);
2694 }
2695 EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
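
     /*
      * Illustrative sketch only (the helper name and the order/address_bits
      * values are assumptions for the example, not part of the interface):
      * how a caller such as swiotlb-xen might pair the two functions above
      * to get a machine-contiguous buffer addressable with 32 bits.
      */
     static void __maybe_unused xen_contig_region_example(void)
     {
         unsigned int order = 4;     /* 16 pages, 64 KiB */
         unsigned long vstart = __get_free_pages(GFP_KERNEL, order);
         dma_addr_t dma_handle;

         if (!vstart)
             return;

         /* Exchange the backing frames for one machine-contiguous extent. */
         if (xen_create_contiguous_region(virt_to_phys((void *)vstart), order,
                          32, &dma_handle) == 0) {
             /* ... dma_handle is now usable as a bus address ... */
             xen_destroy_contiguous_region(virt_to_phys((void *)vstart),
                               order);
         }

         free_pages(vstart, order);
     }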
2696 
2697 #ifdef CONFIG_XEN_PVHVM
2698 #ifdef CONFIG_PROC_VMCORE
2699 /*
2700  * This function is used in two contexts:
2701  * - the kdump kernel has to check whether a pfn of the crashed kernel
2702  *   was a ballooned page. vmcore uses this function to decide
2703  *   whether to access a pfn of the crashed kernel.
2704  * - the kexec kernel has to check whether a pfn was ballooned by the
2705  *   previous kernel. If the pfn is ballooned, handle it properly.
2706  * Returns 0 if the pfn is not backed by a RAM page; the caller may
2707  * handle the pfn specially in this case.
2708  */
2709 static int xen_oldmem_pfn_is_ram(unsigned long pfn)
2710 {
2711     struct xen_hvm_get_mem_type a = {
2712         .domid = DOMID_SELF,
2713         .pfn = pfn,
2714     };
2715     int ram;
2716 
2717     if (HYPERVISOR_hvm_op(HVMOP_get_mem_type, &a))
2718         return -ENXIO;
2719 
2720     switch (a.mem_type) {
2721     case HVMMEM_mmio_dm:
2722         ram = 0;
2723         break;
2724     case HVMMEM_ram_rw:
2725     case HVMMEM_ram_ro:
2726     default:
2727         ram = 1;
2728         break;
2729     }
2730 
2731     return ram;
2732 }
2733 #endif
2734 
2735 static void xen_hvm_exit_mmap(struct mm_struct *mm)
2736 {
2737     struct xen_hvm_pagetable_dying a;
2738     int rc;
2739 
2740     a.domid = DOMID_SELF;
2741     a.gpa = __pa(mm->pgd);
2742     rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
2743     WARN_ON_ONCE(rc < 0);
2744 }
2745 
2746 static int is_pagetable_dying_supported(void)
2747 {
2748     struct xen_hvm_pagetable_dying a;
2749     int rc = 0;
2750 
2751     a.domid = DOMID_SELF;
2752     a.gpa = 0x00;
2753     rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
2754     if (rc < 0) {
2755         printk(KERN_DEBUG "HVMOP_pagetable_dying not supported\n");
2756         return 0;
2757     }
2758     return 1;
2759 }
2760 
2761 void __init xen_hvm_init_mmu_ops(void)
2762 {
2763     if (is_pagetable_dying_supported())
2764         pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap;
2765 #ifdef CONFIG_PROC_VMCORE
2766     register_oldmem_pfn_is_ram(&xen_oldmem_pfn_is_ram);
2767 #endif
2768 }
2769 #endif
2770 
2771 #define REMAP_BATCH_SIZE 16
2772 
2773 struct remap_data {
2774     xen_pfn_t *mfn;
2775     bool contiguous;
2776     pgprot_t prot;
2777     struct mmu_update *mmu_update;
2778 };
2779 
2780 static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
2781                  unsigned long addr, void *data)
2782 {
2783     struct remap_data *rmd = data;
2784     pte_t pte = pte_mkspecial(mfn_pte(*rmd->mfn, rmd->prot));
2785 
2786     /* If we have a contiguous range, just update the mfn itself,
2787        else advance the pointer to the next mfn. */
2788     if (rmd->contiguous)
2789         (*rmd->mfn)++;
2790     else
2791         rmd->mfn++;
2792 
2793     rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
2794     rmd->mmu_update->val = pte_val_ma(pte);
2795     rmd->mmu_update++;
2796 
2797     return 0;
2798 }
2799 
2800 static int do_remap_gfn(struct vm_area_struct *vma,
2801             unsigned long addr,
2802             xen_pfn_t *gfn, int nr,
2803             int *err_ptr, pgprot_t prot,
2804             unsigned domid,
2805             struct page **pages)
2806 {
2807     int err = 0;
2808     struct remap_data rmd;
2809     struct mmu_update mmu_update[REMAP_BATCH_SIZE];
2810     unsigned long range;
2811     int mapped = 0;
2812 
2813     BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
2814 
2815     if (xen_feature(XENFEAT_auto_translated_physmap)) {
2816 #ifdef CONFIG_XEN_PVH
2817         /* We need to update the local page tables and the xen HAP */
2818         return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
2819                          prot, domid, pages);
2820 #else
2821         return -EINVAL;
2822 #endif
2823     }
2824 
2825     rmd.mfn = gfn;
2826     rmd.prot = prot;
2827     /* We use err_ptr to indicate whether we are doing a contiguous
2828      * mapping or a discontiguous mapping. */
2829     rmd.contiguous = !err_ptr;
2830 
2831     while (nr) {
2832         int index = 0;
2833         int done = 0;
2834         int batch = min(REMAP_BATCH_SIZE, nr);
2835         int batch_left = batch;
2836         range = (unsigned long)batch << PAGE_SHIFT;
2837 
2838         rmd.mmu_update = mmu_update;
2839         err = apply_to_page_range(vma->vm_mm, addr, range,
2840                       remap_area_mfn_pte_fn, &rmd);
2841         if (err)
2842             goto out;
2843 
2844         /* We record the error for each page that fails, but
2845          * continue mapping until the whole set is done. */
2846         do {
2847             int i;
2848 
2849             err = HYPERVISOR_mmu_update(&mmu_update[index],
2850                             batch_left, &done, domid);
2851 
2852             /*
2853              * @err_ptr may be the same buffer as @gfn, so
2854              * only clear it after each chunk of @gfn is
2855              * used.
2856              */
2857             if (err_ptr) {
2858                 for (i = index; i < index + done; i++)
2859                     err_ptr[i] = 0;
2860             }
2861             if (err < 0) {
2862                 if (!err_ptr)
2863                     goto out;
2864                 err_ptr[i] = err;
2865                 done++; /* Skip failed frame. */
2866             } else
2867                 mapped += done;
2868             batch_left -= done;
2869             index += done;
2870         } while (batch_left);
2871 
2872         nr -= batch;
2873         addr += range;
2874         if (err_ptr)
2875             err_ptr += batch;
2876         cond_resched();
2877     }
2878 out:
2879 
2880     xen_flush_tlb_all();
2881 
2882     return err < 0 ? err : mapped;
2883 }
2884 
2885 int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
2886                    unsigned long addr,
2887                    xen_pfn_t gfn, int nr,
2888                    pgprot_t prot, unsigned domid,
2889                    struct page **pages)
2890 {
2891     return do_remap_gfn(vma, addr, &gfn, nr, NULL, prot, domid, pages);
2892 }
2893 EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);
2894 
2895 int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
2896                    unsigned long addr,
2897                    xen_pfn_t *gfn, int nr,
2898                    int *err_ptr, pgprot_t prot,
2899                    unsigned domid, struct page **pages)
2900 {
2901     /* We BUG_ON because it is a programmer error to pass a NULL err_ptr;
2902      * without it, it is later very hard to work out what actually caused
2903      * the wrong memory to be mapped in.
2904      */
2905     BUG_ON(err_ptr == NULL);
2906     return do_remap_gfn(vma, addr, gfn, nr, err_ptr, prot, domid, pages);
2907 }
2908 EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);
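
     /*
      * Illustrative sketch only (the wrapper name is an assumption): mapping
      * a batch of foreign gfns into a userspace VMA with per-frame error
      * reporting, roughly the calling convention the privcmd driver relies on.
      */
     static int __maybe_unused xen_remap_gfn_array_example(struct vm_area_struct *vma,
                                xen_pfn_t *gfns, int *errs,
                                int nr, unsigned int domid,
                                struct page **pages)
     {
         int mapped;

         /*
          * A negative return is an overall failure; otherwise errs[i] holds
          * the per-frame status and the return value counts mapped frames.
          */
         mapped = xen_remap_domain_gfn_array(vma, vma->vm_start, gfns, nr,
                             errs, vma->vm_page_prot,
                             domid, pages);
         return mapped < 0 ? mapped : 0;
     }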
2909 
2910 
2911 /* Returns: 0 success */
2912 int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
2913                    int numpgs, struct page **pages)
2914 {
2915     if (!pages || !xen_feature(XENFEAT_auto_translated_physmap))
2916         return 0;
2917 
2918 #ifdef CONFIG_XEN_PVH
2919     return xen_xlate_unmap_gfn_range(vma, numpgs, pages);
2920 #else
2921     return -EINVAL;
2922 #endif
2923 }
2924 EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);