
0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Copyright (C) 2008-2013 Freescale Semiconductor, Inc. All rights reserved.
0004  *
0005  * Author: Yu Liu, yu.liu@freescale.com
0006  *         Scott Wood, scottwood@freescale.com
0007  *         Ashish Kalra, ashish.kalra@freescale.com
0008  *         Varun Sethi, varun.sethi@freescale.com
0009  *         Alexander Graf, agraf@suse.de
0010  *
0011  * Description:
0012  * This file is based on arch/powerpc/kvm/44x_tlb.c,
0013  * by Hollis Blanchard <hollisb@us.ibm.com>.
0014  */
0015 
0016 #include <linux/kernel.h>
0017 #include <linux/types.h>
0018 #include <linux/slab.h>
0019 #include <linux/string.h>
0020 #include <linux/kvm.h>
0021 #include <linux/kvm_host.h>
0022 #include <linux/highmem.h>
0023 #include <linux/log2.h>
0024 #include <linux/uaccess.h>
0025 #include <linux/sched/mm.h>
0026 #include <linux/rwsem.h>
0027 #include <linux/vmalloc.h>
0028 #include <linux/hugetlb.h>
0029 #include <asm/kvm_ppc.h>
0030 #include <asm/pte-walk.h>
0031 
0032 #include "e500.h"
0033 #include "timing.h"
0034 #include "e500_mmu_host.h"
0035 
0036 #include "trace_booke.h"
0037 
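     /*
      * Host TLB1 layout, as used by the code below: entries 0 .. tlbcam_index - 1
      * hold the host's own CAM mappings and entry tlbcam_index is kept free for
      * the magic page (e500v2).  KVM shadow entries fill the rest of TLB1 from
      * the top of the array downwards, hence the index reversal below.
      */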
0038 #define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)
0039 
0040 static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM];
0041 
0042 static inline unsigned int tlb1_max_shadow_size(void)
0043 {
0044     /* reserve one entry for magic page */
0045     return host_tlb_params[1].entries - tlbcam_index - 1;
0046 }
0047 
0048 static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
0049 {
0050     /* Mask off reserved bits. */
0051     mas3 &= MAS3_ATTRIB_MASK;
0052 
0053 #ifndef CONFIG_KVM_BOOKE_HV
0054     if (!usermode) {
0055         /* Guest is in supervisor mode,
0056          * so we need to translate guest
0057          * supervisor permissions into user permissions. */
0058         mas3 &= ~E500_TLB_USER_PERM_MASK;
0059         mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
0060     }
0061     mas3 |= E500_TLB_SUPER_PERM_MASK;
0062 #endif
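         /*
          * For example, assuming the usual Book3E MAS3 layout (each supervisor
          * permission bit sits one position below its user counterpart, which
          * is what the shift by one above relies on): a guest entry carrying
          * only MAS3_SR|MAS3_SW while the guest is in supervisor mode leaves
          * the non-HV path as MAS3_UR|MAS3_UW plus the full supervisor set,
          * since the guest's supervisor code actually runs in host user mode
          * and the host kernel keeps unrestricted access to the shadow mapping.
          */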
0063     return mas3;
0064 }
0065 
0066 /*
0067  * Write a shadow TLB entry into the host TLB.
0068  */
0069 static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
0070                      uint32_t mas0,
0071                      uint32_t lpid)
0072 {
0073     unsigned long flags;
0074 
0075     local_irq_save(flags);
0076     mtspr(SPRN_MAS0, mas0);
0077     mtspr(SPRN_MAS1, stlbe->mas1);
0078     mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2);
0079     mtspr(SPRN_MAS3, (u32)stlbe->mas7_3);
0080     mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
0081 #ifdef CONFIG_KVM_BOOKE_HV
0082     mtspr(SPRN_MAS8, MAS8_TGS | get_thread_specific_lpid(lpid));
0083 #endif
0084     asm volatile("isync; tlbwe" : : : "memory");
0085 
0086 #ifdef CONFIG_KVM_BOOKE_HV
0087     /* Must clear MAS8 for subsequent host tlbwe instructions */
0088     mtspr(SPRN_MAS8, 0);
0089     isync();
0090 #endif
0091     local_irq_restore(flags);
0092 
0093     trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1,
0094                                   stlbe->mas2, stlbe->mas7_3);
0095 }
0096 
0097 /*
0098  * Acquire a mas0 with victim hint, as if we just took a TLB miss.
0099  *
0100  * We don't care about the address we're searching for, other than that it's
0101  * in the right set and is not present in the TLB.  Using a zero PID and a
0102  * userspace address means we don't have to set and then restore MAS5, or
0103  * calculate a proper MAS6 value.
0104  */
0105 static u32 get_host_mas0(unsigned long eaddr)
0106 {
0107     unsigned long flags;
0108     u32 mas0;
0109     u32 mas4;
0110 
0111     local_irq_save(flags);
0112     mtspr(SPRN_MAS6, 0);
0113     mas4 = mfspr(SPRN_MAS4);
0114     mtspr(SPRN_MAS4, mas4 & ~MAS4_TLBSEL_MASK);
0115     asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET));
0116     mas0 = mfspr(SPRN_MAS0);
0117     mtspr(SPRN_MAS4, mas4);
0118     local_irq_restore(flags);
0119 
0120     return mas0;
0121 }
0122 
0123 /* sesel is for tlb1 only */
0124 static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
0125         int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe)
0126 {
0127     u32 mas0;
0128 
0129     if (tlbsel == 0) {
0130         mas0 = get_host_mas0(stlbe->mas2);
0131         __write_host_tlbe(stlbe, mas0, vcpu_e500->vcpu.kvm->arch.lpid);
0132     } else {
0133         __write_host_tlbe(stlbe,
0134                   MAS0_TLBSEL(1) |
0135                   MAS0_ESEL(to_htlb1_esel(sesel)),
0136                   vcpu_e500->vcpu.kvm->arch.lpid);
0137     }
0138 }
0139 
0140 /* sesel is for tlb1 only */
0141 static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
0142             struct kvm_book3e_206_tlb_entry *gtlbe,
0143             struct kvm_book3e_206_tlb_entry *stlbe,
0144             int stlbsel, int sesel)
0145 {
0146     int stid;
0147 
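         /*
          * Disable preemption so the shadow TID obtained below cannot be
          * invalidated by a CPU migration before the entry has been written
          * to the host TLB.
          */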
0148     preempt_disable();
0149     stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe);
0150 
0151     stlbe->mas1 |= MAS1_TID(stid);
0152     write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe);
0153     preempt_enable();
0154 }
0155 
0156 #ifdef CONFIG_KVM_E500V2
0157 /* XXX should be a hook in the gva2hpa translation */
0158 void kvmppc_map_magic(struct kvm_vcpu *vcpu)
0159 {
0160     struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
0161     struct kvm_book3e_206_tlb_entry magic;
0162     ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
0163     unsigned int stid;
0164     kvm_pfn_t pfn;
0165 
0166     pfn = (kvm_pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
0167     get_page(pfn_to_page(pfn));
0168 
0169     preempt_disable();
0170     stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0);
0171 
0172     magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) |
0173              MAS1_TSIZE(BOOK3E_PAGESZ_4K);
0174     magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
0175     magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) |
0176                MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
0177     magic.mas8 = 0;
0178 
0179     __write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index), 0);
0180     preempt_enable();
0181 }
0182 #endif
0183 
0184 void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
0185              int esel)
0186 {
0187     struct kvm_book3e_206_tlb_entry *gtlbe =
0188         get_entry(vcpu_e500, tlbsel, esel);
0189     struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref;
0190 
0191     /* Don't bother with unmapped entries */
0192     if (!(ref->flags & E500_TLB_VALID)) {
0193         WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0),
0194              "%s: flags %x\n", __func__, ref->flags);
0195         WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]);
0196     }
0197 
0198     if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) {
0199         u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
0200         int hw_tlb_indx;
0201         unsigned long flags;
0202 
0203         local_irq_save(flags);
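             /*
              * tmp is the guest->host bitmap for this TLB1 entry: each set
              * bit names one host TLB1 slot backing it.  tmp & -tmp isolates
              * the lowest set bit, __ilog2_u64() turns it into the slot
              * index, and tmp &= tmp - 1 clears it for the next pass.
              */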
0204         while (tmp) {
0205             hw_tlb_indx = __ilog2_u64(tmp & -tmp);
0206             mtspr(SPRN_MAS0,
0207                   MAS0_TLBSEL(1) |
0208                   MAS0_ESEL(to_htlb1_esel(hw_tlb_indx)));
0209             mtspr(SPRN_MAS1, 0);
0210             asm volatile("tlbwe");
0211             vcpu_e500->h2g_tlb1_rmap[hw_tlb_indx] = 0;
0212             tmp &= tmp - 1;
0213         }
0214         mb();
0215         vcpu_e500->g2h_tlb1_map[esel] = 0;
0216         ref->flags &= ~(E500_TLB_BITMAP | E500_TLB_VALID);
0217         local_irq_restore(flags);
0218     }
0219 
0220     if (tlbsel == 1 && ref->flags & E500_TLB_TLB0) {
0221         /*
0222          * TLB1 entry is backed by 4k pages. This should happen
0223          * rarely and is not worth optimizing. Invalidate everything.
0224          */
0225         kvmppc_e500_tlbil_all(vcpu_e500);
0226         ref->flags &= ~(E500_TLB_TLB0 | E500_TLB_VALID);
0227     }
0228 
0229     /*
0230      * If TLB entry is still valid then it's a TLB0 entry, and thus
0231      * backed by at most one host tlbe per shadow pid
0232      */
0233     if (ref->flags & E500_TLB_VALID)
0234         kvmppc_e500_tlbil_one(vcpu_e500, gtlbe);
0235 
0236     /* Mark the TLB as not backed by the host anymore */
0237     ref->flags = 0;
0238 }
0239 
0240 static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
0241 {
0242     return tlbe->mas7_3 & (MAS3_SW|MAS3_UW);
0243 }
0244 
0245 static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
0246                      struct kvm_book3e_206_tlb_entry *gtlbe,
0247                      kvm_pfn_t pfn, unsigned int wimg)
0248 {
0249     ref->pfn = pfn;
0250     ref->flags = E500_TLB_VALID;
0251 
0252     /* Use guest supplied MAS2_G and MAS2_E */
0253     ref->flags |= (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg;
0254 
0255     /* Mark the page accessed */
0256     kvm_set_pfn_accessed(pfn);
0257 
0258     if (tlbe_is_writable(gtlbe))
0259         kvm_set_pfn_dirty(pfn);
0260 }
0261 
0262 static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
0263 {
0264     if (ref->flags & E500_TLB_VALID) {
0265         /* FIXME: don't log bogus pfn for TLB1 */
0266         trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
0267         ref->flags = 0;
0268     }
0269 }
0270 
0271 static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
0272 {
0273     if (vcpu_e500->g2h_tlb1_map)
0274         memset(vcpu_e500->g2h_tlb1_map, 0,
0275                sizeof(u64) * vcpu_e500->gtlb_params[1].entries);
0276     if (vcpu_e500->h2g_tlb1_rmap)
0277         memset(vcpu_e500->h2g_tlb1_rmap, 0,
0278                sizeof(unsigned int) * host_tlb_params[1].entries);
0279 }
0280 
0281 static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
0282 {
0283     int tlbsel;
0284     int i;
0285 
0286     for (tlbsel = 0; tlbsel <= 1; tlbsel++) {
0287         for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
0288             struct tlbe_ref *ref =
0289                 &vcpu_e500->gtlb_priv[tlbsel][i].ref;
0290             kvmppc_e500_ref_release(ref);
0291         }
0292     }
0293 }
0294 
0295 void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
0296 {
0297     struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
0298     kvmppc_e500_tlbil_all(vcpu_e500);
0299     clear_tlb_privs(vcpu_e500);
0300     clear_tlb1_bitmap(vcpu_e500);
0301 }
0302 
0303 /* TID must be supplied by the caller */
0304 static void kvmppc_e500_setup_stlbe(
0305     struct kvm_vcpu *vcpu,
0306     struct kvm_book3e_206_tlb_entry *gtlbe,
0307     int tsize, struct tlbe_ref *ref, u64 gvaddr,
0308     struct kvm_book3e_206_tlb_entry *stlbe)
0309 {
0310     kvm_pfn_t pfn = ref->pfn;
0311     u32 pr = vcpu->arch.shared->msr & MSR_PR;
0312 
0313     BUG_ON(!(ref->flags & E500_TLB_VALID));
0314 
0315     /* Force IPROT=0 for all guest mappings. */
0316     stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID;
0317     stlbe->mas2 = (gvaddr & MAS2_EPN) | (ref->flags & E500_TLB_MAS2_ATTR);
0318     stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
0319             e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);
0320 }
0321 
0322 static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
0323     u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
0324     int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe,
0325     struct tlbe_ref *ref)
0326 {
0327     struct kvm_memory_slot *slot;
0328     unsigned long pfn = 0; /* silence GCC warning */
0329     unsigned long hva;
0330     int pfnmap = 0;
0331     int tsize = BOOK3E_PAGESZ_4K;
0332     int ret = 0;
0333     unsigned long mmu_seq;
0334     struct kvm *kvm = vcpu_e500->vcpu.kvm;
0335     unsigned long tsize_pages = 0;
0336     pte_t *ptep;
0337     unsigned int wimg = 0;
0338     pgd_t *pgdir;
0339     unsigned long flags;
0340 
0341     /* used to check for invalidations in progress */
0342     mmu_seq = kvm->mmu_invalidate_seq;
0343     smp_rmb();
0344 
0345     /*
0346      * Translate guest physical to true physical, acquiring
0347      * a page reference if it is normal, non-reserved memory.
0348      *
0349      * gfn_to_memslot() must succeed because otherwise we wouldn't
0350      * have gotten this far.  Eventually we should just pass the slot
0351      * pointer through from the first lookup.
0352      */
0353     slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
0354     hva = gfn_to_hva_memslot(slot, gfn);
0355 
0356     if (tlbsel == 1) {
0357         struct vm_area_struct *vma;
0358         mmap_read_lock(kvm->mm);
0359 
0360         vma = find_vma(kvm->mm, hva);
0361         if (vma && hva >= vma->vm_start &&
0362             (vma->vm_flags & VM_PFNMAP)) {
0363             /*
0364              * This VMA is a physically contiguous region (e.g.
0365              * /dev/mem) that bypasses normal Linux page
0366              * management.  Find the overlap between the
0367              * vma and the memslot.
0368              */
0369 
0370             unsigned long start, end;
0371             unsigned long slot_start, slot_end;
0372 
0373             pfnmap = 1;
0374 
0375             start = vma->vm_pgoff;
0376             end = start +
0377                   vma_pages(vma);
0378 
0379             pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);
0380 
0381             slot_start = pfn - (gfn - slot->base_gfn);
0382             slot_end = slot_start + slot->npages;
0383 
0384             if (start < slot_start)
0385                 start = slot_start;
0386             if (end > slot_end)
0387                 end = slot_end;
0388 
0389             tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
0390                 MAS1_TSIZE_SHIFT;
0391 
0392             /*
0393              * e500 doesn't implement the lowest tsize bit,
0394              * or 1K pages.
0395              */
0396             tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
0397 
0398             /*
0399              * Now find the largest tsize (up to what the guest
0400              * requested) that will cover gfn, stay within the
0401              * range, and for which gfn and pfn are mutually
0402              * aligned.
0403              */
0404 
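                 /*
                  * For example (with tsize encoding log2 of the size in KB),
                  * a guest request for a 1MiB mapping (tsize 10, i.e. 256
                  * 4K pages) is only used if gfn and pfn agree modulo 256
                  * and the naturally aligned 1MiB block fits inside the
                  * VMA/memslot overlap; otherwise the loop retries with
                  * 256KiB, 64KiB, ... and finally falls back to 4KiB.
                  */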
0405             for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
0406                 unsigned long gfn_start, gfn_end;
0407                 tsize_pages = 1UL << (tsize - 2);
0408 
0409                 gfn_start = gfn & ~(tsize_pages - 1);
0410                 gfn_end = gfn_start + tsize_pages;
0411 
0412                 if (gfn_start + pfn - gfn < start)
0413                     continue;
0414                 if (gfn_end + pfn - gfn > end)
0415                     continue;
0416                 if ((gfn & (tsize_pages - 1)) !=
0417                     (pfn & (tsize_pages - 1)))
0418                     continue;
0419 
0420                 gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
0421                 pfn &= ~(tsize_pages - 1);
0422                 break;
0423             }
0424         } else if (vma && hva >= vma->vm_start &&
0425                is_vm_hugetlb_page(vma)) {
0426             unsigned long psize = vma_kernel_pagesize(vma);
0427 
0428             tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
0429                 MAS1_TSIZE_SHIFT;
0430 
0431             /*
0432              * Take the largest page size that satisfies both host
0433              * and guest mapping
0434              */
0435             tsize = min(__ilog2(psize) - 10, tsize);
0436 
0437             /*
0438              * e500 doesn't implement the lowest tsize bit,
0439              * or 1K pages.
0440              */
0441             tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
0442         }
0443 
0444         mmap_read_unlock(kvm->mm);
0445     }
0446 
0447     if (likely(!pfnmap)) {
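             /*
              * tsize encodes log2 of the mapping size in KB, so tsize + 10
              * is log2 of the size in bytes and tsize_pages is that size
              * expressed in host pages.
              */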
0448         tsize_pages = 1UL << (tsize + 10 - PAGE_SHIFT);
0449         pfn = gfn_to_pfn_memslot(slot, gfn);
0450         if (is_error_noslot_pfn(pfn)) {
0451             if (printk_ratelimit())
0452                 pr_err("%s: real page not found for gfn %lx\n",
0453                        __func__, (long)gfn);
0454             return -EINVAL;
0455         }
0456 
0457         /* Align guest and physical address to page map boundaries */
0458         pfn &= ~(tsize_pages - 1);
0459         gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
0460     }
0461 
0462     spin_lock(&kvm->mmu_lock);
0463     if (mmu_invalidate_retry(kvm, mmu_seq)) {
0464         ret = -EAGAIN;
0465         goto out;
0466     }
0467 
0468 
0469     pgdir = vcpu_e500->vcpu.arch.pgdir;
0470     /*
0471      * We are just looking at the wimg bits, so we don't
0472      * care much about the trans splitting bit.
0473      * We are holding kvm->mmu_lock so a notifier invalidate
0474      * can't run hence pfn won't change.
0475      */
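         /*
          * Interrupts stay disabled across the walk so the page tables
          * examined by find_linux_pte() cannot be freed or split
          * underneath us.
          */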
0476     local_irq_save(flags);
0477     ptep = find_linux_pte(pgdir, hva, NULL, NULL);
0478     if (ptep) {
0479         pte_t pte = READ_ONCE(*ptep);
0480 
0481         if (pte_present(pte)) {
0482             wimg = (pte_val(pte) >> PTE_WIMGE_SHIFT) &
0483                 MAS2_WIMGE_MASK;
0484             local_irq_restore(flags);
0485         } else {
0486             local_irq_restore(flags);
0487             pr_err_ratelimited("%s: pte not present: gfn %lx,pfn %lx\n",
0488                        __func__, (long)gfn, pfn);
0489             ret = -EINVAL;
0490             goto out;
0491         }
0492     }
0493     kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
0494 
0495     kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
0496                 ref, gvaddr, stlbe);
0497 
0498     /* Clear i-cache for new pages */
0499     kvmppc_mmu_flush_icache(pfn);
0500 
0501 out:
0502     spin_unlock(&kvm->mmu_lock);
0503 
0504     /* Drop refcount on page, so that mmu notifiers can clear it */
0505     kvm_release_pfn_clean(pfn);
0506 
0507     return ret;
0508 }
0509 
0510 /* XXX only map the one-one case, for now use TLB0 */
0511 static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500, int esel,
0512                 struct kvm_book3e_206_tlb_entry *stlbe)
0513 {
0514     struct kvm_book3e_206_tlb_entry *gtlbe;
0515     struct tlbe_ref *ref;
0516     int stlbsel = 0;
0517     int sesel = 0;
0518     int r;
0519 
0520     gtlbe = get_entry(vcpu_e500, 0, esel);
0521     ref = &vcpu_e500->gtlb_priv[0][esel].ref;
0522 
0523     r = kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
0524             get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
0525             gtlbe, 0, stlbe, ref);
0526     if (r)
0527         return r;
0528 
0529     write_stlbe(vcpu_e500, gtlbe, stlbe, stlbsel, sesel);
0530 
0531     return 0;
0532 }
0533 
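     /*
      * Pick a host TLB1 slot for a guest TLB1 entry, round-robin across the
      * shadow range.  h2g_tlb1_rmap[] stores esel + 1 so that zero means
      * "slot unused"; g2h_tlb1_map[] is the reverse bitmap that
      * inval_gtlbe_on_host() walks when the guest entry is dropped.
      */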
0534 static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500,
0535                      struct tlbe_ref *ref,
0536                      int esel)
0537 {
0538     unsigned int sesel = vcpu_e500->host_tlb1_nv++;
0539 
0540     if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
0541         vcpu_e500->host_tlb1_nv = 0;
0542 
0543     if (vcpu_e500->h2g_tlb1_rmap[sesel]) {
0544         unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel] - 1;
0545         vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel);
0546     }
0547 
0548     vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
0549     vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
0550     vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1;
0551     WARN_ON(!(ref->flags & E500_TLB_VALID));
0552 
0553     return sesel;
0554 }
0555 
0556 /* Caller must ensure that the specified guest TLB entry is safe to insert into
0557  * the shadow TLB. */
0558 /* For both one-one and one-to-many */
0559 static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
0560         u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
0561         struct kvm_book3e_206_tlb_entry *stlbe, int esel)
0562 {
0563     struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[1][esel].ref;
0564     int sesel;
0565     int r;
0566 
0567     r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe,
0568                    ref);
0569     if (r)
0570         return r;
0571 
0572     /* Use TLB0 when the mapping can only be done with 4K pages */
0573     if (get_tlb_tsize(stlbe) == BOOK3E_PAGESZ_4K) {
0574         vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_TLB0;
0575         write_stlbe(vcpu_e500, gtlbe, stlbe, 0, 0);
0576         return 0;
0577     }
0578 
0579     /* Otherwise map into TLB1 */
0580     sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, ref, esel);
0581     write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel);
0582 
0583     return 0;
0584 }
0585 
0586 void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
0587             unsigned int index)
0588 {
0589     struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
0590     struct tlbe_priv *priv;
0591     struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
0592     int tlbsel = tlbsel_of(index);
0593     int esel = esel_of(index);
0594 
0595     gtlbe = get_entry(vcpu_e500, tlbsel, esel);
0596 
0597     switch (tlbsel) {
0598     case 0:
0599         priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
0600 
0601         /* Triggers after clear_tlb_privs or on initial mapping */
0602         if (!(priv->ref.flags & E500_TLB_VALID)) {
0603             kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
0604         } else {
0605             kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K,
0606                         &priv->ref, eaddr, &stlbe);
0607             write_stlbe(vcpu_e500, gtlbe, &stlbe, 0, 0);
0608         }
0609         break;
0610 
0611     case 1: {
0612         gfn_t gfn = gpaddr >> PAGE_SHIFT;
0613         kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe, &stlbe,
0614                      esel);
0615         break;
0616     }
0617 
0618     default:
0619         BUG();
0620         break;
0621     }
0622 }
0623 
0624 #ifdef CONFIG_KVM_BOOKE_HV
0625 int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
0626         enum instruction_fetch_type type, u32 *instr)
0627 {
0628     gva_t geaddr;
0629     hpa_t addr;
0630     hfn_t pfn;
0631     hva_t eaddr;
0632     u32 mas1, mas2, mas3;
0633     u64 mas7_mas3;
0634     struct page *page;
0635     unsigned int addr_space, psize_shift;
0636     bool pr;
0637     unsigned long flags;
0638 
0639     /* Search TLB for guest pc to get the real address */
0640     geaddr = kvmppc_get_pc(vcpu);
0641 
0642     addr_space = (vcpu->arch.shared->msr & MSR_IS) >> MSR_IR_LG;
0643 
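         /*
          * Search the hardware TLB with the guest's PID and address space
          * and, via MAS5, its LPID, so that the hit reflects the guest
          * mapping rather than a host one.
          */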
0644     local_irq_save(flags);
0645     mtspr(SPRN_MAS6, (vcpu->arch.pid << MAS6_SPID_SHIFT) | addr_space);
0646     mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(vcpu));
0647     asm volatile("tlbsx 0, %[geaddr]\n" : :
0648              [geaddr] "r" (geaddr));
0649     mtspr(SPRN_MAS5, 0);
0650     mtspr(SPRN_MAS8, 0);
0651     mas1 = mfspr(SPRN_MAS1);
0652     mas2 = mfspr(SPRN_MAS2);
0653     mas3 = mfspr(SPRN_MAS3);
0654 #ifdef CONFIG_64BIT
0655     mas7_mas3 = mfspr(SPRN_MAS7_MAS3);
0656 #else
0657     mas7_mas3 = ((u64)mfspr(SPRN_MAS7) << 32) | mas3;
0658 #endif
0659     local_irq_restore(flags);
0660 
0661     /*
0662      * If the TLB entry for guest pc was evicted, return to the guest.
0663  * There is a good chance a valid TLB entry will be found next time.
0664      */
0665     if (!(mas1 & MAS1_VALID))
0666         return EMULATE_AGAIN;
0667 
0668     /*
0669  * Another thread may rewrite the TLB entry in parallel; don't
0670  * execute from the address if the execute permission is not set.
0671      */
0672     pr = vcpu->arch.shared->msr & MSR_PR;
0673     if (unlikely((pr && !(mas3 & MAS3_UX)) ||
0674              (!pr && !(mas3 & MAS3_SX)))) {
0675         pr_err_ratelimited(
0676             "%s: Instruction emulation from guest address %08lx without execute permission\n",
0677             __func__, geaddr);
0678         return EMULATE_AGAIN;
0679     }
0680 
0681     /*
0682      * The real address will be mapped by a cacheable, memory coherent,
0683      * write-back page. Check for mismatches when LRAT is used.
0684      */
0685     if (has_feature(vcpu, VCPU_FTR_MMU_V2) &&
0686         unlikely((mas2 & MAS2_I) || (mas2 & MAS2_W) || !(mas2 & MAS2_M))) {
0687         pr_err_ratelimited(
0688             "%s: Instruction emulation from guest address %08lx mismatches storage attributes\n",
0689             __func__, geaddr);
0690         return EMULATE_AGAIN;
0691     }
0692 
0693     /* Get pfn */
0694     psize_shift = MAS1_GET_TSIZE(mas1) + 10;
0695     addr = (mas7_mas3 & (~0ULL << psize_shift)) |
0696            (geaddr & ((1ULL << psize_shift) - 1ULL));
0697     pfn = addr >> PAGE_SHIFT;
0698 
0699     /* Guard against emulation from a device (non-RAM) area */
0700     if (unlikely(!page_is_ram(pfn))) {
0701         pr_err_ratelimited("%s: Instruction emulation from non-RAM host address %08llx is not supported\n",
0702              __func__, addr);
0703         return EMULATE_AGAIN;
0704     }
0705 
0706     /* Map a page and get guest's instruction */
0707     page = pfn_to_page(pfn);
0708     eaddr = (unsigned long)kmap_atomic(page);
0709     *instr = *(u32 *)(eaddr | (unsigned long)(addr & ~PAGE_MASK));
0710     kunmap_atomic((u32 *)eaddr);
0711 
0712     return EMULATE_DONE;
0713 }
0714 #else
0715 int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
0716         enum instruction_fetch_type type, u32 *instr)
0717 {
0718     return EMULATE_AGAIN;
0719 }
0720 #endif
0721 
0722 /************* MMU Notifiers *************/
0723 
0724 static bool kvm_e500_mmu_unmap_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
0725 {
0726     /*
0727      * Flush all shadow TLB entries everywhere. This is slow, but
0728      * it guarantees that the page being unmapped is caught.
0729      */
0730     return true;
0731 }
0732 
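     /*
      * Returning true asks the generic MMU notifier path to flush, which
      * on e500 is expected to end up dropping every shadow TLB entry
      * (see kvmppc_core_flush_tlb()).
      */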
0733 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
0734 {
0735     return kvm_e500_mmu_unmap_gfn(kvm, range);
0736 }
0737 
0738 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
0739 {
0740     /* XXX could be more clever ;) */
0741     return false;
0742 }
0743 
0744 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
0745 {
0746     /* XXX could be more clever ;) */
0747     return false;
0748 }
0749 
0750 bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
0751 {
0752     /* The page will get remapped properly on its next fault */
0753     return kvm_e500_mmu_unmap_gfn(kvm, range);
0754 }
0755 
0756 /*****************************************/
0757 
0758 int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500)
0759 {
0760     host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY;
0761     host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;
0762 
0763     /*
0764      * This should never happen on real e500 hardware, but is
0765      * architecturally possible -- e.g. in some weird nested
0766      * virtualization case.
0767      */
0768     if (host_tlb_params[0].entries == 0 ||
0769         host_tlb_params[1].entries == 0) {
0770         pr_err("%s: need to know host tlb size\n", __func__);
0771         return -ENODEV;
0772     }
0773 
0774     host_tlb_params[0].ways = (mfspr(SPRN_TLB0CFG) & TLBnCFG_ASSOC) >>
0775                   TLBnCFG_ASSOC_SHIFT;
0776     host_tlb_params[1].ways = host_tlb_params[1].entries;
0777 
0778     if (!is_power_of_2(host_tlb_params[0].entries) ||
0779         !is_power_of_2(host_tlb_params[0].ways) ||
0780         host_tlb_params[0].entries < host_tlb_params[0].ways ||
0781         host_tlb_params[0].ways == 0) {
0782         pr_err("%s: bad tlb0 host config: %u entries %u ways\n",
0783                __func__, host_tlb_params[0].entries,
0784                host_tlb_params[0].ways);
0785         return -ENODEV;
0786     }
0787 
0788     host_tlb_params[0].sets =
0789         host_tlb_params[0].entries / host_tlb_params[0].ways;
0790     host_tlb_params[1].sets = 1;
0791     vcpu_e500->h2g_tlb1_rmap = kcalloc(host_tlb_params[1].entries,
0792                        sizeof(*vcpu_e500->h2g_tlb1_rmap),
0793                        GFP_KERNEL);
0794     if (!vcpu_e500->h2g_tlb1_rmap)
0795         return -EINVAL;
0796 
0797     return 0;
0798 }
0799 
0800 void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
0801 {
0802     kfree(vcpu_e500->h2g_tlb1_rmap);
0803 }