0001 /* SPDX-License-Identifier: GPL-2.0-only */
0002 /*
0003  *
0004  * Copyright SUSE Linux Products GmbH 2010
0005  *
0006  * Authors: Alexander Graf <agraf@suse.de>
0007  */
0008 
0009 #ifndef __ASM_KVM_BOOK3S_64_H__
0010 #define __ASM_KVM_BOOK3S_64_H__
0011 
0012 #include <linux/string.h>
0013 #include <asm/bitops.h>
0014 #include <asm/book3s/64/mmu-hash.h>
0015 #include <asm/cpu_has_feature.h>
0016 #include <asm/ppc-opcode.h>
0017 #include <asm/pte-walk.h>
0018 
0019 /*
0020  * Structure for a nested guest, that is, for a guest that is managed by
0021  * one of our guests.
0022  */
0023 struct kvm_nested_guest {
0024     struct kvm *l1_host;        /* L1 VM that owns this nested guest */
0025     int l1_lpid;            /* lpid L1 guest thinks this guest is */
0026     int shadow_lpid;        /* real lpid of this nested guest */
0027     pgd_t *shadow_pgtable;      /* our page table for this guest */
0028     u64 l1_gr_to_hr;        /* L1's addr of part'n-scoped table */
0029     u64 process_table;      /* process table entry for this guest */
0030     long refcnt;            /* number of pointers to this struct */
0031     struct mutex tlb_lock;      /* serialize page faults and tlbies */
0032     struct kvm_nested_guest *next;
0033     cpumask_t need_tlb_flush;
0034     short prev_cpu[NR_CPUS];
0035     u8 radix;           /* is this nested guest radix */
0036 };
0037 
0038 /*
0039  * We define a nested rmap entry as a single 64-bit quantity
0040  * 0xFFF0000000000000   12-bit lpid field
0041  * 0x000FFFFFFFFFF000   40-bit guest 4k page frame number
0042  * 0x0000000000000001   1-bit  single entry flag
0043  */
0044 #define RMAP_NESTED_LPID_MASK       0xFFF0000000000000UL
0045 #define RMAP_NESTED_LPID_SHIFT      (52)
0046 #define RMAP_NESTED_GPA_MASK        0x000FFFFFFFFFF000UL
0047 #define RMAP_NESTED_IS_SINGLE_ENTRY 0x0000000000000001UL
0048 
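/*
 * Illustrative sketch (not part of the original header): how a nested rmap
 * entry packs together under the masks above. The helper names are
 * hypothetical and only serve to make the bit layout concrete.
 */
static inline u64 example_pack_nest_rmap(unsigned int lpid, u64 gpa)
{
    return (((u64)lpid << RMAP_NESTED_LPID_SHIFT) & RMAP_NESTED_LPID_MASK) |
           (gpa & RMAP_NESTED_GPA_MASK);
}

static inline unsigned int example_nest_rmap_lpid(u64 rmap)
{
    return (rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;
}
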
0049 /* Structure for a nested guest rmap entry */
0050 struct rmap_nested {
0051     struct llist_node list;
0052     u64 rmap;
0053 };
0054 
0055 /*
0056  * for_each_nest_rmap_safe - iterate over the list of nested rmap entries
0057  *               safe against removal of the list entry or NULL list
0058  * @pos:    a (struct rmap_nested *) to use as a loop cursor
0059  * @node:   pointer to the first entry
0060  *      NOTE: this can be NULL
0061  * @rmapp:  an (unsigned long *) in which to return the rmap entries on each
0062  *      iteration
0063  *      NOTE: this must point to already allocated memory
0064  *
0065  * The nested_rmap is a llist of (struct rmap_nested) entries pointed to by the
0066  * rmap entry in the memslot. The list is always terminated by a "single entry"
0067  * stored in the list element of the final entry of the llist. If there is ONLY
0068  * a single entry then this is itself in the rmap entry of the memslot, not a
0069  * llist head pointer.
0070  *
0071  * Note that the iterator below assumes that a nested rmap entry is always
0072  * non-zero.  This is true for our usage because the LPID field is always
0073  * non-zero (zero is reserved for the host).
0074  *
0075  * This should be used to iterate over the list of rmap_nested entries with
0076  * processing done on the u64 rmap value given by each iteration. This is safe
0077  * against removal of list entries and it is always safe to call free on (pos).
0078  *
0079  * e.g.
0080  * struct rmap_nested *cursor;
0081  * struct llist_node *first;
0082  * unsigned long rmap;
0083  * for_each_nest_rmap_safe(cursor, first, &rmap) {
0084  *  do_something(rmap);
0085  *  free(cursor);
0086  * }
0087  */
0088 #define for_each_nest_rmap_safe(pos, node, rmapp)                  \
0089     for ((pos) = llist_entry((node), typeof(*(pos)), list);            \
0090          (node) &&                                 \
0091          (*(rmapp) = ((RMAP_NESTED_IS_SINGLE_ENTRY & ((u64) (node))) ?     \
0092               ((u64) (node)) : ((pos)->rmap))) &&              \
0093          (((node) = ((RMAP_NESTED_IS_SINGLE_ENTRY & ((u64) (node))) ?      \
0094              ((struct llist_node *) ((pos) = NULL)) :          \
0095              (pos)->list.next)), true);                \
0096          (pos) = llist_entry((node), typeof(*(pos)), list))
0097 
0098 struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
0099                       bool create);
0100 void kvmhv_put_nested(struct kvm_nested_guest *gp);
0101 int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid);
0102 
0103 /* Encoding of first parameter for H_TLB_INVALIDATE */
0104 #define H_TLBIE_P1_ENC(ric, prs, r) (___PPC_RIC(ric) | ___PPC_PRS(prs) | \
0105                      ___PPC_R(r))
0106 
0107 /* The Power architecture requires the HPT to be at least 256kiB and at most 64TiB */
0108 #define PPC_MIN_HPT_ORDER   18
0109 #define PPC_MAX_HPT_ORDER   46
0110 
0111 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
0112 static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
0113 {
0114     preempt_disable();
0115     return &get_paca()->shadow_vcpu;
0116 }
0117 
0118 static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
0119 {
0120     preempt_enable();
0121 }
0122 #endif
0123 
0124 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
0125 
0126 static inline bool kvm_is_radix(struct kvm *kvm)
0127 {
0128     return kvm->arch.radix;
0129 }
0130 
0131 static inline bool kvmhv_vcpu_is_radix(struct kvm_vcpu *vcpu)
0132 {
0133     bool radix;
0134 
0135     if (vcpu->arch.nested)
0136         radix = vcpu->arch.nested->radix;
0137     else
0138         radix = kvm_is_radix(vcpu->kvm);
0139 
0140     return radix;
0141 }
0142 
0143 unsigned long kvmppc_msr_hard_disable_set_facilities(struct kvm_vcpu *vcpu, unsigned long msr);
0144 
0145 int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr, u64 *tb);
0146 
0147 #define KVM_DEFAULT_HPT_ORDER   24  /* 16MB HPT by default */
0148 #endif
0149 
0150 /*
0151  * Invalid HDSISR value, used to indicate that hardware has not set the register.
0152  * Used to work around an erratum.
0153  */
0154 #define HDSISR_CANARY   0x7fff
0155 
0156 /*
0157  * We use a lock bit in HPTE dword 0 to synchronize updates and
0158  * accesses to each HPTE, and another bit to indicate non-present
0159  * HPTEs.
0160  */
0161 #define HPTE_V_HVLOCK   0x40UL
0162 #define HPTE_V_ABSENT   0x20UL
0163 
0164 /*
0165  * We use this bit in the guest_rpte field of the revmap entry
0166  * to indicate a modified HPTE.
0167  */
0168 #define HPTE_GR_MODIFIED    (1ul << 62)
0169 
0170 /* These bits are reserved in the guest view of the HPTE */
0171 #define HPTE_GR_RESERVED    HPTE_GR_MODIFIED
0172 
0173 static inline long try_lock_hpte(__be64 *hpte, unsigned long bits)
0174 {
0175     unsigned long tmp, old;
0176     __be64 be_lockbit, be_bits;
0177 
0178     /*
0179      * We load/store in native endian, but the HTAB is big endian. By
0180      * byte-swapping all the data we apply to the PTE, we remain
0181      * correct.
0182      */
0183     be_lockbit = cpu_to_be64(HPTE_V_HVLOCK);
0184     be_bits = cpu_to_be64(bits);
0185 
0186     asm volatile("  ldarx   %0,0,%2\n"
0187              "  and.    %1,%0,%3\n"
0188              "  bne 2f\n"
0189              "  or  %0,%0,%4\n"
0190              "  stdcx.  %0,0,%2\n"
0191              "  beq+    2f\n"
0192              "  mr  %1,%3\n"
0193              "2:    isync"
0194              : "=&r" (tmp), "=&r" (old)
0195              : "r" (hpte), "r" (be_bits), "r" (be_lockbit)
0196              : "cc", "memory");
0197     return old == 0;
0198 }
0199 
0200 static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
0201 {
0202     hpte_v &= ~HPTE_V_HVLOCK;
0203     asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
0204     hpte[0] = cpu_to_be64(hpte_v);
0205 }
0206 
0207 /* Without barrier */
0208 static inline void __unlock_hpte(__be64 *hpte, unsigned long hpte_v)
0209 {
0210     hpte_v &= ~HPTE_V_HVLOCK;
0211     hpte[0] = cpu_to_be64(hpte_v);
0212 }
0213 
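/*
 * Illustrative sketch (not part of the original header): the spin/update/
 * unlock pattern a caller might use with the helpers above. The function
 * name is hypothetical; only try_lock_hpte()/unlock_hpte() and standard
 * kernel primitives appear.
 */
static inline void example_hpte_update(__be64 *hptep, unsigned long set_v)
{
    unsigned long v;

    while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
        cpu_relax();
    v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
    /* ... inspect or modify the HPTE while holding the lock ... */
    unlock_hpte(hptep, v | set_v);  /* release barrier + store, drops the lock bit */
}
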
0214 /*
0215  * These functions encode knowledge of the POWER7/8/9 hardware
0216  * interpretations of the HPTE LP (large page size) field.
0217  */
0218 static inline int kvmppc_hpte_page_shifts(unsigned long h, unsigned long l)
0219 {
0220     unsigned int lphi;
0221 
0222     if (!(h & HPTE_V_LARGE))
0223         return 12;  /* 4kB */
0224     lphi = (l >> 16) & 0xf;
0225     switch ((l >> 12) & 0xf) {
0226     case 0:
0227         return !lphi ? 24 : 0;      /* 16MB */
0228         break;
0229     case 1:
0230         return 16;          /* 64kB */
0231         break;
0232     case 3:
0233         return !lphi ? 34 : 0;      /* 16GB */
0234         break;
0235     case 7:
0236         return (16 << 8) + 12;      /* 64kB in 4kB */
0237         break;
0238     case 8:
0239         if (!lphi)
0240             return (24 << 8) + 16;  /* 16MB in 64kB */
0241         if (lphi == 3)
0242             return (24 << 8) + 12;  /* 16MB in 4kB */
0243         break;
0244     }
0245     return 0;
0246 }
0247 
0248 static inline int kvmppc_hpte_base_page_shift(unsigned long h, unsigned long l)
0249 {
0250     return kvmppc_hpte_page_shifts(h, l) & 0xff;
0251 }
0252 
0253 static inline int kvmppc_hpte_actual_page_shift(unsigned long h, unsigned long l)
0254 {
0255     int tmp = kvmppc_hpte_page_shifts(h, l);
0256 
0257     if (tmp >= 0x100)
0258         tmp >>= 8;
0259     return tmp;
0260 }
0261 
0262 static inline unsigned long kvmppc_actual_pgsz(unsigned long v, unsigned long r)
0263 {
0264     int shift = kvmppc_hpte_actual_page_shift(v, r);
0265 
0266     if (shift)
0267         return 1ul << shift;
0268     return 0;
0269 }
0270 
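/*
 * Illustrative sketch (not part of the original header): the packed value
 * returned by kvmppc_hpte_page_shifts() keeps the base page shift in the
 * low byte and, for mixed page sizes, the actual page shift in the next
 * byte. A 16MB page backed by 64kB base pages returns (24 << 8) + 16, so
 * the helpers above yield base = 16 and actual = 24. The helper below is
 * hypothetical.
 */
static inline bool example_hpte_is_mixed_pgsz(unsigned long v, unsigned long r)
{
    return kvmppc_hpte_page_shifts(v, r) >= 0x100;
}
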
0271 static inline int kvmppc_pgsize_lp_encoding(int base_shift, int actual_shift)
0272 {
0273     switch (base_shift) {
0274     case 12:
0275         switch (actual_shift) {
0276         case 12:
0277             return 0;
0278         case 16:
0279             return 7;
0280         case 24:
0281             return 0x38;
0282         }
0283         break;
0284     case 16:
0285         switch (actual_shift) {
0286         case 16:
0287             return 1;
0288         case 24:
0289             return 8;
0290         }
0291         break;
0292     case 24:
0293         return 0;
0294     }
0295     return -1;
0296 }
0297 
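/*
 * Illustrative sketch (not part of the original header): the LP encoding
 * above and the page-shift decoder round-trip. For a 16MB actual page with
 * a 4kB base page size, kvmppc_pgsize_lp_encoding(12, 24) returns 0x38;
 * placing that in the LP bits (r bits 12..19) of an HPTE with HPTE_V_LARGE
 * set decodes back to (24 << 8) + 12. The function name is hypothetical.
 */
static inline bool example_lp_encoding_roundtrip(void)
{
    unsigned long lp = kvmppc_pgsize_lp_encoding(12, 24);

    return kvmppc_hpte_page_shifts(HPTE_V_LARGE, lp << 12) == ((24 << 8) + 12);
}
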
0298 static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
0299                          unsigned long pte_index)
0300 {
0301     int a_pgshift, b_pgshift;
0302     unsigned long rb = 0, va_low, sllp;
0303 
0304     b_pgshift = a_pgshift = kvmppc_hpte_page_shifts(v, r);
0305     if (a_pgshift >= 0x100) {
0306         b_pgshift &= 0xff;
0307         a_pgshift >>= 8;
0308     }
0309 
0310     /*
0311      * Ignore the top 14 bits of the va.
0312      * v has its top two bits covering the segment size, hence shift
0313      * by 16 bits; also clear the lower HPTE_V_AVPN_SHIFT (7) bits.
0314      * The AVA field in v also has its lower 23 bits ignored.
0315      * For a 4K base page size we need bits 14..65 (so we need to
0316      * collect an extra 11 bits);
0317      * for the others we need bits 14..14+i.
0318      */
0319     /* This covers 14..54 bits of va*/
0320     rb = (v & ~0x7fUL) << 16;       /* AVA field */
0321 
0322     /*
0323      * The AVA in v has its lower 23 bits cleared; we need to derive
0324      * them from the PTEG index.
0325      */
0326     va_low = pte_index >> 3;
0327     if (v & HPTE_V_SECONDARY)
0328         va_low = ~va_low;
0329     /*
0330      * Get the vpn bits from va_low by reversing the hash.
0331      * In v we have the va with 23 bits dropped and then left-shifted
0332      * by HPTE_V_AVPN_SHIFT (7) bits. To find the vsid we therefore
0333      * right-shift it by (SID_SHIFT - (23 - 7)).
0334      */
0335     if (!(v & HPTE_V_1TB_SEG))
0336         va_low ^= v >> (SID_SHIFT - 16);
0337     else
0338         va_low ^= v >> (SID_SHIFT_1T - 16);
0339     va_low &= 0x7ff;
0340 
0341     if (b_pgshift <= 12) {
0342         if (a_pgshift > 12) {
0343             sllp = (a_pgshift == 16) ? 5 : 4;
0344             rb |= sllp << 5;    /*  AP field */
0345         }
0346         rb |= (va_low & 0x7ff) << 12;   /* remaining 11 bits of AVA */
0347     } else {
0348         int aval_shift;
0349         /*
0350          * Remaining bits of the AVA/LP fields;
0351          * these also contain the rr bits of LP.
0352          */
0353         rb |= (va_low << b_pgshift) & 0x7ff000;
0354         /*
0355          * Now clear the LP bits that are not needed, based on the actual psize.
0356          */
0357         rb &= ~((1ul << a_pgshift) - 1);
0358         /*
0359          * AVAL field: 58..77 - base_page_shift bits of the va.
0360          * We have space for bits 58..64; the missing bits should
0361          * be zero-filled. The +1 takes care of the L bit shift.
0362          */
0363         aval_shift = 64 - (77 - b_pgshift) + 1;
0364         rb |= ((va_low << aval_shift) & 0xfe);
0365 
0366         rb |= 1;        /* L field */
0367         rb |= r & 0xff000 & ((1ul << a_pgshift) - 1); /* LP field */
0368     }
0369     /*
0370      * This sets both bits of the B field in the PTE. 0b1x values are
0371      * reserved, but those will have been filtered by kvmppc_do_h_enter.
0372      */
0373     rb |= (v >> HPTE_V_SSIZE_SHIFT) << 8;   /* B field */
0374     return rb;
0375 }
0376 
0377 static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
0378 {
0379     return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
0380 }
0381 
0382 static inline int hpte_is_writable(unsigned long ptel)
0383 {
0384     unsigned long pp = ptel & (HPTE_R_PP0 | HPTE_R_PP);
0385 
0386     return pp != PP_RXRX && pp != PP_RXXX;
0387 }
0388 
0389 static inline unsigned long hpte_make_readonly(unsigned long ptel)
0390 {
0391     if ((ptel & HPTE_R_PP0) || (ptel & HPTE_R_PP) == PP_RWXX)
0392         ptel = (ptel & ~HPTE_R_PP) | PP_RXXX;
0393     else
0394         ptel |= PP_RXRX;
0395     return ptel;
0396 }
0397 
0398 static inline bool hpte_cache_flags_ok(unsigned long hptel, bool is_ci)
0399 {
0400     unsigned int wimg = hptel & HPTE_R_WIMG;
0401 
0402     /* Handle SAO */
0403     if (wimg == (HPTE_R_W | HPTE_R_I | HPTE_R_M) &&
0404         cpu_has_feature(CPU_FTR_ARCH_206))
0405         wimg = HPTE_R_M;
0406 
0407     if (!is_ci)
0408         return wimg == HPTE_R_M;
0409     /*
0410      * If the host mapping is cache-inhibited, make sure the hptel
0411      * is cache-inhibited as well.
0412      */
0413     if (wimg & HPTE_R_W) /* FIXME!! is this OK for all guests? */
0414         return false;
0415     return !!(wimg & HPTE_R_I);
0416 }
0417 
0418 /*
0419  * If it's present and writable, atomically set dirty and referenced bits and
0420  * return the PTE, otherwise return 0.
0421  */
0422 static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing)
0423 {
0424     pte_t old_pte, new_pte = __pte(0);
0425 
0426     while (1) {
0427         /*
0428          * Make sure we don't reload from ptep
0429          */
0430         old_pte = READ_ONCE(*ptep);
0431         /*
0432          * wait until H_PAGE_BUSY is clear before attempting the update
0433          */
0434         if (unlikely(pte_val(old_pte) & H_PAGE_BUSY)) {
0435             cpu_relax();
0436             continue;
0437         }
0438         /* If the pte is not present, return an empty pte */
0439         if (unlikely(!pte_present(old_pte)))
0440             return __pte(0);
0441 
0442         new_pte = pte_mkyoung(old_pte);
0443         if (writing && pte_write(old_pte))
0444             new_pte = pte_mkdirty(new_pte);
0445 
0446         if (pte_xchg(ptep, old_pte, new_pte))
0447             break;
0448     }
0449     return new_pte;
0450 }
0451 
0452 static inline bool hpte_read_permission(unsigned long pp, unsigned long key)
0453 {
0454     if (key)
0455         return PP_RWRX <= pp && pp <= PP_RXRX;
0456     return true;
0457 }
0458 
0459 static inline bool hpte_write_permission(unsigned long pp, unsigned long key)
0460 {
0461     if (key)
0462         return pp == PP_RWRW;
0463     return pp <= PP_RWRW;
0464 }
0465 
0466 static inline int hpte_get_skey_perm(unsigned long hpte_r, unsigned long amr)
0467 {
0468     unsigned long skey;
0469 
0470     skey = ((hpte_r & HPTE_R_KEY_HI) >> 57) |
0471         ((hpte_r & HPTE_R_KEY_LO) >> 9);
0472     return (amr >> (62 - 2 * skey)) & 3;
0473 }
0474 
0475 static inline void lock_rmap(unsigned long *rmap)
0476 {
0477     do {
0478         while (test_bit(KVMPPC_RMAP_LOCK_BIT, rmap))
0479             cpu_relax();
0480     } while (test_and_set_bit_lock(KVMPPC_RMAP_LOCK_BIT, rmap));
0481 }
0482 
0483 static inline void unlock_rmap(unsigned long *rmap)
0484 {
0485     __clear_bit_unlock(KVMPPC_RMAP_LOCK_BIT, rmap);
0486 }
0487 
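/*
 * Illustrative sketch (not part of the original header): lock_rmap() is a
 * test-and-test-and-set bit lock on KVMPPC_RMAP_LOCK_BIT, so a caller
 * brackets its rmap updates like this (the function name is hypothetical):
 */
static inline void example_set_rmap_flag(unsigned long *rmap, unsigned long flag)
{
    lock_rmap(rmap);
    *rmap |= flag;      /* update the entry while holding the bit lock */
    unlock_rmap(rmap);
}
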
0488 static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
0489                    unsigned long pagesize)
0490 {
0491     unsigned long mask = (pagesize >> PAGE_SHIFT) - 1;
0492 
0493     if (pagesize <= PAGE_SIZE)
0494         return true;
0495     return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
0496 }
0497 
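/*
 * Worked example (editorial note, not part of the original header): with a
 * 4kB PAGE_SIZE, backing a memslot with 16MB pages makes the mask above
 * (16MB >> 12) - 1 = 4095, so both base_gfn and npages must be multiples
 * of 4096 for the slot to qualify.
 */
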
0498 /*
0499  * This works for 4k, 64k and 16M pages on POWER7,
0500  * and 4k and 16M pages on PPC970.
0501  */
0502 static inline unsigned long slb_pgsize_encoding(unsigned long psize)
0503 {
0504     unsigned long senc = 0;
0505 
0506     if (psize > 0x1000) {
0507         senc = SLB_VSID_L;
0508         if (psize == 0x10000)
0509             senc |= SLB_VSID_LP_01;
0510     }
0511     return senc;
0512 }
0513 
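/*
 * Worked example (editorial note, not part of the original header): a 64kB
 * psize (0x10000) encodes as SLB_VSID_L | SLB_VSID_LP_01, a 16MB psize as
 * SLB_VSID_L alone, and a 4kB psize as 0.
 */
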
0514 static inline int is_vrma_hpte(unsigned long hpte_v)
0515 {
0516     return (hpte_v & ~0xffffffUL) ==
0517         (HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)));
0518 }
0519 
0520 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
0521 /*
0522  * Note modification of an HPTE; set the HPTE modified bit
0523  * if anyone is interested.
0524  */
0525 static inline void note_hpte_modification(struct kvm *kvm,
0526                       struct revmap_entry *rev)
0527 {
0528     if (atomic_read(&kvm->arch.hpte_mod_interest))
0529         rev->guest_rpte |= HPTE_GR_MODIFIED;
0530 }
0531 
0532 /*
0533  * Like kvm_memslots(), but for use in real mode when we can't do
0534  * any RCU stuff (since the secondary threads are offline from the
0535  * kernel's point of view), and we can't print anything.
0536  * Thus we use rcu_dereference_raw_check() rather than rcu_dereference_check().
0537  */
0538 static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
0539 {
0540     return rcu_dereference_raw_check(kvm->memslots[0]);
0541 }
0542 
0543 extern void kvmppc_mmu_debugfs_init(struct kvm *kvm);
0544 extern void kvmhv_radix_debugfs_init(struct kvm *kvm);
0545 
0546 extern void kvmhv_rm_send_ipi(int cpu);
0547 
0548 static inline unsigned long kvmppc_hpt_npte(struct kvm_hpt_info *hpt)
0549 {
0550     /* HPTEs are 2**4 bytes long */
0551     return 1UL << (hpt->order - 4);
0552 }
0553 
0554 static inline unsigned long kvmppc_hpt_mask(struct kvm_hpt_info *hpt)
0555 {
0556     /* 128 (2**7) bytes in each HPTEG */
0557     return (1UL << (hpt->order - 7)) - 1;
0558 }
0559 
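/*
 * Worked example (editorial note, not part of the original header): with the
 * default HPT order of 24 (a 16MB table), kvmppc_hpt_npte() yields
 * 1UL << (24 - 4) = 2^20 HPTEs and kvmppc_hpt_mask() yields
 * (1UL << (24 - 7)) - 1, i.e. a mask over the 2^17 128-byte HPTE groups.
 */
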
0560 /* Set bits in a dirty bitmap, which is in LE format */
0561 static inline void set_dirty_bits(unsigned long *map, unsigned long i,
0562                   unsigned long npages)
0563 {
0564 
0565     if (npages >= 8)
0566         memset((char *)map + i / 8, 0xff, npages / 8);
0567     else
0568         for (; npages; ++i, --npages)
0569             __set_bit_le(i, map);
0570 }
0571 
0572 static inline void set_dirty_bits_atomic(unsigned long *map, unsigned long i,
0573                      unsigned long npages)
0574 {
0575     if (npages >= 8)
0576         memset((char *)map + i / 8, 0xff, npages / 8);
0577     else
0578         for (; npages; ++i, --npages)
0579             set_bit_le(i, map);
0580 }
0581 
0582 static inline u64 sanitize_msr(u64 msr)
0583 {
0584     msr &= ~MSR_HV;
0585     msr |= MSR_ME;
0586     return msr;
0587 }
0588 
0589 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
0590 static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
0591 {
0592     vcpu->arch.regs.ccr  = vcpu->arch.cr_tm;
0593     vcpu->arch.regs.xer = vcpu->arch.xer_tm;
0594     vcpu->arch.regs.link  = vcpu->arch.lr_tm;
0595     vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
0596     vcpu->arch.amr = vcpu->arch.amr_tm;
0597     vcpu->arch.ppr = vcpu->arch.ppr_tm;
0598     vcpu->arch.dscr = vcpu->arch.dscr_tm;
0599     vcpu->arch.tar = vcpu->arch.tar_tm;
0600     memcpy(vcpu->arch.regs.gpr, vcpu->arch.gpr_tm,
0601            sizeof(vcpu->arch.regs.gpr));
0602     vcpu->arch.fp  = vcpu->arch.fp_tm;
0603     vcpu->arch.vr  = vcpu->arch.vr_tm;
0604     vcpu->arch.vrsave = vcpu->arch.vrsave_tm;
0605 }
0606 
0607 static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu)
0608 {
0609     vcpu->arch.cr_tm  = vcpu->arch.regs.ccr;
0610     vcpu->arch.xer_tm = vcpu->arch.regs.xer;
0611     vcpu->arch.lr_tm  = vcpu->arch.regs.link;
0612     vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
0613     vcpu->arch.amr_tm = vcpu->arch.amr;
0614     vcpu->arch.ppr_tm = vcpu->arch.ppr;
0615     vcpu->arch.dscr_tm = vcpu->arch.dscr;
0616     vcpu->arch.tar_tm = vcpu->arch.tar;
0617     memcpy(vcpu->arch.gpr_tm, vcpu->arch.regs.gpr,
0618            sizeof(vcpu->arch.regs.gpr));
0619     vcpu->arch.fp_tm  = vcpu->arch.fp;
0620     vcpu->arch.vr_tm  = vcpu->arch.vr;
0621     vcpu->arch.vrsave_tm = vcpu->arch.vrsave;
0622 }
0623 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
0624 
0625 extern int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
0626                  unsigned long gpa, unsigned int level,
0627                  unsigned long mmu_seq, unsigned int lpid,
0628                  unsigned long *rmapp, struct rmap_nested **n_rmap);
0629 extern void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp,
0630                    struct rmap_nested **n_rmap);
0631 extern void kvmhv_update_nest_rmap_rc_list(struct kvm *kvm, unsigned long *rmapp,
0632                        unsigned long clr, unsigned long set,
0633                        unsigned long hpa, unsigned long nbytes);
0634 extern void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
0635                 const struct kvm_memory_slot *memslot,
0636                 unsigned long gpa, unsigned long hpa,
0637                 unsigned long nbytes);
0638 
0639 static inline pte_t *
0640 find_kvm_secondary_pte_unlocked(struct kvm *kvm, unsigned long ea,
0641                 unsigned *hshift)
0642 {
0643     pte_t *pte;
0644 
0645     pte = __find_linux_pte(kvm->arch.pgtable, ea, NULL, hshift);
0646     return pte;
0647 }
0648 
0649 static inline pte_t *find_kvm_secondary_pte(struct kvm *kvm, unsigned long ea,
0650                         unsigned *hshift)
0651 {
0652     pte_t *pte;
0653 
0654     VM_WARN(!spin_is_locked(&kvm->mmu_lock),
0655         "%s called with kvm mmu_lock not held \n", __func__);
0656     pte = __find_linux_pte(kvm->arch.pgtable, ea, NULL, hshift);
0657 
0658     return pte;
0659 }
0660 
0661 static inline pte_t *find_kvm_host_pte(struct kvm *kvm, unsigned long mmu_seq,
0662                        unsigned long ea, unsigned *hshift)
0663 {
0664     pte_t *pte;
0665 
0666     VM_WARN(!spin_is_locked(&kvm->mmu_lock),
0667         "%s called with kvm mmu_lock not held \n", __func__);
0668 
0669     if (mmu_invalidate_retry(kvm, mmu_seq))
0670         return NULL;
0671 
0672     pte = __find_linux_pte(kvm->mm->pgd, ea, NULL, hshift);
0673 
0674     return pte;
0675 }
0676 
0677 extern pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
0678                     unsigned long ea, unsigned *hshift);
0679 
0680 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
0681 
0682 #endif /* __ASM_KVM_BOOK3S_64_H__ */