#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>

#ifdef DEBUG_MMU
#define dprintk(X...) printk(KERN_INFO X)
#else
#define dprintk(X...) do { } while (0)
#endif

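/*
 * Find the guest SLB entry that translates the given effective address,
 * comparing 1T or 256M ESIDs depending on each entry's segment size.
 */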
static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
                struct kvm_vcpu *vcpu,
                gva_t eaddr)
{
        int i;
        u64 esid = GET_ESID(eaddr);
        u64 esid_1t = GET_ESID_1T(eaddr);

        for (i = 0; i < vcpu->arch.slb_nr; i++) {
                u64 cmp_esid = esid;

                if (!vcpu->arch.slb[i].valid)
                        continue;

                if (vcpu->arch.slb[i].tb)
                        cmp_esid = esid_1t;

                if (vcpu->arch.slb[i].esid == cmp_esid)
                        return &vcpu->arch.slb[i];
        }

        dprintk("KVM: No SLB entry found for 0x%lx [%llx | %llx]\n",
                eaddr, esid, esid_1t);
        for (i = 0; i < vcpu->arch.slb_nr; i++) {
                if (vcpu->arch.slb[i].vsid)
                        dprintk("  %d: %c%c%c %llx %llx\n", i,
                                vcpu->arch.slb[i].valid ? 'v' : ' ',
                                vcpu->arch.slb[i].large ? 'l' : ' ',
                                vcpu->arch.slb[i].tb ? 't' : ' ',
                                vcpu->arch.slb[i].esid,
                                vcpu->arch.slb[i].vsid);
        }

        return NULL;
}

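/*
 * Segment geometry helpers: a 1T segment uses a 40-bit ESID shift
 * (SID_SHIFT_1T), a 256M segment a 28-bit one (SID_SHIFT).
 */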
static int kvmppc_slb_sid_shift(struct kvmppc_slb *slbe)
{
        return slbe->tb ? SID_SHIFT_1T : SID_SHIFT;
}

static u64 kvmppc_slb_offset_mask(struct kvmppc_slb *slbe)
{
        return (1ul << kvmppc_slb_sid_shift(slbe)) - 1;
}

static u64 kvmppc_slb_calc_vpn(struct kvmppc_slb *slb, gva_t eaddr)
{
        eaddr &= kvmppc_slb_offset_mask(slb);

        return (eaddr >> VPN_SHIFT) |
                ((slb->vsid) << (kvmppc_slb_sid_shift(slb) - VPN_SHIFT));
}

static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
                                         bool data)
{
        struct kvmppc_slb *slb;

        slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
        if (!slb)
                return 0;

        return kvmppc_slb_calc_vpn(slb, eaddr);
}

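/*
 * Convert an MMU_PAGE_* constant to its page shift; unknown sizes
 * fall back to 4K (shift of 12).
 */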
static int mmu_pagesize(int mmu_pg)
{
        switch (mmu_pg) {
        case MMU_PAGE_64K:
                return 16;
        case MMU_PAGE_16M:
                return 24;
        }
        return 12;
}

static int kvmppc_mmu_book3s_64_get_pagesize(struct kvmppc_slb *slbe)
{
        return mmu_pagesize(slbe->base_page_size);
}

static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr)
{
        int p = kvmppc_mmu_book3s_64_get_pagesize(slbe);

        return ((eaddr & kvmppc_slb_offset_mask(slbe)) >> p);
}

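/*
 * Compute the host virtual address of the primary (or, if @second is
 * set, the secondary) PTEG that can hold the HPTE mapping @eaddr.
 */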
static hva_t kvmppc_mmu_book3s_64_get_pteg(struct kvm_vcpu *vcpu,
                                struct kvmppc_slb *slbe, gva_t eaddr,
                                bool second)
{
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
        u64 hash, pteg, htabsize;
        u32 ssize;
        hva_t r;
        u64 vpn;

        /* Mask of valid hash bits, from the HTABSIZE field of SDR1 */
        htabsize = (1ULL << ((vcpu_book3s->sdr1 & 0x1f) + 11)) - 1;

        vpn = kvmppc_slb_calc_vpn(slbe, eaddr);
        ssize = slbe->tb ? MMU_SEGSIZE_1T : MMU_SEGSIZE_256M;
        hash = hpt_hash(vpn, kvmppc_mmu_book3s_64_get_pagesize(slbe), ssize);
        if (second)
                hash = ~hash;
        hash &= ((1ULL << 39ULL) - 1ULL);
        hash &= htabsize;
        hash <<= 7ULL;

        pteg = vcpu_book3s->sdr1 & 0xfffffffffffc0000ULL;
        pteg |= hash;

        dprintk("MMU: page=0x%x sdr1=0x%llx pteg=0x%llx vsid=0x%llx\n",
                kvmppc_mmu_book3s_64_get_page(slbe, eaddr),
                vcpu_book3s->sdr1, pteg, slbe->vsid);

        /* When running a PAPR guest, SDR1 contains an HVA instead of a GPA */
        if (vcpu->arch.papr_enabled)
                r = pteg;
        else
                r = gfn_to_hva(vcpu->kvm, pteg >> PAGE_SHIFT);

        if (kvm_is_error_hva(r))
                return r;
        return r | (pteg & ~PAGE_MASK);
}

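/*
 * Build the abbreviated virtual page number (AVPN) used to match
 * against the first doubleword of a guest HPTE.
 */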
static u64 kvmppc_mmu_book3s_64_get_avpn(struct kvmppc_slb *slbe, gva_t eaddr)
{
        int p = kvmppc_mmu_book3s_64_get_pagesize(slbe);
        u64 avpn;

        avpn = kvmppc_mmu_book3s_64_get_page(slbe, eaddr);
        avpn |= slbe->vsid << (kvmppc_slb_sid_shift(slbe) - p);

        if (p < 16)
                avpn >>= ((80 - p) - 56) - 8;   /* 16 - p */
        else
                avpn <<= p - 16;

        return avpn;
}

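/*
 * Decode the actual page size from the second doubleword of an HPTE;
 * returns -1 if the encoding is not valid for the segment's base page
 * size.
 */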
static int decode_pagesize(struct kvmppc_slb *slbe, u64 r)
{
        switch (slbe->base_page_size) {
        case MMU_PAGE_64K:
                if ((r & 0xf000) == 0x1000)
                        return MMU_PAGE_64K;
                break;
        case MMU_PAGE_16M:
                if ((r & 0xff000) == 0)
                        return MMU_PAGE_16M;
                break;
        }
        return -1;
}

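/*
 * Translate a guest effective address into a guest real address by
 * searching the guest hashed page table, mirroring the hardware walk.
 */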
static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
                                      struct kvmppc_pte *gpte, bool data,
                                      bool iswrite)
{
        struct kvmppc_slb *slbe;
        hva_t ptegp;
        u64 pteg[16];
        u64 avpn = 0;
        u64 r;
        u64 v_val, v_mask;
        u64 eaddr_mask;
        int i;
        u8 pp, key = 0;
        bool found = false;
        bool second = false;
        int pgsize;
        ulong mp_ea = vcpu->arch.magic_page_ea;

        /* Magic page override */
        if (unlikely(mp_ea) &&
            unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) &&
            !(kvmppc_get_msr(vcpu) & MSR_PR)) {
                gpte->eaddr = eaddr;
                gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);
                /* Keep the page offset from the effective address */
                gpte->raddr = vcpu->arch.magic_page_pa | (eaddr & 0xfff);
                gpte->raddr &= KVM_PAM;
                gpte->may_execute = true;
                gpte->may_read = true;
                gpte->may_write = true;
                gpte->page_size = MMU_PAGE_4K;
                gpte->wimg = HPTE_R_M;

                return 0;
        }

        slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
        if (!slbe)
                goto no_seg_found;

        avpn = kvmppc_mmu_book3s_64_get_avpn(slbe, eaddr);
        v_val = avpn & HPTE_V_AVPN;

        if (slbe->tb)
                v_val |= SLB_VSID_B_1T;
        if (slbe->large)
                v_val |= HPTE_V_LARGE;
        v_val |= HPTE_V_VALID;

        v_mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_LARGE | HPTE_V_VALID |
                HPTE_V_SECONDARY;

        pgsize = slbe->large ? MMU_PAGE_16M : MMU_PAGE_4K;

        mutex_lock(&vcpu->kvm->arch.hpt_mutex);

do_second:
        ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu, slbe, eaddr, second);
        if (kvm_is_error_hva(ptegp))
                goto no_page_found;

        if (copy_from_user(pteg, (void __user *)ptegp, sizeof(pteg))) {
                printk_ratelimited(KERN_ERR
                        "KVM: Can't copy data from 0x%lx!\n", ptegp);
                goto no_page_found;
        }

        if ((kvmppc_get_msr(vcpu) & MSR_PR) && slbe->Kp)
                key = 4;
        else if (!(kvmppc_get_msr(vcpu) & MSR_PR) && slbe->Ks)
                key = 4;

        for (i = 0; i < 16; i += 2) {
                u64 pte0 = be64_to_cpu(pteg[i]);
                u64 pte1 = be64_to_cpu(pteg[i + 1]);

                /* Check all relevant fields of 1st dword */
                if ((pte0 & v_mask) == v_val) {
                        /* If large page bit is set, check pgsize encoding */
                        if (slbe->large &&
                            (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
                                pgsize = decode_pagesize(slbe, pte1);
                                if (pgsize < 0)
                                        continue;
                        }
                        found = true;
                        break;
                }
        }

        if (!found) {
                if (second)
                        goto no_page_found;
                v_val |= HPTE_V_SECONDARY;
                second = true;
                goto do_second;
        }

        r = be64_to_cpu(pteg[i + 1]);
        pp = (r & HPTE_R_PP) | key;
        if (r & HPTE_R_PP0)
                pp |= 8;

        gpte->eaddr = eaddr;
        gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);

        eaddr_mask = (1ull << mmu_pagesize(pgsize)) - 1;
        gpte->raddr = (r & HPTE_R_RPN & ~eaddr_mask) | (eaddr & eaddr_mask);
        gpte->page_size = pgsize;
        gpte->may_execute = ((r & HPTE_R_N) ? false : true);
        if (unlikely(vcpu->arch.disable_kernel_nx) &&
            !(kvmppc_get_msr(vcpu) & MSR_PR))
                gpte->may_execute = true;
        gpte->may_read = false;
        gpte->may_write = false;
        gpte->wimg = r & HPTE_R_WIMG;

        switch (pp) {
        case 0:
        case 1:
        case 2:
        case 6:
                gpte->may_write = true;
                fallthrough;
        case 3:
        case 5:
        case 7:
        case 10:
                gpte->may_read = true;
                break;
        }

        dprintk("KVM MMU: Translated 0x%lx [0x%llx] -> 0x%llx "
                "-> 0x%lx\n",
                eaddr, avpn, gpte->vpage, gpte->raddr);

        /*
         * Update PTE R and C bits, so the guest's swapper knows we used the
         * page.
         */
        if (gpte->may_read && !(r & HPTE_R_R)) {
                /*
                 * Set the accessed flag.
                 * We have to write this back with a single byte write
                 * because another vcpu may be accessing this PTE
                 * concurrently.
                 */
                char __user *addr = (char __user *) (ptegp + (i + 1) * sizeof(u64));
                r |= HPTE_R_R;
                put_user(r >> 8, addr + 6);
        }
        if (iswrite && gpte->may_write && !(r & HPTE_R_C)) {
                /* Set the dirty flag, also with a single byte write */
                char __user *addr = (char __user *) (ptegp + (i + 1) * sizeof(u64));
                r |= HPTE_R_C;
                put_user(r, addr + 7);
        }

        mutex_unlock(&vcpu->kvm->arch.hpt_mutex);

        if (!gpte->may_read || (iswrite && !gpte->may_write))
                return -EPERM;
        return 0;

no_page_found:
        mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
        return -ENOENT;

no_seg_found:
        dprintk("KVM MMU: Trigger segment fault\n");
        return -EINVAL;
}

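/*
 * Emulate slbmte: decode the RB/RS register images and install the
 * entry into the vcpu's software copy of the guest SLB.
 */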
static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
{
        u64 esid, esid_1t;
        int slb_nr;
        struct kvmppc_slb *slbe;

        dprintk("KVM MMU: slbmte(0x%llx, 0x%llx)\n", rs, rb);

        esid = GET_ESID(rb);
        esid_1t = GET_ESID_1T(rb);
        slb_nr = rb & 0xfff;

        if (slb_nr >= vcpu->arch.slb_nr)
                return;

        slbe = &vcpu->arch.slb[slb_nr];

        slbe->large = (rs & SLB_VSID_L) ? 1 : 0;
        slbe->tb    = (rs & SLB_VSID_B_1T) ? 1 : 0;
        slbe->esid  = slbe->tb ? esid_1t : esid;
        slbe->vsid  = (rs & ~SLB_VSID_B) >> (kvmppc_slb_sid_shift(slbe) - 16);
        slbe->valid = (rb & SLB_ESID_V) ? 1 : 0;
        slbe->Ks    = (rs & SLB_VSID_KS) ? 1 : 0;
        slbe->Kp    = (rs & SLB_VSID_KP) ? 1 : 0;
        slbe->nx    = (rs & SLB_VSID_N) ? 1 : 0;
        slbe->class = (rs & SLB_VSID_C) ? 1 : 0;

        slbe->base_page_size = MMU_PAGE_4K;
        if (slbe->large) {
                if (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE) {
                        switch (rs & SLB_VSID_LP) {
                        case SLB_VSID_LP_00:
                                slbe->base_page_size = MMU_PAGE_16M;
                                break;
                        case SLB_VSID_LP_01:
                                slbe->base_page_size = MMU_PAGE_64K;
                                break;
                        }
                } else
                        slbe->base_page_size = MMU_PAGE_16M;
        }

        slbe->orige = rb & (ESID_MASK | SLB_ESID_V);
        slbe->origv = rs;

        /* Map the new segment in */
        kvmppc_mmu_map_segment(vcpu, esid << SID_SHIFT);
}

static int kvmppc_mmu_book3s_64_slbfee(struct kvm_vcpu *vcpu, gva_t eaddr,
                                       ulong *ret_slb)
{
        struct kvmppc_slb *slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);

        if (slbe) {
                *ret_slb = slbe->origv;
                return 0;
        }
        *ret_slb = 0;
        return -ENOENT;
}

static u64 kvmppc_mmu_book3s_64_slbmfee(struct kvm_vcpu *vcpu, u64 slb_nr)
{
        struct kvmppc_slb *slbe;

        if (slb_nr >= vcpu->arch.slb_nr)
                return 0;

        slbe = &vcpu->arch.slb[slb_nr];

        return slbe->orige;
}

static u64 kvmppc_mmu_book3s_64_slbmfev(struct kvm_vcpu *vcpu, u64 slb_nr)
{
        struct kvmppc_slb *slbe;

        if (slb_nr >= vcpu->arch.slb_nr)
                return 0;

        slbe = &vcpu->arch.slb[slb_nr];

        return slbe->origv;
}

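/*
 * Emulate slbie: invalidate the SLB entry matching @ea and flush the
 * corresponding shadow segment mappings.
 */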
static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea)
{
        struct kvmppc_slb *slbe;
        u64 seg_size;

        dprintk("KVM MMU: slbie(0x%llx)\n", ea);

        slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);

        if (!slbe)
                return;

        dprintk("KVM MMU: slbie(0x%llx, 0x%llx)\n", ea, slbe->esid);

        slbe->valid = false;
        slbe->orige = 0;
        slbe->origv = 0;

        seg_size = 1ull << kvmppc_slb_sid_shift(slbe);
        kvmppc_mmu_flush_segment(vcpu, ea & ~(seg_size - 1), seg_size);
}

static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
{
        int i;

        dprintk("KVM MMU: slbia()\n");

        /* slbia leaves SLB entry 0 intact */
        for (i = 1; i < vcpu->arch.slb_nr; i++) {
                vcpu->arch.slb[i].valid = false;
                vcpu->arch.slb[i].orige = 0;
                vcpu->arch.slb[i].origv = 0;
        }

        if (kvmppc_get_msr(vcpu) & MSR_IR) {
                kvmppc_mmu_flush_segments(vcpu);
                kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
        }
}

static void kvmppc_mmu_book3s_64_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
                                        ulong value)
{
        u64 rb = 0, rs = 0;

        /*
         * Emulate mtsrin by building an equivalent SLB entry from the
         * segment register number and value, then feeding it to slbmte.
         */

        dprintk("KVM MMU: mtsrin(0x%x, 0x%lx)\n", srnum, value);

        /* ESID = srnum */
        rb |= (srnum & 0xf) << 28;
        /* Set the valid bit */
        rb |= 1 << 27;
        /* Index = ESID */
        rb |= srnum;

        /* VSID = VSID */
        rs |= (value & 0xfffffff) << 12;
        /* flags = flags */
        rs |= ((value >> 28) & 0x7) << 9;

        kvmppc_mmu_book3s_64_slbmte(vcpu, rs, rb);
}

static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va,
                                       bool large)
{
        u64 mask = 0xFFFFFFFFFULL;
        unsigned long i;
        struct kvm_vcpu *v;

        dprintk("KVM MMU: tlbie(0x%lx)\n", va);

        /*
         * The tlbie instruction changed behaviour starting with
         * POWER6.  POWER6 and later don't have the large page flag
         * in the instruction but in the RB value, along with bits
         * indicating page and segment sizes.
         */
        if (vcpu->arch.hflags & BOOK3S_HFLAG_NEW_TLBIE) {
                /* POWER6 or later */
                if (va & 1) {           /* L bit */
                        if ((va & 0xf000) == 0x1000)
                                mask = 0xFFFFFFFF0ULL; /* 64k page */
                        else
                                mask = 0xFFFFFF000ULL; /* 16M page */
                }
        } else {
                /* older processors, e.g. PPC970 */
                if (large)
                        mask = 0xFFFFFF000ULL;
        }

        kvm_for_each_vcpu(i, v, vcpu->kvm)
                kvmppc_mmu_pte_vflush(v, va >> 12, mask);
}

#ifdef CONFIG_PPC_64K_PAGES
static int segment_contains_magic_page(struct kvm_vcpu *vcpu, ulong esid)
{
        ulong mp_ea = vcpu->arch.magic_page_ea;

        return mp_ea && !(kvmppc_get_msr(vcpu) & MSR_PR) &&
                (mp_ea >> SID_SHIFT) == esid;
}
#endif

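/*
 * Resolve a guest ESID to the VSID used for shadow (host-side)
 * mappings, tagging real-mode, protection and 64k-page variants.
 */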
static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
                                             u64 *vsid)
{
        ulong ea = esid << SID_SHIFT;
        struct kvmppc_slb *slb;
        u64 gvsid = esid;
        ulong mp_ea = vcpu->arch.magic_page_ea;
        int pagesize = MMU_PAGE_64K;
        u64 msr = kvmppc_get_msr(vcpu);

        if (msr & (MSR_DR|MSR_IR)) {
                slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
                if (slb) {
                        gvsid = slb->vsid;
                        pagesize = slb->base_page_size;
                        if (slb->tb) {
                                gvsid <<= SID_SHIFT_1T - SID_SHIFT;
                                gvsid |= esid & ((1ul << (SID_SHIFT_1T - SID_SHIFT)) - 1);
                                gvsid |= VSID_1T;
                        }
                }
        }

        switch (msr & (MSR_DR|MSR_IR)) {
        case 0:
                gvsid = VSID_REAL | esid;
                break;
        case MSR_IR:
                gvsid |= VSID_REAL_IR;
                break;
        case MSR_DR:
                gvsid |= VSID_REAL_DR;
                break;
        case MSR_DR|MSR_IR:
                if (!slb)
                        goto no_slb;

                break;
        default:
                BUG();
                break;
        }

#ifdef CONFIG_PPC_64K_PAGES
        /*
         * Mark this as a 64k segment if the host is using
         * 64k pages, the host MMU supports 64k pages and
         * the guest segment page size is >= 64k,
         * but not if this segment contains the magic page.
         */
        if (pagesize >= MMU_PAGE_64K &&
            mmu_psize_defs[MMU_PAGE_64K].shift &&
            !segment_contains_magic_page(vcpu, esid))
                gvsid |= VSID_64K;
#endif

        if (kvmppc_get_msr(vcpu) & MSR_PR)
                gvsid |= VSID_PR;

        *vsid = gvsid;
        return 0;

no_slb:
        /* Catch magic page case */
        if (unlikely(mp_ea) &&
            unlikely(esid == (mp_ea >> SID_SHIFT)) &&
            !(kvmppc_get_msr(vcpu) & MSR_PR)) {
                *vsid = VSID_REAL | esid;
                return 0;
        }

        return -EINVAL;
}

static bool kvmppc_mmu_book3s_64_is_dcbz32(struct kvm_vcpu *vcpu)
{
        return (to_book3s(vcpu)->hid[5] & 0x80);
}

void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu)
{
        struct kvmppc_mmu *mmu = &vcpu->arch.mmu;

        mmu->mfsrin = NULL;
        mmu->mtsrin = kvmppc_mmu_book3s_64_mtsrin;
        mmu->slbmte = kvmppc_mmu_book3s_64_slbmte;
        mmu->slbmfee = kvmppc_mmu_book3s_64_slbmfee;
        mmu->slbmfev = kvmppc_mmu_book3s_64_slbmfev;
        mmu->slbfee = kvmppc_mmu_book3s_64_slbfee;
        mmu->slbie = kvmppc_mmu_book3s_64_slbie;
        mmu->slbia = kvmppc_mmu_book3s_64_slbia;
        mmu->xlate = kvmppc_mmu_book3s_64_xlate;
        mmu->tlbie = kvmppc_mmu_book3s_64_tlbie;
        mmu->esid_to_vsid = kvmppc_mmu_book3s_64_esid_to_vsid;
        mmu->ea_to_vp = kvmppc_mmu_book3s_64_ea_to_vp;
        mmu->is_dcbz32 = kvmppc_mmu_book3s_64_is_dcbz32;

        vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
}