// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corporation, 2018
 * Authors Suraj Jitindar Singh <sjitindarsingh@gmail.com>
 *	   Paul Mackerras <paulus@ozlabs.org>
 *
 * Description: KVM functions specific to running nested KVM-HV guests
 * on Book3S processors (specifically POWER9 and later).
 */

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/llist.h>
#include <linux/pgtable.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu.h>
#include <asm/pgalloc.h>
#include <asm/pte-walk.h>
#include <asm/reg.h>
#include <asm/plpar_wrappers.h>
#include <asm/firmware.h>

static struct patb_entry *pseries_partition_tb;

static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp);
static void kvmhv_free_memslot_nest_rmap(struct kvm_memory_slot *free);

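/* Save the L1 vcpu's current hypervisor-privileged register state. */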
void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	hr->pcr = vc->pcr | PCR_MASK;
	hr->dpdes = vc->dpdes;
	hr->hfscr = vcpu->arch.hfscr;
	hr->tb_offset = vc->tb_offset;
	hr->dawr0 = vcpu->arch.dawr0;
	hr->dawrx0 = vcpu->arch.dawrx0;
	hr->ciabr = vcpu->arch.ciabr;
	hr->purr = vcpu->arch.purr;
	hr->spurr = vcpu->arch.spurr;
	hr->ic = vcpu->arch.ic;
	hr->vtb = vc->vtb;
	hr->srr0 = vcpu->arch.shregs.srr0;
	hr->srr1 = vcpu->arch.shregs.srr1;
	hr->sprg[0] = vcpu->arch.shregs.sprg0;
	hr->sprg[1] = vcpu->arch.shregs.sprg1;
	hr->sprg[2] = vcpu->arch.shregs.sprg2;
	hr->sprg[3] = vcpu->arch.shregs.sprg3;
	hr->pidr = vcpu->arch.pid;
	hr->cfar = vcpu->arch.cfar;
	hr->ppr = vcpu->arch.ppr;
	hr->dawr1 = vcpu->arch.dawr1;
	hr->dawrx1 = vcpu->arch.dawrx1;
}

/* Use noinline_for_stack due to https://bugs.llvm.org/show_bug.cgi?id=49610 */
static noinline_for_stack void byteswap_pt_regs(struct pt_regs *regs)
{
	unsigned long *addr = (unsigned long *) regs;

	for (; addr < ((unsigned long *) (regs + 1)); addr++)
		*addr = swab64(*addr);
}

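/*
 * Byte-swap a hv_guest_state, for an L1 guest running with the opposite
 * endianness to the host; PCR_MASK is reapplied to hr->pcr after the swap.
 */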
static void byteswap_hv_regs(struct hv_guest_state *hr)
{
	hr->version = swab64(hr->version);
	hr->lpid = swab32(hr->lpid);
	hr->vcpu_token = swab32(hr->vcpu_token);
	hr->lpcr = swab64(hr->lpcr);
	hr->pcr = swab64(hr->pcr) | PCR_MASK;
	hr->amor = swab64(hr->amor);
	hr->dpdes = swab64(hr->dpdes);
	hr->hfscr = swab64(hr->hfscr);
	hr->tb_offset = swab64(hr->tb_offset);
	hr->dawr0 = swab64(hr->dawr0);
	hr->dawrx0 = swab64(hr->dawrx0);
	hr->ciabr = swab64(hr->ciabr);
	hr->hdec_expiry = swab64(hr->hdec_expiry);
	hr->purr = swab64(hr->purr);
	hr->spurr = swab64(hr->spurr);
	hr->ic = swab64(hr->ic);
	hr->vtb = swab64(hr->vtb);
	hr->hdar = swab64(hr->hdar);
	hr->hdsisr = swab64(hr->hdsisr);
	hr->heir = swab64(hr->heir);
	hr->asdr = swab64(hr->asdr);
	hr->srr0 = swab64(hr->srr0);
	hr->srr1 = swab64(hr->srr1);
	hr->sprg[0] = swab64(hr->sprg[0]);
	hr->sprg[1] = swab64(hr->sprg[1]);
	hr->sprg[2] = swab64(hr->sprg[2]);
	hr->sprg[3] = swab64(hr->sprg[3]);
	hr->pidr = swab64(hr->pidr);
	hr->cfar = swab64(hr->cfar);
	hr->ppr = swab64(hr->ppr);
	hr->dawr1 = swab64(hr->dawr1);
	hr->dawrx1 = swab64(hr->dawrx1);
}

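/*
 * Record the exit state to be returned to the L1 guest after running one
 * of its L2 vcpus; fault and interrupt details depend on the exit reason.
 */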
static void save_hv_return_state(struct kvm_vcpu *vcpu,
				 struct hv_guest_state *hr)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	hr->dpdes = vc->dpdes;
	hr->purr = vcpu->arch.purr;
	hr->spurr = vcpu->arch.spurr;
	hr->ic = vcpu->arch.ic;
	hr->vtb = vc->vtb;
	hr->srr0 = vcpu->arch.shregs.srr0;
	hr->srr1 = vcpu->arch.shregs.srr1;
	hr->sprg[0] = vcpu->arch.shregs.sprg0;
	hr->sprg[1] = vcpu->arch.shregs.sprg1;
	hr->sprg[2] = vcpu->arch.shregs.sprg2;
	hr->sprg[3] = vcpu->arch.shregs.sprg3;
	hr->pidr = vcpu->arch.pid;
	hr->cfar = vcpu->arch.cfar;
	hr->ppr = vcpu->arch.ppr;
	switch (vcpu->arch.trap) {
	case BOOK3S_INTERRUPT_H_DATA_STORAGE:
		hr->hdar = vcpu->arch.fault_dar;
		hr->hdsisr = vcpu->arch.fault_dsisr;
		hr->asdr = vcpu->arch.fault_gpa;
		break;
	case BOOK3S_INTERRUPT_H_INST_STORAGE:
		hr->asdr = vcpu->arch.fault_gpa;
		break;
	case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
		hr->hfscr = ((~HFSCR_INTR_CAUSE & hr->hfscr) |
			     (HFSCR_INTR_CAUSE & vcpu->arch.hfscr));
		break;
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
		hr->heir = vcpu->arch.emul_inst;
		break;
	}
}

static void restore_hv_regs(struct kvm_vcpu *vcpu, const struct hv_guest_state *hr)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	vc->pcr = hr->pcr | PCR_MASK;
	vc->dpdes = hr->dpdes;
	vcpu->arch.hfscr = hr->hfscr;
	vcpu->arch.dawr0 = hr->dawr0;
	vcpu->arch.dawrx0 = hr->dawrx0;
	vcpu->arch.ciabr = hr->ciabr;
	vcpu->arch.purr = hr->purr;
	vcpu->arch.spurr = hr->spurr;
	vcpu->arch.ic = hr->ic;
	vc->vtb = hr->vtb;
	vcpu->arch.shregs.srr0 = hr->srr0;
	vcpu->arch.shregs.srr1 = hr->srr1;
	vcpu->arch.shregs.sprg0 = hr->sprg[0];
	vcpu->arch.shregs.sprg1 = hr->sprg[1];
	vcpu->arch.shregs.sprg2 = hr->sprg[2];
	vcpu->arch.shregs.sprg3 = hr->sprg[3];
	vcpu->arch.pid = hr->pidr;
	vcpu->arch.cfar = hr->cfar;
	vcpu->arch.ppr = hr->ppr;
	vcpu->arch.dawr1 = hr->dawr1;
	vcpu->arch.dawrx1 = hr->dawrx1;
}

void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
				   struct hv_guest_state *hr)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	vc->dpdes = hr->dpdes;
	vcpu->arch.hfscr = hr->hfscr;
	vcpu->arch.purr = hr->purr;
	vcpu->arch.spurr = hr->spurr;
	vcpu->arch.ic = hr->ic;
	vc->vtb = hr->vtb;
	vcpu->arch.fault_dar = hr->hdar;
	vcpu->arch.fault_dsisr = hr->hdsisr;
	vcpu->arch.fault_gpa = hr->asdr;
	vcpu->arch.emul_inst = hr->heir;
	vcpu->arch.shregs.srr0 = hr->srr0;
	vcpu->arch.shregs.srr1 = hr->srr1;
	vcpu->arch.shregs.sprg0 = hr->sprg[0];
	vcpu->arch.shregs.sprg1 = hr->sprg[1];
	vcpu->arch.shregs.sprg2 = hr->sprg[2];
	vcpu->arch.shregs.sprg3 = hr->sprg[3];
	vcpu->arch.pid = hr->pidr;
	vcpu->arch.cfar = hr->cfar;
	vcpu->arch.ppr = hr->ppr;
}

static void kvmhv_nested_mmio_needed(struct kvm_vcpu *vcpu, u64 regs_ptr)
{
	/* No need to reflect the page fault to L1, we've handled it */
	vcpu->arch.trap = 0;

	/*
	 * Since the L2 gprs have already been written back into L1 memory when
	 * we complete the mmio, store the L1 memory location of the L2 gpr
	 * being loaded into by the mmio so that the loaded value can be
	 * written there in kvmppc_complete_mmio_load()
	 */
	if (((vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) == KVM_MMIO_REG_GPR)
	    && (vcpu->mmio_is_write == 0)) {
		vcpu->arch.nested_io_gpr = (gpa_t) regs_ptr +
					   offsetof(struct pt_regs,
						    gpr[vcpu->arch.io_gpr]);
		vcpu->arch.io_gpr = KVM_MMIO_REG_NESTED_GPR;
	}
}

static int kvmhv_read_guest_state_and_regs(struct kvm_vcpu *vcpu,
					   struct hv_guest_state *l2_hv,
					   struct pt_regs *l2_regs,
					   u64 hv_ptr, u64 regs_ptr)
{
	int size;

	if (kvm_vcpu_read_guest(vcpu, hv_ptr, &l2_hv->version,
				sizeof(l2_hv->version)))
		return -1;

	if (kvmppc_need_byteswap(vcpu))
		l2_hv->version = swab64(l2_hv->version);

	/* Only read as much state as the advertised version defines */
	size = hv_guest_state_size(l2_hv->version);
	if (size < 0)
		return -1;

	return kvm_vcpu_read_guest(vcpu, hv_ptr, l2_hv, size) ||
		kvm_vcpu_read_guest(vcpu, regs_ptr, l2_regs,
				    sizeof(struct pt_regs));
}

static int kvmhv_write_guest_state_and_regs(struct kvm_vcpu *vcpu,
					    struct hv_guest_state *l2_hv,
					    struct pt_regs *l2_regs,
					    u64 hv_ptr, u64 regs_ptr)
{
	int size;

	size = hv_guest_state_size(l2_hv->version);
	if (size < 0)
		return -1;

	return kvm_vcpu_write_guest(vcpu, hv_ptr, l2_hv, size) ||
		kvm_vcpu_write_guest(vcpu, regs_ptr, l2_regs,
				     sizeof(struct pt_regs));
}

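/*
 * Load the L2 guest's hypervisor register state, filtering out anything
 * the L1 hypervisor is not allowed to set (restricted LPCR bits, HFSCR
 * features not permitted for L1, hypervisor watchpoints and breakpoints).
 */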
static void load_l2_hv_regs(struct kvm_vcpu *vcpu,
			    const struct hv_guest_state *l2_hv,
			    const struct hv_guest_state *l1_hv, u64 *lpcr)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	u64 mask;

	restore_hv_regs(vcpu, l2_hv);

	/*
	 * Don't let L1 change LPCR bits for the L2 except these:
	 */
	mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD | LPCR_MER;

	/*
	 * Additional filtering is required depending on hardware
	 * and configuration.
	 */
	*lpcr = kvmppc_filter_lpcr_hv(vcpu->kvm,
				      (vc->lpcr & ~mask) | (*lpcr & mask));

	/*
	 * Don't let L1 enable features for L2 which we don't allow for L1,
	 * but preserve the interrupt cause field.
	 */
	vcpu->arch.hfscr = l2_hv->hfscr & (HFSCR_INTR_CAUSE | vcpu->arch.hfscr_permitted);

	/* Don't let data address watchpoint match in hypervisor state */
	vcpu->arch.dawrx0 = l2_hv->dawrx0 & ~DAWRX_HYP;
	vcpu->arch.dawrx1 = l2_hv->dawrx1 & ~DAWRX_HYP;

	/* Don't let completed instruction address breakpt match in HV state */
	if ((l2_hv->ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
		vcpu->arch.ciabr = l2_hv->ciabr & ~CIABR_PRIV;
}

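/*
 * Handle the H_ENTER_NESTED hcall: read the L2 vcpu state from L1 memory,
 * switch this vcpu to the L2 context, run until the L2 exits for a reason
 * L1 must handle, then restore the L1 state and write the L2 exit state
 * back to L1 memory.
 */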
long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
{
	long int err, r;
	struct kvm_nested_guest *l2;
	struct pt_regs l2_regs, saved_l1_regs;
	struct hv_guest_state l2_hv = {0}, saved_l1_hv;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	u64 hv_ptr, regs_ptr;
	u64 hdec_exp, lpcr;
	s64 delta_purr, delta_spurr, delta_ic, delta_vtb;

	if (vcpu->kvm->arch.l1_ptcr == 0)
		return H_NOT_AVAILABLE;

	if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr))
		return H_BAD_MODE;

	/* copy parameters in */
	hv_ptr = kvmppc_get_gpr(vcpu, 4);
	regs_ptr = kvmppc_get_gpr(vcpu, 5);
	kvm_vcpu_srcu_read_lock(vcpu);
	err = kvmhv_read_guest_state_and_regs(vcpu, &l2_hv, &l2_regs,
					      hv_ptr, regs_ptr);
	kvm_vcpu_srcu_read_unlock(vcpu);
	if (err)
		return H_PARAMETER;

	if (kvmppc_need_byteswap(vcpu))
		byteswap_hv_regs(&l2_hv);
	if (l2_hv.version > HV_GUEST_STATE_VERSION)
		return H_P2;

	if (kvmppc_need_byteswap(vcpu))
		byteswap_pt_regs(&l2_regs);
	if (l2_hv.vcpu_token >= NR_CPUS)
		return H_PARAMETER;

	/*
	 * L1 must have set up a suspended state to enter the L2 in a
	 * transactional state, and only in that case. These have to be
	 * filtered out here to prevent causing a TM Bad Thing in the
	 * host HRFID. We could synthesize a TM Bad Thing back to the L1
	 * here but there doesn't seem like much point.
	 */
	if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr)) {
		if (!MSR_TM_ACTIVE(l2_regs.msr))
			return H_BAD_MODE;
	} else {
		if (l2_regs.msr & MSR_TS_MASK)
			return H_BAD_MODE;
		if (WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_TS_MASK))
			return H_BAD_MODE;
	}

	/* translate lpid */
	l2 = kvmhv_get_nested(vcpu->kvm, l2_hv.lpid, true);
	if (!l2)
		return H_PARAMETER;
	if (!l2->l1_gr_to_hr) {
		mutex_lock(&l2->tlb_lock);
		kvmhv_update_ptbl_cache(l2);
		mutex_unlock(&l2->tlb_lock);
	}

	/* save l1 values of things */
	vcpu->arch.regs.msr = vcpu->arch.shregs.msr;
	saved_l1_regs = vcpu->arch.regs;
	kvmhv_save_hv_regs(vcpu, &saved_l1_hv);

	/* convert TB values/offsets to host (L0) values */
	hdec_exp = l2_hv.hdec_expiry - vc->tb_offset;
	vc->tb_offset += l2_hv.tb_offset;
	vcpu->arch.dec_expires += l2_hv.tb_offset;

	/* set L1 state to L2 state */
	vcpu->arch.nested = l2;
	vcpu->arch.nested_vcpu_id = l2_hv.vcpu_token;
	vcpu->arch.nested_hfscr = l2_hv.hfscr;
	vcpu->arch.regs = l2_regs;

	/* Guest must always run with ME enabled, HV disabled. */
	vcpu->arch.shregs.msr = (vcpu->arch.regs.msr | MSR_ME) & ~MSR_HV;

	lpcr = l2_hv.lpcr;
	load_l2_hv_regs(vcpu, &l2_hv, &saved_l1_hv, &lpcr);

	vcpu->arch.ret = RESUME_GUEST;
	vcpu->arch.trap = 0;
	do {
		r = kvmhv_run_single_vcpu(vcpu, hdec_exp, lpcr);
	} while (is_kvmppc_resume_guest(r));

	/* save L2 state for return */
	l2_regs = vcpu->arch.regs;
	l2_regs.msr = vcpu->arch.shregs.msr;
	delta_purr = vcpu->arch.purr - l2_hv.purr;
	delta_spurr = vcpu->arch.spurr - l2_hv.spurr;
	delta_ic = vcpu->arch.ic - l2_hv.ic;
	delta_vtb = vc->vtb - l2_hv.vtb;
	save_hv_return_state(vcpu, &l2_hv);

	/* restore L1 state */
	vcpu->arch.nested = NULL;
	vcpu->arch.regs = saved_l1_regs;
	vcpu->arch.shregs.msr = saved_l1_regs.msr & ~MSR_TS_MASK;
	/* set L1 state to pending event */
	if (l2_regs.msr & MSR_TS_MASK)
		vcpu->arch.shregs.msr |= MSR_TS_S;
	vc->tb_offset = saved_l1_hv.tb_offset;
	/* XXX: is this always the same delta as saved_l1_hv.tb_offset? */
	vcpu->arch.dec_expires -= l2_hv.tb_offset;
	restore_hv_regs(vcpu, &saved_l1_hv);
	vcpu->arch.purr += delta_purr;
	vcpu->arch.spurr += delta_spurr;
	vcpu->arch.ic += delta_ic;
	vc->vtb += delta_vtb;

	kvmhv_put_nested(l2);

	/* copy l2_hv_state and regs back to guest */
	if (kvmppc_need_byteswap(vcpu)) {
		byteswap_hv_regs(&l2_hv);
		byteswap_pt_regs(&l2_regs);
	}
	kvm_vcpu_srcu_read_lock(vcpu);
	err = kvmhv_write_guest_state_and_regs(vcpu, &l2_hv, &l2_regs,
					       hv_ptr, regs_ptr);
	kvm_vcpu_srcu_read_unlock(vcpu);
	if (err)
		return H_AUTHORITY;

	if (r == -EINTR)
		return H_INTERRUPT;

	if (vcpu->mmio_needed) {
		kvmhv_nested_mmio_needed(vcpu, regs_ptr);
		return H_TOO_HARD;
	}

	return vcpu->arch.trap;
}

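/*
 * Module init: when running as an L1 hypervisor under a pseries L0,
 * allocate a partition table for our nested (L2) guests and register it
 * with the L0 via H_SET_PARTITION_TABLE.
 */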
long kvmhv_nested_init(void)
{
	long int ptb_order;
	unsigned long ptcr;
	long rc;

	if (!kvmhv_on_pseries())
		return 0;
	if (!radix_enabled())
		return -ENODEV;

	/* Partition table entry is 1<<4 bytes in size, hence the 4. */
	ptb_order = KVM_MAX_NESTED_GUESTS_SHIFT + 4;
	/* Minimum partition table size is 1<<12 bytes */
	if (ptb_order < 12)
		ptb_order = 12;
	pseries_partition_tb = kmalloc(sizeof(struct patb_entry) << ptb_order,
				       GFP_KERNEL);
	if (!pseries_partition_tb) {
		pr_err("kvm-hv: failed to allocate nested partition table\n");
		return -ENOMEM;
	}

	ptcr = __pa(pseries_partition_tb) | (ptb_order - 12);
	rc = plpar_hcall_norets(H_SET_PARTITION_TABLE, ptcr);
	if (rc != H_SUCCESS) {
		pr_err("kvm-hv: Parent hypervisor does not support nesting (rc=%ld)\n",
		       rc);
		kfree(pseries_partition_tb);
		pseries_partition_tb = NULL;
		return -ENODEV;
	}

	return 0;
}

void kvmhv_nested_exit(void)
{
	/*
	 * N.B. the kvmhv_on_pseries() test is there because it enables
	 * the compiler to remove the call to plpar_hcall_norets()
	 * when CONFIG_PPC_PSERIES=n.
	 */
	if (kvmhv_on_pseries() && pseries_partition_tb) {
		plpar_hcall_norets(H_SET_PARTITION_TABLE, 0);
		kfree(pseries_partition_tb);
		pseries_partition_tb = NULL;
	}
}

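/*
 * Invalidate all translations cached by the hardware for this (shadow)
 * LPID; done directly on bare metal, or via hcall when running under a
 * pseries L0 (using H_RPT_INVALIDATE if the firmware supports it).
 */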
static void kvmhv_flush_lpid(unsigned int lpid)
{
	long rc;

	if (!kvmhv_on_pseries()) {
		radix__flush_all_lpid(lpid);
		return;
	}

	if (!firmware_has_feature(FW_FEATURE_RPT_INVALIDATE))
		rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(2, 0, 1),
					lpid, TLBIEL_INVAL_SET_LPID);
	else
		rc = pseries_rpt_invalidate(lpid, H_RPTI_TARGET_CMMU,
					    H_RPTI_TYPE_NESTED |
					    H_RPTI_TYPE_TLB | H_RPTI_TYPE_PWC |
					    H_RPTI_TYPE_PAT,
					    H_RPTI_PAGE_ALL, 0, -1UL);
	if (rc)
		pr_err("KVM: TLB LPID invalidation hcall failed, rc=%ld\n", rc);
}

void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1)
{
	if (!kvmhv_on_pseries()) {
		mmu_partition_table_set_entry(lpid, dw0, dw1, true);
		return;
	}

	pseries_partition_tb[lpid].patb0 = cpu_to_be64(dw0);
	pseries_partition_tb[lpid].patb1 = cpu_to_be64(dw1);
	/* L0 will do the necessary barriers */
	kvmhv_flush_lpid(lpid);
}

static void kvmhv_set_nested_ptbl(struct kvm_nested_guest *gp)
{
	unsigned long dw0;

	dw0 = PATB_HR | radix__get_tree_size() |
		__pa(gp->shadow_pgtable) | RADIX_PGD_INDEX_SIZE;
	kvmhv_set_ptbl_entry(gp->shadow_lpid, dw0, gp->process_table);
}

/*
 * Handle the H_SET_PARTITION_TABLE hcall.
 * r4 = guest real address of partition table + log_2(size) - 12
 * (formatted as for the PTCR).
 */
long kvmhv_set_partition_table(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long ptcr = kvmppc_get_gpr(vcpu, 4);
	int srcu_idx;
	long ret = H_SUCCESS;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	/* Check partition size and base address. */
	if ((ptcr & PRTS_MASK) + 12 - 4 > KVM_MAX_NESTED_GUESTS_SHIFT ||
	    !kvm_is_visible_gfn(vcpu->kvm, (ptcr & PRTB_MASK) >> PAGE_SHIFT))
		ret = H_PARAMETER;
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	if (ret == H_SUCCESS)
		kvm->arch.l1_ptcr = ptcr;

	return ret;
}

/*
 * Handle the H_COPY_TOFROM_GUEST hcall.
 * r4 = L1 lpid of nested guest
 * r5 = pid
 * r6 = eaddr to access
 * r7 = to buffer (L1 gpa)
 * r8 = from buffer (L1 gpa)
 * r9 = n bytes to copy
 */
long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu)
{
	struct kvm_nested_guest *gp;
	int l1_lpid = kvmppc_get_gpr(vcpu, 4);
	int pid = kvmppc_get_gpr(vcpu, 5);
	gva_t eaddr = kvmppc_get_gpr(vcpu, 6);
	gpa_t gp_to = (gpa_t) kvmppc_get_gpr(vcpu, 7);
	gpa_t gp_from = (gpa_t) kvmppc_get_gpr(vcpu, 8);
	void *buf;
	unsigned long n = kvmppc_get_gpr(vcpu, 9);
	bool is_load = !!gp_to;
	long rc;

	if (gp_to && gp_from) /* One must be NULL to determine the direction */
		return H_PARAMETER;

	if (eaddr & (0xFFFUL << 52))
		return H_PARAMETER;

	buf = kzalloc(n, GFP_KERNEL | __GFP_NOWARN);
	if (!buf)
		return H_NO_MEM;

	gp = kvmhv_get_nested(vcpu->kvm, l1_lpid, false);
	if (!gp) {
		rc = H_PARAMETER;
		goto out_free;
	}

	mutex_lock(&gp->tlb_lock);

	if (is_load) {
		/* Load from the nested guest into our buffer */
		rc = __kvmhv_copy_tofrom_guest_radix(gp->shadow_lpid, pid,
						     eaddr, buf, NULL, n);
		if (rc)
			goto not_found;

		/* Write what was loaded into our buffer back to the L1 guest */
		kvm_vcpu_srcu_read_lock(vcpu);
		rc = kvm_vcpu_write_guest(vcpu, gp_to, buf, n);
		kvm_vcpu_srcu_read_unlock(vcpu);
		if (rc)
			goto not_found;
	} else {
		/* Load from the L1 guest into our buffer */
		kvm_vcpu_srcu_read_lock(vcpu);
		rc = kvm_vcpu_read_guest(vcpu, gp_from, buf, n);
		kvm_vcpu_srcu_read_unlock(vcpu);
		if (rc)
			goto not_found;

		/* Write what was loaded into our buffer to the nested guest */
		rc = __kvmhv_copy_tofrom_guest_radix(gp->shadow_lpid, pid,
						     eaddr, NULL, buf, n);
		if (rc)
			goto not_found;
	}

out_unlock:
	mutex_unlock(&gp->tlb_lock);
	kvmhv_put_nested(gp);
out_free:
	kfree(buf);
	return rc;
not_found:
	rc = H_NOT_FOUND;
	goto out_unlock;
}

/*
 * Reload the partition table entry for a guest.
 * Caller must hold gp->tlb_lock.
 */
static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp)
{
	int ret;
	struct patb_entry ptbl_entry;
	unsigned long ptbl_addr;
	struct kvm *kvm = gp->l1_host;

	ret = -EFAULT;
	ptbl_addr = (kvm->arch.l1_ptcr & PRTB_MASK) + (gp->l1_lpid << 4);
	if (gp->l1_lpid < (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4))) {
		int srcu_idx = srcu_read_lock(&kvm->srcu);
		ret = kvm_read_guest(kvm, ptbl_addr,
				     &ptbl_entry, sizeof(ptbl_entry));
		srcu_read_unlock(&kvm->srcu, srcu_idx);
	}
	if (ret) {
		gp->l1_gr_to_hr = 0;
		gp->process_table = 0;
	} else {
		gp->l1_gr_to_hr = be64_to_cpu(ptbl_entry.patb0);
		gp->process_table = be64_to_cpu(ptbl_entry.patb1);
	}
	kvmhv_set_nested_ptbl(gp);
}

void kvmhv_vm_nested_init(struct kvm *kvm)
{
	idr_init(&kvm->arch.kvm_nested_guest_idr);
}

static struct kvm_nested_guest *__find_nested(struct kvm *kvm, int lpid)
{
	return idr_find(&kvm->arch.kvm_nested_guest_idr, lpid);
}

static bool __prealloc_nested(struct kvm *kvm, int lpid)
{
	if (idr_alloc(&kvm->arch.kvm_nested_guest_idr,
				NULL, lpid, lpid + 1, GFP_KERNEL) != lpid)
		return false;
	return true;
}

static void __add_nested(struct kvm *kvm, int lpid, struct kvm_nested_guest *gp)
{
	if (idr_replace(&kvm->arch.kvm_nested_guest_idr, gp, lpid))
		WARN_ON(1);
}

static void __remove_nested(struct kvm *kvm, int lpid)
{
	idr_remove(&kvm->arch.kvm_nested_guest_idr, lpid);
}

static struct kvm_nested_guest *kvmhv_alloc_nested(struct kvm *kvm, unsigned int lpid)
{
	struct kvm_nested_guest *gp;
	long shadow_lpid;

	gp = kzalloc(sizeof(*gp), GFP_KERNEL);
	if (!gp)
		return NULL;
	gp->l1_host = kvm;
	gp->l1_lpid = lpid;
	mutex_init(&gp->tlb_lock);
	gp->shadow_pgtable = pgd_alloc(kvm->mm);
	if (!gp->shadow_pgtable)
		goto out_free;
	shadow_lpid = kvmppc_alloc_lpid();
	if (shadow_lpid < 0)
		goto out_free2;
	gp->shadow_lpid = shadow_lpid;
	gp->radix = 1;

	memset(gp->prev_cpu, -1, sizeof(gp->prev_cpu));

	return gp;

out_free2:
	pgd_free(kvm->mm, gp->shadow_pgtable);
out_free:
	kfree(gp);
	return NULL;
}

/*
 * Free up any resources allocated for a nested guest.
 */
static void kvmhv_release_nested(struct kvm_nested_guest *gp)
{
	struct kvm *kvm = gp->l1_host;

	if (gp->shadow_pgtable) {
		/*
		 * No vcpu is using this struct and no call to
		 * kvmhv_get_nested can find this struct,
		 * so we don't need to hold kvm->mmu_lock.
		 */
		kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable,
					  gp->shadow_lpid);
		pgd_free(kvm->mm, gp->shadow_pgtable);
	}
	kvmhv_set_ptbl_entry(gp->shadow_lpid, 0, 0);
	kvmppc_free_lpid(gp->shadow_lpid);
	kfree(gp);
}

static void kvmhv_remove_nested(struct kvm_nested_guest *gp)
{
	struct kvm *kvm = gp->l1_host;
	int lpid = gp->l1_lpid;
	long ref;

	spin_lock(&kvm->mmu_lock);
	if (gp == __find_nested(kvm, lpid)) {
		__remove_nested(kvm, lpid);
		--gp->refcnt;
	}
	ref = gp->refcnt;
	spin_unlock(&kvm->mmu_lock);
	if (ref == 0)
		kvmhv_release_nested(gp);
}

/*
 * Free up all nested resources allocated for this guest.
 * This is called with no vcpus of the guest running, when
 * switching the guest to HPT mode or when destroying the
 * guest.
 */
void kvmhv_release_all_nested(struct kvm *kvm)
{
	int lpid;
	struct kvm_nested_guest *gp;
	struct kvm_nested_guest *freelist = NULL;
	struct kvm_memory_slot *memslot;
	int srcu_idx, bkt;

	spin_lock(&kvm->mmu_lock);
	idr_for_each_entry(&kvm->arch.kvm_nested_guest_idr, gp, lpid) {
		__remove_nested(kvm, lpid);
		if (--gp->refcnt == 0) {
			gp->next = freelist;
			freelist = gp;
		}
	}
	idr_destroy(&kvm->arch.kvm_nested_guest_idr);
	/* idr is empty and may be reused at this point */
	spin_unlock(&kvm->mmu_lock);
	while ((gp = freelist) != NULL) {
		freelist = gp->next;
		kvmhv_release_nested(gp);
	}

	srcu_idx = srcu_read_lock(&kvm->srcu);
	kvm_for_each_memslot(memslot, bkt, kvm_memslots(kvm))
		kvmhv_free_memslot_nest_rmap(memslot);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
}

/* caller must hold gp->tlb_lock */
static void kvmhv_flush_nested(struct kvm_nested_guest *gp)
{
	struct kvm *kvm = gp->l1_host;

	spin_lock(&kvm->mmu_lock);
	kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable, gp->shadow_lpid);
	spin_unlock(&kvm->mmu_lock);
	kvmhv_flush_lpid(gp->shadow_lpid);
	kvmhv_update_ptbl_cache(gp);
	if (gp->l1_gr_to_hr == 0)
		kvmhv_remove_nested(gp);
}

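/*
 * Find the nested guest for a given L1 lpid, taking a reference on it.
 * If it doesn't exist and @create is true, allocate one outside the lock
 * and race to install it; the loser of the race is released again.
 */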
struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
					  bool create)
{
	struct kvm_nested_guest *gp, *newgp;

	if (l1_lpid >= (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4)))
		return NULL;

	spin_lock(&kvm->mmu_lock);
	gp = __find_nested(kvm, l1_lpid);
	if (gp)
		++gp->refcnt;
	spin_unlock(&kvm->mmu_lock);

	if (gp || !create)
		return gp;

	newgp = kvmhv_alloc_nested(kvm, l1_lpid);
	if (!newgp)
		return NULL;

	if (!__prealloc_nested(kvm, l1_lpid)) {
		kvmhv_release_nested(newgp);
		return NULL;
	}

	spin_lock(&kvm->mmu_lock);
	gp = __find_nested(kvm, l1_lpid);
	if (!gp) {
		__add_nested(kvm, l1_lpid, newgp);
		++newgp->refcnt;
		gp = newgp;
		newgp = NULL;
	}
	++gp->refcnt;
	spin_unlock(&kvm->mmu_lock);

	if (newgp)
		kvmhv_release_nested(newgp);

	return gp;
}

void kvmhv_put_nested(struct kvm_nested_guest *gp)
{
	struct kvm *kvm = gp->l1_host;
	long ref;

	spin_lock(&kvm->mmu_lock);
	ref = --gp->refcnt;
	spin_unlock(&kvm->mmu_lock);
	if (ref == 0)
		kvmhv_release_nested(gp);
}

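/*
 * Look up the shadow pte for a nested-guest effective address; the caller
 * must hold kvm->mmu_lock.
 */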
pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
				 unsigned long ea, unsigned *hshift)
{
	struct kvm_nested_guest *gp;
	pte_t *pte;

	gp = __find_nested(kvm, lpid);
	if (!gp)
		return NULL;

	VM_WARN(!spin_is_locked(&kvm->mmu_lock),
		"%s called with kvm mmu_lock not held\n", __func__);
	pte = __find_linux_pte(gp->shadow_pgtable, ea, NULL, hshift);

	return pte;
}

static inline bool kvmhv_n_rmap_is_equal(u64 rmap_1, u64 rmap_2)
{
	return !((rmap_1 ^ rmap_2) & (RMAP_NESTED_LPID_MASK |
				      RMAP_NESTED_GPA_MASK));
}

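/*
 * Insert an rmap entry linking an L1 guest page to a nested-guest pte.
 * A lone entry is stored inline in *rmapp, tagged with
 * RMAP_NESTED_IS_SINGLE_ENTRY; further entries convert it to an llist,
 * so the common single-mapping case needs no extra allocation.
 */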
void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp,
			    struct rmap_nested **n_rmap)
{
	struct llist_node *entry = ((struct llist_head *) rmapp)->first;
	struct rmap_nested *cursor;
	u64 rmap, new_rmap = (*n_rmap)->rmap;

	/* Are there any existing entries? */
	if (!(*rmapp)) {
		/* No -> use the rmap as a single entry */
		*rmapp = new_rmap | RMAP_NESTED_IS_SINGLE_ENTRY;
		return;
	}

	/* Do any entries match what we're trying to insert? */
	for_each_nest_rmap_safe(cursor, entry, &rmap) {
		if (kvmhv_n_rmap_is_equal(rmap, new_rmap))
			return;
	}

	/* Do we need to create a list or just add the new entry? */
	rmap = *rmapp;
	if (rmap & RMAP_NESTED_IS_SINGLE_ENTRY) /* Not previously a list */
		*rmapp = 0UL;
	llist_add(&((*n_rmap)->list), (struct llist_head *) rmapp);
	if (rmap & RMAP_NESTED_IS_SINGLE_ENTRY) /* Not previously a list */
		(*n_rmap)->list.next = (struct llist_node *) rmap;

	/* Set NULL so not freed by caller */
	*n_rmap = NULL;
}

static void kvmhv_update_nest_rmap_rc(struct kvm *kvm, u64 n_rmap,
				      unsigned long clr, unsigned long set,
				      unsigned long hpa, unsigned long mask)
{
	unsigned long gpa;
	unsigned int shift, lpid;
	pte_t *ptep;

	gpa = n_rmap & RMAP_NESTED_GPA_MASK;
	lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;

	/* Find the pte */
	ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);
	/*
	 * If the pte is present and the pfn is still the same, update the pte.
	 * If the pfn has changed then this is a stale rmap entry, the nested
	 * gpa actually points somewhere else now, and there is nothing to do.
	 * XXX we should however invalidate the pte here.
	 */
	if (ptep && pte_present(*ptep) && ((pte_val(*ptep) & mask) == hpa)) {
		__radix_pte_update(ptep, clr, set);
		kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid);
	}
}

/*
 * For a given list of rmap entries, update the rc bits in all ptes in shadow
 * page tables for nested guests which are referenced by the rmap list.
 */
void kvmhv_update_nest_rmap_rc_list(struct kvm *kvm, unsigned long *rmapp,
				    unsigned long clr, unsigned long set,
				    unsigned long hpa, unsigned long nbytes)
{
	struct llist_node *entry = ((struct llist_head *) rmapp)->first;
	struct rmap_nested *cursor;
	unsigned long rmap, mask;

	if ((clr | set) & ~(_PAGE_DIRTY | _PAGE_ACCESSED))
		return;

	mask = PTE_RPN_MASK & ~(nbytes - 1);
	hpa &= mask;

	for_each_nest_rmap_safe(cursor, entry, &rmap)
		kvmhv_update_nest_rmap_rc(kvm, rmap, clr, set, hpa, mask);
}

static void kvmhv_remove_nest_rmap(struct kvm *kvm, u64 n_rmap,
				   unsigned long hpa, unsigned long mask)
{
	struct kvm_nested_guest *gp;
	unsigned long gpa;
	unsigned int shift, lpid;
	pte_t *ptep;

	gpa = n_rmap & RMAP_NESTED_GPA_MASK;
	lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;
	gp = __find_nested(kvm, lpid);
	if (!gp)
		return;

	/* Find and invalidate the pte */
	ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);
	/* Don't spuriously invalidate ptes if the pfn has changed */
	if (ptep && pte_present(*ptep) && ((pte_val(*ptep) & mask) == hpa))
		kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
}

static void kvmhv_remove_nest_rmap_list(struct kvm *kvm, unsigned long *rmapp,
					unsigned long hpa, unsigned long mask)
{
	struct llist_node *entry = llist_del_all((struct llist_head *) rmapp);
	struct rmap_nested *cursor;
	unsigned long rmap;

	for_each_nest_rmap_safe(cursor, entry, &rmap) {
		kvmhv_remove_nest_rmap(kvm, rmap, hpa, mask);
		kfree(cursor);
	}
}

/* called with kvm->mmu_lock held */
void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
				  const struct kvm_memory_slot *memslot,
				  unsigned long gpa, unsigned long hpa,
				  unsigned long nbytes)
{
	unsigned long gfn, end_gfn;
	unsigned long addr_mask;

	if (!memslot)
		return;
	gfn = (gpa >> PAGE_SHIFT) - memslot->base_gfn;
	end_gfn = gfn + (nbytes >> PAGE_SHIFT);

	addr_mask = PTE_RPN_MASK & ~(nbytes - 1);
	hpa &= addr_mask;

	for (; gfn < end_gfn; gfn++) {
		unsigned long *rmap = &memslot->arch.rmap[gfn];

		kvmhv_remove_nest_rmap_list(kvm, rmap, hpa, addr_mask);
	}
}

static void kvmhv_free_memslot_nest_rmap(struct kvm_memory_slot *free)
{
	unsigned long page;

	for (page = 0; page < free->npages; page++) {
		unsigned long rmap, *rmapp = &free->arch.rmap[page];
		struct rmap_nested *cursor;
		struct llist_node *entry;

		entry = llist_del_all((struct llist_head *) rmapp);
		for_each_nest_rmap_safe(cursor, entry, &rmap)
			kfree(cursor);
	}
}

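/*
 * Invalidate the shadow pte backing a nested-guest real page, if present.
 * Returns true if a pte was cleared, and reports the page-size shift of
 * the mapping through *shift_ret.
 */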
static bool kvmhv_invalidate_shadow_pte(struct kvm_vcpu *vcpu,
					struct kvm_nested_guest *gp,
					long gpa, int *shift_ret)
{
	struct kvm *kvm = vcpu->kvm;
	bool ret = false;
	pte_t *ptep;
	int shift;

	spin_lock(&kvm->mmu_lock);
	ptep = find_kvm_nested_guest_pte(kvm, gp->l1_lpid, gpa, &shift);
	if (!shift)
		shift = PAGE_SHIFT;
	if (ptep && pte_present(*ptep)) {
		kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
		ret = true;
	}
	spin_unlock(&kvm->mmu_lock);

	if (shift_ret)
		*shift_ret = shift;
	return ret;
}

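/*
 * Helpers to pick apart the fields of a tlbie instruction image (RIC,
 * PRS, R) and of its rS/rB register operands (LPID, IS, AP, EPN).
 */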
static inline int get_ric(unsigned int instr)
{
	return (instr >> 18) & 0x3;
}

static inline int get_prs(unsigned int instr)
{
	return (instr >> 17) & 0x1;
}

static inline int get_r(unsigned int instr)
{
	return (instr >> 16) & 0x1;
}

static inline int get_lpid(unsigned long r_val)
{
	return r_val & 0xffffffff;
}

static inline int get_is(unsigned long r_val)
{
	return (r_val >> 10) & 0x3;
}

static inline int get_ap(unsigned long r_val)
{
	return (r_val >> 5) & 0x7;
}

static inline long get_epn(unsigned long r_val)
{
	return r_val >> 12;
}

static int kvmhv_emulate_tlbie_tlb_addr(struct kvm_vcpu *vcpu, int lpid,
					int ap, long epn)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_nested_guest *gp;
	long npages;
	int shift, shadow_shift;
	unsigned long addr;

	shift = ap_to_shift(ap);
	addr = epn << 12;
	if (shift < 0)
		/* Invalid ap encoding */
		return -EINVAL;

	addr &= ~((1UL << shift) - 1);
	npages = 1UL << (shift - PAGE_SHIFT);

	gp = kvmhv_get_nested(kvm, lpid, false);
	if (!gp) /* No such guest -> nothing to do */
		return 0;
	mutex_lock(&gp->tlb_lock);

	/* There may be more than one host page backing this single guest pte */
	do {
		kvmhv_invalidate_shadow_pte(vcpu, gp, addr, &shadow_shift);

		npages -= 1UL << (shadow_shift - PAGE_SHIFT);
		addr += 1UL << shadow_shift;
	} while (npages > 0);

	mutex_unlock(&gp->tlb_lock);
	kvmhv_put_nested(gp);
	return 0;
}

static void kvmhv_emulate_tlbie_lpid(struct kvm_vcpu *vcpu,
				     struct kvm_nested_guest *gp, int ric)
{
	struct kvm *kvm = vcpu->kvm;

	mutex_lock(&gp->tlb_lock);
	switch (ric) {
	case 0:
		/* Invalidate TLB */
		spin_lock(&kvm->mmu_lock);
		kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable,
					  gp->shadow_lpid);
		kvmhv_flush_lpid(gp->shadow_lpid);
		spin_unlock(&kvm->mmu_lock);
		break;
	case 1:
		/*
		 * Invalidate PWC
		 * We don't cache this -> nothing to do
		 */
		break;
	case 2:
		/* Invalidate TLB, PWC and caching of partition table entries */
		kvmhv_flush_nested(gp);
		break;
	default:
		break;
	}
	mutex_unlock(&gp->tlb_lock);
}

static void kvmhv_emulate_tlbie_all_lpid(struct kvm_vcpu *vcpu, int ric)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_nested_guest *gp;
	int lpid;

	spin_lock(&kvm->mmu_lock);
	idr_for_each_entry(&kvm->arch.kvm_nested_guest_idr, gp, lpid) {
		spin_unlock(&kvm->mmu_lock);
		kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
		spin_lock(&kvm->mmu_lock);
	}
	spin_unlock(&kvm->mmu_lock);
}

static int kvmhv_emulate_priv_tlbie(struct kvm_vcpu *vcpu, unsigned int instr,
				    unsigned long rsval, unsigned long rbval)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_nested_guest *gp;
	int r, ric, prs, is, ap;
	int lpid;
	long epn;
	int ret = 0;

	ric = get_ric(instr);
	prs = get_prs(instr);
	r = get_r(instr);
	lpid = get_lpid(rsval);
	is = get_is(rbval);

	/*
	 * These cases are invalid and are not handled:
	 * r   != 1 -> Only radix supported
	 * prs == 1 -> Not HV privileged
	 * ric == 3 -> No cluster bombs for radix
	 * is  == 1 -> Partition scoped translations not associated with pid
	 * (!is) && (ric == 1 || ric == 2) -> Not supported by ISA
	 */
	if ((!r) || (prs) || (ric == 3) || (is == 1) ||
	    ((!is) && (ric == 1 || ric == 2)))
		return -EINVAL;

	switch (is) {
	case 0:
		/*
		 * We know ric is either 0 or 1 here since we exclude the
		 * case where ric == 2 && is == 0 above
		 */
		epn = get_epn(rbval);
		ap = get_ap(rbval);
		ret = kvmhv_emulate_tlbie_tlb_addr(vcpu, lpid, ap, epn);
		break;
	case 2:
		/* Invalidate matching LPID */
		gp = kvmhv_get_nested(kvm, lpid, false);
		if (gp) {
			kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
			kvmhv_put_nested(gp);
		}
		break;
	case 3:
		/* Invalidate ALL LPIDs */
		kvmhv_emulate_tlbie_all_lpid(vcpu, ric);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

/*
 * This handles the H_TLB_INVALIDATE hcall.
 * Parameters are (r4) tlbie instruction code, (r5) rS contents,
 * (r6) rB contents.
 */
long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu)
{
	int ret;

	ret = kvmhv_emulate_priv_tlbie(vcpu, kvmppc_get_gpr(vcpu, 4),
			kvmppc_get_gpr(vcpu, 5), kvmppc_get_gpr(vcpu, 6));
	if (ret)
		return H_PARAMETER;
	return H_SUCCESS;
}

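/*
 * Flush the translations cached for a single nested guest, if it exists,
 * with the scope selected by ric (TLB only, or everything).
 */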
static long do_tlb_invalidate_nested_all(struct kvm_vcpu *vcpu,
					 unsigned long lpid, unsigned long ric)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_nested_guest *gp;

	gp = kvmhv_get_nested(kvm, lpid, false);
	if (gp) {
		kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
		kvmhv_put_nested(gp);
	}
	return H_SUCCESS;
}

/*
 * Number of pages above which we invalidate the entire LPID rather than
 * flush individual pages.
 */
static unsigned long tlb_range_flush_page_ceiling __read_mostly = 33;

static long do_tlb_invalidate_nested_tlb(struct kvm_vcpu *vcpu,
					 unsigned long lpid,
					 unsigned long pg_sizes,
					 unsigned long start,
					 unsigned long end)
{
	int ret = H_P4;
	unsigned long addr, nr_pages;
	struct mmu_psize_def *def;
	unsigned long psize, ap, page_size;
	bool flush_lpid;

	for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
		def = &mmu_psize_defs[psize];
		if (!(pg_sizes & def->h_rpt_pgsize))
			continue;

		nr_pages = (end - start) >> def->shift;
		flush_lpid = nr_pages > tlb_range_flush_page_ceiling;
		if (flush_lpid)
			return do_tlb_invalidate_nested_all(vcpu, lpid,
							RIC_FLUSH_TLB);
		addr = start;
		ap = mmu_get_ap(psize);
		page_size = 1UL << def->shift;
		do {
			ret = kvmhv_emulate_tlbie_tlb_addr(vcpu, lpid, ap,
						   get_epn(addr));
			if (ret)
				return H_P4;
			addr += page_size;
		} while (addr < end);
	}
	return ret;
}

/*
 * Performs partition-scoped invalidations for nested guests
 * as part of H_RPT_INVALIDATE hcall.
 */
long do_h_rpt_invalidate_pat(struct kvm_vcpu *vcpu, unsigned long lpid,
			     unsigned long type, unsigned long pg_sizes,
			     unsigned long start, unsigned long end)
{
	/*
	 * If L2 lpid isn't valid, we need to return H_PARAMETER.
	 *
	 * However, nested KVM issues a L2 lpid flush call when creating
	 * partition table entries for L2. This happens even before the
	 * corresponding shadow lpid is created in HV which happens in
	 * H_ENTER_NESTED call. Since we can't differentiate this case from
	 * the invalid case, we ignore such flush requests and return success.
	 */
	if (!__find_nested(vcpu->kvm, lpid))
		return H_SUCCESS;

	/*
	 * A flush all request can be handled by a full lpid flush only.
	 */
	if ((type & H_RPTI_TYPE_NESTED_ALL) == H_RPTI_TYPE_NESTED_ALL)
		return do_tlb_invalidate_nested_all(vcpu, lpid, RIC_FLUSH_ALL);

	/*
	 * We don't need to handle a PWC flush like process table here,
	 * because intermediate partition scoped table in nested guest doesn't
	 * really have PWC. Only level we have PWC is in L0 and for nested
	 * invalidate at L0 we always do kvm_flush_lpid() which does
	 * radix__flush_all_lpid(). For range invalidate at any level, we
	 * are not removing the higher level page tables and hence there is
	 * no PWC invalidate needed.
	 */

	if (start == 0 && end == -1)
		return do_tlb_invalidate_nested_all(vcpu, lpid, RIC_FLUSH_TLB);

	if (type & H_RPTI_TYPE_TLB)
		return do_tlb_invalidate_nested_tlb(vcpu, lpid, pg_sizes,
						    start, end);
	return H_SUCCESS;
}

/* Used to convert a nested guest real address to a L1 guest real address */
static int kvmhv_translate_addr_nested(struct kvm_vcpu *vcpu,
				       struct kvm_nested_guest *gp,
				       unsigned long n_gpa, unsigned long dsisr,
				       struct kvmppc_pte *gpte_p)
{
	u64 fault_addr, flags = dsisr & DSISR_ISSTORE;
	int ret;

	ret = kvmppc_mmu_walk_radix_tree(vcpu, n_gpa, gpte_p, gp->l1_gr_to_hr,
					 &fault_addr);

	if (ret) {
		/* We didn't find a pte */
		if (ret == -EINVAL) {
			/* Unsupported mmu config */
			flags |= DSISR_UNSUPP_MMU;
		} else if (ret == -ENOENT) {
			/* No translation found */
			flags |= DSISR_NOHPTE;
		} else if (ret == -EFAULT) {
			/* Couldn't access L1 real address */
			flags |= DSISR_PRTABLE_FAULT;
			vcpu->arch.fault_gpa = fault_addr;
		} else {
			/* Unknown error */
			return ret;
		}
		goto forward_to_l1;
	} else {
		/* We found a pte -> check permissions */
		if (dsisr & DSISR_ISSTORE) {
			/* Can we write? */
			if (!gpte_p->may_write) {
				flags |= DSISR_PROTFAULT;
				goto forward_to_l1;
			}
		} else if (vcpu->arch.trap == BOOK3S_INTERRUPT_H_INST_STORAGE) {
			/* Can we execute? */
			if (!gpte_p->may_execute) {
				flags |= SRR1_ISI_N_G_OR_CIP;
				goto forward_to_l1;
			}
		} else {
			/* Can we read? */
			if (!gpte_p->may_read && !gpte_p->may_write) {
				flags |= DSISR_PROTFAULT;
				goto forward_to_l1;
			}
		}
	}

	return 0;

forward_to_l1:
	vcpu->arch.fault_dsisr = flags;
	if (vcpu->arch.trap == BOOK3S_INTERRUPT_H_INST_STORAGE) {
		vcpu->arch.shregs.msr &= SRR1_MSR_BITS;
		vcpu->arch.shregs.msr |= flags;
	}
	return RESUME_HOST;
}

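/*
 * Handle a fault that is only due to missing referenced/changed bits:
 * forward it to L1 if the L1 pte does not already allow the access,
 * otherwise set the rc bits both in our pte for the L1 guest and in the
 * shadow pte for the nested guest.
 */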
static long kvmhv_handle_nested_set_rc(struct kvm_vcpu *vcpu,
				       struct kvm_nested_guest *gp,
				       unsigned long n_gpa,
				       struct kvmppc_pte gpte,
				       unsigned long dsisr)
{
	struct kvm *kvm = vcpu->kvm;
	bool writing = !!(dsisr & DSISR_ISSTORE);
	u64 pgflags;
	long ret;

	/* Are the rc bits set in the L1 lpte? */
	pgflags = _PAGE_ACCESSED;
	if (writing)
		pgflags |= _PAGE_DIRTY;
	if (pgflags & ~gpte.rc)
		return RESUME_HOST;

	spin_lock(&kvm->mmu_lock);
	/* Set the rc bit in the pte of our (L0) pgtable for the L1 guest */
	ret = kvmppc_hv_handle_set_rc(kvm, false, writing,
				      gpte.raddr, kvm->arch.lpid);
	if (!ret) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* Set the rc bit in the pte of the shadow_pgtable for the nest guest */
	ret = kvmppc_hv_handle_set_rc(kvm, true, writing,
				      n_gpa, gp->l1_lpid);
	if (!ret)
		ret = -EINVAL;
	else
		ret = 0;

out_unlock:
	spin_unlock(&kvm->mmu_lock);
	return ret;
}

static inline int kvmppc_radix_level_to_shift(int level)
{
	switch (level) {
	case 2:
		return PUD_SHIFT;
	case 1:
		return PMD_SHIFT;
	default:
		return PAGE_SHIFT;
	}
}

static inline int kvmppc_radix_shift_to_level(int shift)
{
	if (shift == PUD_SHIFT)
		return 2;
	if (shift == PMD_SHIFT)
		return 1;
	if (shift == PAGE_SHIFT)
		return 0;
	WARN_ON_ONCE(1);
	return 0;
}

/* called with gp->tlb_lock held */
static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
					  struct kvm_nested_guest *gp)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_memory_slot *memslot;
	struct rmap_nested *n_rmap;
	struct kvmppc_pte gpte;
	pte_t pte, *pte_p;
	unsigned long mmu_seq;
	unsigned long dsisr = vcpu->arch.fault_dsisr;
	unsigned long ea = vcpu->arch.fault_dar;
	unsigned long *rmapp;
	unsigned long n_gpa, gpa, gfn, perm = 0UL;
	unsigned int shift, l1_shift, level;
	bool writing = !!(dsisr & DSISR_ISSTORE);
	bool kvm_ro = false;
	long int ret;

	if (!gp->l1_gr_to_hr) {
		kvmhv_update_ptbl_cache(gp);
		if (!gp->l1_gr_to_hr)
			return RESUME_HOST;
	}

	/* Convert the nested guest real address into a L1 guest real address */

	n_gpa = vcpu->arch.fault_gpa & ~0xF000000000000FFFULL;
	if (!(dsisr & DSISR_PRTABLE_FAULT))
		n_gpa |= ea & 0xFFF;
	ret = kvmhv_translate_addr_nested(vcpu, gp, n_gpa, dsisr, &gpte);

	/*
	 * If the hardware found a translation but we don't now have a usable
	 * translation in the l1 partition-scoped tree, remove the shadow pte
	 * and let the guest retry.
	 */
	if (ret == RESUME_HOST &&
	    (dsisr & (DSISR_PROTFAULT | DSISR_BADACCESS | DSISR_NOEXEC_OR_G |
		      DSISR_BAD_COPYPASTE)))
		goto inval;
	if (ret)
		return ret;

	/* Failed to set the reference/change bits */
	if (dsisr & DSISR_SET_RC) {
		ret = kvmhv_handle_nested_set_rc(vcpu, gp, n_gpa, gpte, dsisr);
		if (ret == RESUME_HOST)
			return ret;
		if (ret)
			goto inval;
		dsisr &= ~DSISR_SET_RC;
		if (!(dsisr & (DSISR_BAD_FAULT_64S | DSISR_NOHPTE |
			       DSISR_PROTFAULT)))
			return RESUME_GUEST;
	}

	/*
	 * We took an HISI or HDSI while we were running a nested guest
	 * which means we have no partition scoped translation for that.
	 * This means we need to insert a pte for the mapping into our
	 * shadow_pgtable.
	 */

	l1_shift = gpte.page_shift;
	if (l1_shift < PAGE_SHIFT) {
		/* We don't support l1 using a page size smaller than our own */
		pr_err("KVM: L1 guest page shift (%d) less than our own (%d)\n",
			l1_shift, PAGE_SHIFT);
		return -EINVAL;
	}
	gpa = gpte.raddr;
	gfn = gpa >> PAGE_SHIFT;

	/* 1. Get the corresponding host memslot */

	memslot = gfn_to_memslot(kvm, gfn);
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
		if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS)) {
			/* unusual error -> reflect to the guest as a DSI */
			kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
			return RESUME_GUEST;
		}

		/* passthrough of emulated MMIO case */
		return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, writing);
	}
	if (memslot->flags & KVM_MEM_READONLY) {
		if (writing) {
			/* Give the guest a DSI */
			kvmppc_core_queue_data_storage(vcpu, ea,
					DSISR_ISSTORE | DSISR_PROTFAULT);
			return RESUME_GUEST;
		}
		kvm_ro = true;
	}

	/* 2. Find the host pte for this L1 guest real address */

	/* Used to check for invalidations in progress */
	mmu_seq = kvm->mmu_invalidate_seq;
	smp_rmb();

	/* See if can find translation in our partition scoped tables for L1 */
	pte = __pte(0);
	spin_lock(&kvm->mmu_lock);
	pte_p = find_kvm_secondary_pte(kvm, gpa, &shift);
	if (!shift)
		shift = PAGE_SHIFT;
	if (pte_p)
		pte = *pte_p;
	spin_unlock(&kvm->mmu_lock);

	if (!pte_present(pte) || (writing && !(pte_val(pte) & _PAGE_WRITE))) {
		/* No suitable pte found -> try to insert a mapping */
		ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot,
					writing, kvm_ro, &pte, &level);
		if (ret == -EAGAIN)
			return RESUME_GUEST;
		else if (ret)
			return ret;
		shift = kvmppc_radix_level_to_shift(level);
	}
	/* Align gfn to the start of the page */
	gfn = (gpa & ~((1UL << shift) - 1)) >> PAGE_SHIFT;

	/* 3. Compute the pte we need to insert for nest_gpa -> host r_addr */

	/* The permissions is the combination of the host and l1 guest ptes */
	perm |= gpte.may_read ? 0UL : _PAGE_READ;
	perm |= gpte.may_write ? 0UL : _PAGE_WRITE;
	perm |= gpte.may_execute ? 0UL : _PAGE_EXEC;
	/* Only set accessed/dirty (rc) bits if set in host and l1 guest ptes */
	perm |= (gpte.rc & _PAGE_ACCESSED) ? 0UL : _PAGE_ACCESSED;
	perm |= ((gpte.rc & _PAGE_DIRTY) && writing) ? 0UL : _PAGE_DIRTY;
	pte = __pte(pte_val(pte) & ~perm);

	/* What size pte can we insert? */
	if (shift > l1_shift) {
		u64 mask;
		unsigned int actual_shift = PAGE_SHIFT;

		if (PMD_SHIFT < l1_shift)
			actual_shift = PMD_SHIFT;
		mask = (1UL << shift) - (1UL << actual_shift);
		pte = __pte(pte_val(pte) | (gpa & mask));
		shift = actual_shift;
	}
	level = kvmppc_radix_shift_to_level(shift);
	n_gpa &= ~((1UL << shift) - 1);

	/* 4. Insert the pte into our shadow_pgtable */

	n_rmap = kzalloc(sizeof(*n_rmap), GFP_KERNEL);
	if (!n_rmap)
		return RESUME_GUEST; /* Let the guest try again */
	n_rmap->rmap = (n_gpa & RMAP_NESTED_GPA_MASK) |
		(((unsigned long) gp->l1_lpid) << RMAP_NESTED_LPID_SHIFT);
	rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
	ret = kvmppc_create_pte(kvm, gp->shadow_pgtable, pte, n_gpa, level,
				mmu_seq, gp->shadow_lpid, rmapp, &n_rmap);
	kfree(n_rmap);
	if (ret == -EAGAIN)
		ret = RESUME_GUEST;	/* Let the guest try again */

	return ret;

inval:
	kvmhv_invalidate_shadow_pte(vcpu, gp, n_gpa, NULL);
	return RESUME_GUEST;
}

long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu)
{
	struct kvm_nested_guest *gp = vcpu->arch.nested;
	long int ret;

	mutex_lock(&gp->tlb_lock);
	ret = __kvmhv_nested_page_fault(vcpu, gp);
	mutex_unlock(&gp->tlb_lock);
	return ret;
}

int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid)
{
	int ret = lpid + 1;

	spin_lock(&kvm->mmu_lock);
	if (!idr_get_next(&kvm->arch.kvm_nested_guest_idr, &ret))
		ret = -1;
	spin_unlock(&kvm->mmu_lock);

	return ret;
}