/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Generation of main entry point for the guest, exception handling.
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 *
 * Copyright (C) 2016 Imagination Technologies Ltd.
 */

#include <linux/kvm_host.h>
#include <linux/log2.h>
#include <asm/mmu_context.h>
#include <asm/msa.h>
#include <asm/setup.h>
#include <asm/tlbex.h>
#include <asm/uasm.h>

/* register definitions */
#define ZERO		0
#define AT		1
#define V0		2
#define V1		3
#define A0		4
#define A1		5

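/*
 * Note: the t0-t3 register numbers differ by ABI; under o32 they are $8-$11,
 * while under n32/n64 those numbers are the extra argument registers a4-a7,
 * so t0-t3 live in $12-$15 instead.
 */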
#if _MIPS_SIM == _MIPS_SIM_ABI32
#define T0		8
#define T1		9
#define T2		10
#define T3		11
#endif

#if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32
#define T0		12
#define T1		13
#define T2		14
#define T3		15
#endif

#define S0		16
#define S1		17
#define T9		25
#define K0		26
#define K1		27
#define GP		28
#define SP		29
#define RA		31

/* Some CP0 registers */
#define C0_PWBASE	5, 5
#define C0_HWRENA	7, 0
#define C0_BADVADDR	8, 0
#define C0_BADINSTR	8, 1
#define C0_BADINSTRP	8, 2
#define C0_PGD		9, 7
#define C0_ENTRYHI	10, 0
#define C0_GUESTCTL1	10, 4
#define C0_STATUS	12, 0
#define C0_GUESTCTL0	12, 6
#define C0_CAUSE	13, 0
#define C0_EPC		14, 0
#define C0_EBASE	15, 1
#define C0_CONFIG5	16, 5
#define C0_DDATA_LO	28, 3
#define C0_ERROREPC	30, 0

#define CALLFRAME_SIZ   32

#ifdef CONFIG_64BIT
#define ST0_KX_IF_64	ST0_KX
#else
#define ST0_KX_IF_64	0
#endif

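/*
 * Scratch CP0 registers used to stash the VCPU pointer and a temporary, as
 * {register, select} pairs. The defaults below (DDataLo and ErrorEPC) work
 * everywhere; kvm_mips_entry_setup() upgrades them to KScratch registers
 * where the core provides them.
 */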
static unsigned int scratch_vcpu[2] = { C0_DDATA_LO };
static unsigned int scratch_tmp[2] = { C0_ERROREPC };

enum label_id {
	label_fpu_1 = 1,
	label_msa_1,
	label_return_to_host,
	label_kernel_asid,
	label_exit_common,
};

UASM_L_LA(_fpu_1)
UASM_L_LA(_msa_1)
UASM_L_LA(_return_to_host)
UASM_L_LA(_kernel_asid)
UASM_L_LA(_exit_common)

static void *kvm_mips_build_enter_guest(void *addr);
static void *kvm_mips_build_ret_from_exit(void *addr);
static void *kvm_mips_build_ret_to_guest(void *addr);
static void *kvm_mips_build_ret_to_host(void *addr);

/* The KScratch registers are CP0 register 31, selects 2-7 */
static int c0_kscratch(void)
{
	return 31;
}

/**
 * kvm_mips_entry_setup() - Perform global setup for entry code.
 *
 * Perform global setup for entry code, such as choosing a scratch register.
 *
 * Returns:	0 on success.
 *		-errno on failure.
 */
int kvm_mips_entry_setup(void)
{
	/*
	 * We prefer to use KScratchN registers if they are available over the
	 * defaults above, which may not work on all cores.
	 */
	unsigned int kscratch_mask = cpu_data[0].kscratch_mask;

	if (pgd_reg != -1)
		kscratch_mask &= ~BIT(pgd_reg);

	/* Pick a scratch register for storing VCPU */
	if (kscratch_mask) {
		scratch_vcpu[0] = c0_kscratch();
		scratch_vcpu[1] = ffs(kscratch_mask) - 1;
		kscratch_mask &= ~BIT(scratch_vcpu[1]);
	}

	/* Pick a scratch register to use as a temp for saving state */
	if (kscratch_mask) {
		scratch_tmp[0] = c0_kscratch();
		scratch_tmp[1] = ffs(kscratch_mask) - 1;
		kscratch_mask &= ~BIT(scratch_tmp[1]);
	}

	return 0;
}

static void kvm_mips_build_save_scratch(u32 **p, unsigned int tmp,
					unsigned int frame)
{
	/* Save the VCPU scratch register value in cp0_epc of the stack frame */
	UASM_i_MFC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);
	UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);

	/* Save the temp scratch register value in cp0_cause of stack frame */
	if (scratch_tmp[0] == c0_kscratch()) {
		UASM_i_MFC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
		UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
	}
}

static void kvm_mips_build_restore_scratch(u32 **p, unsigned int tmp,
					   unsigned int frame)
{
	/*
	 * Restore host scratch register values saved by
	 * kvm_mips_build_save_scratch().
	 */
	UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);
	UASM_i_MTC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);

	if (scratch_tmp[0] == c0_kscratch()) {
		UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
		UASM_i_MTC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
	}
}

/**
 * build_set_exc_base() - Assemble code to write exception base address.
 * @p:		Code buffer pointer.
 * @reg:	Source register (generated code may set WG bit in @reg).
 *
 * Assemble code to modify the exception base address in the EBase register,
 * using the appropriately sized access and setting the WG bit if necessary.
 */
static inline void build_set_exc_base(u32 **p, unsigned int reg)
{
	if (cpu_has_ebase_wg) {
		/* Set WG so that all the bits get written */
		uasm_i_ori(p, reg, reg, MIPS_EBASE_WG);
		UASM_i_MTC0(p, reg, C0_EBASE);
	} else {
		uasm_i_mtc0(p, reg, C0_EBASE);
	}
}

/**
 * kvm_mips_build_vcpu_run() - Assemble function to start running a guest VCPU.
 * @addr:	Address to start writing code.
 *
 * Assemble the start of the vcpu_run function to run a guest VCPU. The function
 * conforms to the following prototype:
 *
 * int vcpu_run(struct kvm_vcpu *vcpu);
 *
 * The exit from the guest and return to the caller is handled by the code
 * generated by kvm_mips_build_ret_to_host().
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_vcpu_run(void *addr)
{
	u32 *p = addr;
	unsigned int i;

	/*
	 * A0: vcpu
	 */

	/* k0/k1 not being used in host kernel context */
	UASM_i_ADDIU(&p, K1, SP, -(int)sizeof(struct pt_regs));
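	/* Save callee-saved registers: s0-s7 ($16-$23) and gp/sp/fp/ra
	 * ($28-$31); the jump from 24 to 28 skips t8/t9 and k0/k1 */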
	for (i = 16; i < 32; ++i) {
		if (i == 24)
			i = 28;
		UASM_i_SW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
	}

	/* Save host status */
	uasm_i_mfc0(&p, V0, C0_STATUS);
	UASM_i_SW(&p, V0, offsetof(struct pt_regs, cp0_status), K1);

	/* Save scratch registers, will be used to store pointer to vcpu etc */
	kvm_mips_build_save_scratch(&p, V1, K1);

	/* VCPU scratch register has pointer to vcpu */
	UASM_i_MTC0(&p, A0, scratch_vcpu[0], scratch_vcpu[1]);

	/* Offset into vcpu->arch */
	UASM_i_ADDIU(&p, K1, A0, offsetof(struct kvm_vcpu, arch));

	/*
	 * Save the host stack to VCPU, used for exception processing
	 * when we exit from the Guest
	 */
	UASM_i_SW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);

	/* Save the kernel gp as well */
	UASM_i_SW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);

	/*
	 * Setup status register for running the guest in UM, interrupts
	 * are disabled
	 */
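	/* BEV is set so exceptions use the bootstrap vectors until the guest
	 * EBASE is installed below */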
	UASM_i_LA(&p, K0, ST0_EXL | KSU_USER | ST0_BEV | ST0_KX_IF_64);
	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);

	/* load up the new EBASE */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
	build_set_exc_base(&p, K0);

	/*
	 * Now that the new EBASE has been loaded, unset BEV, set
	 * interrupt mask as it was but make sure that timer interrupts
	 * are enabled
	 */
	uasm_i_addiu(&p, K0, ZERO, ST0_EXL | KSU_USER | ST0_IE | ST0_KX_IF_64);
	uasm_i_andi(&p, V0, V0, ST0_IM);
	uasm_i_or(&p, K0, K0, V0);
	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);

	p = kvm_mips_build_enter_guest(p);

	return p;
}

/**
 * kvm_mips_build_enter_guest() - Assemble code to resume guest execution.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to resume guest execution. This code is common between the
 * initial entry into the guest from the host, and returning from the exit
 * handler back to the guest.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_enter_guest(void *addr)
{
	u32 *p = addr;
	unsigned int i;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label __maybe_unused *l = labels;
	struct uasm_reloc __maybe_unused *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Set Guest EPC */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1);
	UASM_i_MTC0(&p, T0, C0_EPC);

	/* Save normal linux process pgd (VZ guarantees pgd_reg is set) */
	if (cpu_has_ldpte)
		UASM_i_MFC0(&p, K0, C0_PWBASE);
	else
		UASM_i_MFC0(&p, K0, c0_kscratch(), pgd_reg);
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_pgd), K1);

	/*
	 * Set up KVM GPA pgd.
	 * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
	 * - call tlbmiss_handler_setup_pgd(mm->pgd)
	 * - write mm->pgd into CP0_PWBase
	 *
	 * We keep S0 pointing at struct kvm so we can load the ASID below
	 */
	UASM_i_LW(&p, S0, (int)offsetof(struct kvm_vcpu, kvm) -
			  (int)offsetof(struct kvm_vcpu, arch), K1);
	UASM_i_LW(&p, A0, offsetof(struct kvm, arch.gpa_mm.pgd), S0);
	UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
	uasm_i_jalr(&p, RA, T9);
	/* delay slot */
	if (cpu_has_htw)
		UASM_i_MTC0(&p, A0, C0_PWBASE);
	else
		uasm_i_nop(&p);

	/* Set GM bit to setup eret to VZ guest context */
	uasm_i_addiu(&p, V1, ZERO, 1);
	uasm_i_mfc0(&p, K0, C0_GUESTCTL0);
	uasm_i_ins(&p, K0, V1, MIPS_GCTL0_GM_SHIFT, 1);
	uasm_i_mtc0(&p, K0, C0_GUESTCTL0);

	if (cpu_has_guestid) {
		/*
		 * Set root mode GuestID, so that root TLB refill handler can
		 * use the correct GuestID in the root TLB.
		 */

		/* Get current GuestID */
		uasm_i_mfc0(&p, T0, C0_GUESTCTL1);
		/* Set GuestCtl1.RID = GuestCtl1.ID */
		uasm_i_ext(&p, T1, T0, MIPS_GCTL1_ID_SHIFT,
			   MIPS_GCTL1_ID_WIDTH);
		uasm_i_ins(&p, T0, T1, MIPS_GCTL1_RID_SHIFT,
			   MIPS_GCTL1_RID_WIDTH);
		uasm_i_mtc0(&p, T0, C0_GUESTCTL1);

		/* GuestID handles dealiasing so we don't need to touch ASID */
		goto skip_asid_restore;
	}

	/* Root ASID Dealias (RAD) */

	/* Save host ASID */
	UASM_i_MFC0(&p, K0, C0_ENTRYHI);
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
		  K1);

	/* Set the root ASID for the Guest */
	UASM_i_ADDIU(&p, T1, S0,
		     offsetof(struct kvm, arch.gpa_mm.context.asid));

	/* t1: contains the base of the ASID array, need to get the cpu id */
	/* smp_processor_id */
	uasm_i_lw(&p, T2, offsetof(struct thread_info, cpu), GP);
	/* index the ASID array */
	uasm_i_sll(&p, T2, T2, ilog2(sizeof(long)));
	UASM_i_ADDU(&p, T3, T1, T2);
	UASM_i_LW(&p, K0, 0, T3);
#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
	/*
	 * reuse ASID array offset
	 * cpuinfo_mips is a multiple of sizeof(long)
	 */
	uasm_i_addiu(&p, T3, ZERO, sizeof(struct cpuinfo_mips)/sizeof(long));
	uasm_i_mul(&p, T2, T2, T3);

	UASM_i_LA_mostly(&p, AT, (long)&cpu_data[0].asid_mask);
	UASM_i_ADDU(&p, AT, AT, T2);
	UASM_i_LW(&p, T2, uasm_rel_lo((long)&cpu_data[0].asid_mask), AT);
	uasm_i_and(&p, K0, K0, T2);
#else
	uasm_i_andi(&p, K0, K0, MIPS_ENTRYHI_ASID);
#endif

	/* Set up KVM VZ root ASID (!guestid) */
	uasm_i_mtc0(&p, K0, C0_ENTRYHI);
skip_asid_restore:
	uasm_i_ehb(&p);

	/* Disable RDHWR access */
	uasm_i_mtc0(&p, ZERO, C0_HWRENA);

	/* load the guest context from VCPU and return */
	for (i = 1; i < 32; ++i) {
		/* Guest k0/k1 loaded later */
		if (i == K0 || i == K1)
			continue;
		UASM_i_LW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
	}

#ifndef CONFIG_CPU_MIPSR6
	/* Restore hi/lo */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, hi), K1);
	uasm_i_mthi(&p, K0);

	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, lo), K1);
	uasm_i_mtlo(&p, K0);
#endif

	/* Restore the guest's k0/k1 registers */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);
	UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);

	/* Jump to guest */
	uasm_i_eret(&p);

	uasm_resolve_relocs(relocs, labels);

	return p;
}

/**
 * kvm_mips_build_tlb_refill_exception() - Assemble TLB refill handler.
 * @addr:	Address to start writing code.
 * @handler:	Address of common handler (within range of @addr).
 *
 * Assemble TLB refill exception fast path handler for guest execution.
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler)
{
	u32 *p = addr;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
#ifndef CONFIG_CPU_LOONGSON64
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
#endif

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Save guest k1 into scratch register */
	UASM_i_MTC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);

	/* Get the VCPU pointer from the VCPU scratch register */
	UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);

	/* Save guest k0 into VCPU structure */
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu, arch.gprs[K0]), K1);

	/*
	 * Some of the common tlbex code uses current_cpu_type(). For KVM we
	 * assume symmetry and just disable preemption to silence the warning.
	 */
	preempt_disable();

#ifdef CONFIG_CPU_LOONGSON64
	UASM_i_MFC0(&p, K1, C0_PGD);
	uasm_i_lddir(&p, K0, K1, 3);  /* global page dir */
#ifndef __PAGETABLE_PMD_FOLDED
	uasm_i_lddir(&p, K1, K0, 1);  /* middle page dir */
#endif
	uasm_i_ldpte(&p, K1, 0);      /* even */
	uasm_i_ldpte(&p, K1, 1);      /* odd */
	uasm_i_tlbwr(&p);
#else
	/*
	 * Now for the actual refill bit. A lot of this can be common with the
	 * Linux TLB refill handler, however we don't need to handle so many
	 * cases. We only need to handle user mode refills, and user mode runs
	 * with 32-bit addressing.
	 *
	 * Therefore the branch to label_vmalloc generated by build_get_pmde64()
	 * that isn't resolved should never actually get taken and is harmless
	 * to leave in place for now.
	 */

#ifdef CONFIG_64BIT
	build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
#else
	build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
#endif

	/* we don't support huge pages yet */

	build_get_ptep(&p, K0, K1);
	build_update_entries(&p, K0, K1);
	build_tlb_write_entry(&p, &l, &r, tlb_random);
#endif

	preempt_enable();

	/* Get the VCPU pointer from the VCPU scratch register again */
	UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);

	/* Restore the guest's k0/k1 registers */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu, arch.gprs[K0]), K1);
	uasm_i_ehb(&p);
	UASM_i_MFC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);

	/* Jump to guest */
	uasm_i_eret(&p);

	return p;
}

/**
 * kvm_mips_build_exception() - Assemble first level guest exception handler.
 * @addr:	Address to start writing code.
 * @handler:	Address of common handler (within range of @addr).
 *
 * Assemble exception vector code for guest execution. The generated vector will
 * branch to the common exception handler generated by kvm_mips_build_exit().
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_exception(void *addr, void *handler)
{
	u32 *p = addr;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Save guest k1 into scratch register */
	UASM_i_MTC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);

	/* Get the VCPU pointer from the VCPU scratch register */
	UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
	UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));

	/* Save guest k0 into VCPU structure */
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);

	/* Branch to the common handler */
	uasm_il_b(&p, &r, label_exit_common);
	uasm_i_nop(&p);

	uasm_l_exit_common(&l, handler);
	uasm_resolve_relocs(relocs, labels);

	return p;
}

/**
 * kvm_mips_build_exit() - Assemble common guest exit handler.
 * @addr:	Address to start writing code.
 *
 * Assemble the generic guest exit handling code. This is called by the
 * exception vectors (generated by kvm_mips_build_exception()), and calls
 * kvm_mips_handle_exit(), then either resumes the guest or returns to the host
 * depending on the return value.
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_exit(void *addr)
{
	u32 *p = addr;
	unsigned int i;
	struct uasm_label labels[3];
	struct uasm_reloc relocs[3];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/*
	 * Generic Guest exception handler. We end up here when the guest
	 * does something that causes a trap to kernel mode.
	 *
	 * Both k0/k1 registers will have already been saved (k0 into the vcpu
	 * structure, and k1 into the scratch_tmp register).
	 *
	 * The k1 register will already contain the kvm_vcpu_arch pointer.
	 */

	/* Start saving Guest context to VCPU */
	for (i = 0; i < 32; ++i) {
		/* Guest k0/k1 saved later */
		if (i == K0 || i == K1)
			continue;
		UASM_i_SW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
	}

#ifndef CONFIG_CPU_MIPSR6
	/* We need to save hi/lo and restore them on the way out */
	uasm_i_mfhi(&p, T0);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, hi), K1);

	uasm_i_mflo(&p, T0);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, lo), K1);
#endif

	/* Finally save guest k1 to VCPU */
	uasm_i_ehb(&p);
	UASM_i_MFC0(&p, T0, scratch_tmp[0], scratch_tmp[1]);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);

	/* Now that context has been saved, we can use other registers */

	/* Restore vcpu */
	UASM_i_MFC0(&p, S0, scratch_vcpu[0], scratch_vcpu[1]);

	/*
	 * Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
	 * the exception
	 */
	UASM_i_MFC0(&p, K0, C0_EPC);
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, pc), K1);

	UASM_i_MFC0(&p, K0, C0_BADVADDR);
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_badvaddr),
		  K1);

	uasm_i_mfc0(&p, K0, C0_CAUSE);
	uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_cause), K1);

	if (cpu_has_badinstr) {
		uasm_i_mfc0(&p, K0, C0_BADINSTR);
		uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch,
					   host_cp0_badinstr), K1);
	}

	if (cpu_has_badinstrp) {
		uasm_i_mfc0(&p, K0, C0_BADINSTRP);
		uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch,
					   host_cp0_badinstrp), K1);
	}

	/* Now restore the host state just enough to run the handlers */

	/* Switch EBASE to the one used by Linux */
	uasm_i_mfc0(&p, V0, C0_STATUS);

	uasm_i_lui(&p, AT, ST0_BEV >> 16);
	uasm_i_or(&p, K0, V0, AT);

	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);

	UASM_i_LA_mostly(&p, K0, (long)&ebase);
	UASM_i_LW(&p, K0, uasm_rel_lo((long)&ebase), K0);
	build_set_exc_base(&p, K0);

	if (raw_cpu_has_fpu) {
		/*
		 * If FPU is enabled, save FCR31 and clear it so that later
		 * ctc1's don't trigger FPE for pending exceptions.
		 */
		uasm_i_lui(&p, AT, ST0_CU1 >> 16);
		uasm_i_and(&p, V1, V0, AT);
		uasm_il_beqz(&p, &r, V1, label_fpu_1);
		uasm_i_nop(&p);
		uasm_i_cfc1(&p, T0, 31);
		uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.fcr31),
			  K1);
		uasm_i_ctc1(&p, ZERO, 31);
		uasm_l_fpu_1(&l, p);
	}

	if (cpu_has_msa) {
		/*
		 * If MSA is enabled, save MSACSR and clear it so that later
		 * instructions don't trigger MSAFPE for pending exceptions.
		 */
		uasm_i_mfc0(&p, T0, C0_CONFIG5);
		uasm_i_ext(&p, T0, T0, 27, 1); /* MIPS_CONF5_MSAEN */
		uasm_il_beqz(&p, &r, T0, label_msa_1);
		uasm_i_nop(&p);
		uasm_i_cfcmsa(&p, T0, MSA_CSR);
		uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.msacsr),
			  K1);
		uasm_i_ctcmsa(&p, MSA_CSR, ZERO);
		uasm_l_msa_1(&l, p);
	}

	/* Restore host ASID */
	if (!cpu_has_guestid) {
		UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
			  K1);
		UASM_i_MTC0(&p, K0, C0_ENTRYHI);
	}

	/*
	 * Set up normal Linux process pgd.
	 * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
	 * - call tlbmiss_handler_setup_pgd(mm->pgd)
	 * - write mm->pgd into CP0_PWBase
	 */
	UASM_i_LW(&p, A0,
		  offsetof(struct kvm_vcpu_arch, host_pgd), K1);
	UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
	uasm_i_jalr(&p, RA, T9);
	/* delay slot */
	if (cpu_has_htw)
		UASM_i_MTC0(&p, A0, C0_PWBASE);
	else
		uasm_i_nop(&p);

	/* Clear GM bit so we don't enter guest mode when EXL is cleared */
	uasm_i_mfc0(&p, K0, C0_GUESTCTL0);
	uasm_i_ins(&p, K0, ZERO, MIPS_GCTL0_GM_SHIFT, 1);
	uasm_i_mtc0(&p, K0, C0_GUESTCTL0);

	/* Save GuestCtl0 so we can access GExcCode after CPU migration */
	uasm_i_sw(&p, K0,
		  offsetof(struct kvm_vcpu_arch, host_cp0_guestctl0), K1);

	if (cpu_has_guestid) {
		/*
		 * Clear root mode GuestID, so that root TLB operations use the
		 * root GuestID in the root TLB.
		 */
		uasm_i_mfc0(&p, T0, C0_GUESTCTL1);
		/* Set GuestCtl1.RID = MIPS_GCTL1_ROOT_GUESTID (i.e. 0) */
		uasm_i_ins(&p, T0, ZERO, MIPS_GCTL1_RID_SHIFT,
			   MIPS_GCTL1_RID_WIDTH);
		uasm_i_mtc0(&p, T0, C0_GUESTCTL1);
	}

	/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
	uasm_i_addiu(&p, AT, ZERO, ~(ST0_EXL | KSU_USER | ST0_IE));
	uasm_i_and(&p, V0, V0, AT);
	uasm_i_lui(&p, AT, ST0_CU0 >> 16);
	uasm_i_or(&p, V0, V0, AT);
#ifdef CONFIG_64BIT
	uasm_i_ori(&p, V0, V0, ST0_SX | ST0_UX);
#endif
	uasm_i_mtc0(&p, V0, C0_STATUS);
	uasm_i_ehb(&p);

	/* Load up host GP */
	UASM_i_LW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);

	/* Need a stack before we can jump to "C" */
	UASM_i_LW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);

	/* Saved host state */
	UASM_i_ADDIU(&p, SP, SP, -(int)sizeof(struct pt_regs));

	/*
	 * XXX do we need to load the host ASID, maybe not because the
	 * kernel entries are marked GLOBAL, need to verify
	 */

	/* Restore host scratch registers, as we'll have clobbered them */
	kvm_mips_build_restore_scratch(&p, K0, SP);

	/* Restore RDHWR access */
	UASM_i_LA_mostly(&p, K0, (long)&hwrena);
	uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
	uasm_i_mtc0(&p, K0, C0_HWRENA);

	/* Jump to handler */
	/*
	 * XXXKYMA: not sure if this is safe, how large is the stack??
	 * Now jump to the kvm_mips_handle_exit() to see if we can deal
	 * with this in the kernel
	 */
	uasm_i_move(&p, A0, S0);
	UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit);
	uasm_i_jalr(&p, RA, T9);
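	/* delay slot: allocate the ABI call frame for the C handler */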
	UASM_i_ADDIU(&p, SP, SP, -CALLFRAME_SIZ);

	uasm_resolve_relocs(relocs, labels);

	p = kvm_mips_build_ret_from_exit(p);

	return p;
}

/**
 * kvm_mips_build_ret_from_exit() - Assemble guest exit return handler.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to handle the return from kvm_mips_handle_exit(), either
 * resuming the guest or returning to the host depending on the return value.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_ret_from_exit(void *addr)
{
	u32 *p = addr;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Return from handler, make sure interrupts are disabled */
	uasm_i_di(&p, ZERO);
	uasm_i_ehb(&p);

	/*
	 * XXXKYMA: k0/k1 could have been blown away if we processed
	 * an exception while we were handling the exception from the
	 * guest, reload k1
	 */

	uasm_i_move(&p, K1, S0);
	UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));

	/*
	 * Check return value, should tell us if we are returning to the
	 * host (handle I/O etc) or resuming the guest
	 */
	uasm_i_andi(&p, T0, V0, RESUME_HOST);
	uasm_il_bnez(&p, &r, T0, label_return_to_host);
	uasm_i_nop(&p);

	p = kvm_mips_build_ret_to_guest(p);

	uasm_l_return_to_host(&l, p);
	p = kvm_mips_build_ret_to_host(p);

	uasm_resolve_relocs(relocs, labels);

	return p;
}

/**
 * kvm_mips_build_ret_to_guest() - Assemble code to return to the guest.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to handle return from the guest exit handler
 * (kvm_mips_handle_exit()) back to the guest.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_ret_to_guest(void *addr)
{
	u32 *p = addr;

	/* Put the saved pointer to vcpu (s0) back into the scratch register */
	UASM_i_MTC0(&p, S0, scratch_vcpu[0], scratch_vcpu[1]);

	/* Load up the Guest EBASE to minimize the window where BEV is set */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);

	/* Switch EBASE back to the one used by KVM */
	uasm_i_mfc0(&p, V1, C0_STATUS);
	uasm_i_lui(&p, AT, ST0_BEV >> 16);
	uasm_i_or(&p, K0, V1, AT);
	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);
	build_set_exc_base(&p, T0);

	/* Setup status register for running guest in UM */
	uasm_i_ori(&p, V1, V1, ST0_EXL | KSU_USER | ST0_IE);
	UASM_i_LA(&p, AT, ~(ST0_CU0 | ST0_MX | ST0_SX | ST0_UX));
	uasm_i_and(&p, V1, V1, AT);
	uasm_i_mtc0(&p, V1, C0_STATUS);
	uasm_i_ehb(&p);

	p = kvm_mips_build_enter_guest(p);

	return p;
}

/**
 * kvm_mips_build_ret_to_host() - Assemble code to return to the host.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to handle return from the guest exit handler
 * (kvm_mips_handle_exit()) back to the host, i.e. to the caller of the vcpu_run
 * function generated by kvm_mips_build_vcpu_run().
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_ret_to_host(void *addr)
{
	u32 *p = addr;
	unsigned int i;

	/* EBASE is already pointing to Linux */
	UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, host_stack), K1);
	UASM_i_ADDIU(&p, K1, K1, -(int)sizeof(struct pt_regs));

	/*
	 * r2/v0 is the return code, shift it down by 2 (arithmetic)
	 * to recover the err code
	 */
	uasm_i_sra(&p, K0, V0, 2);
	uasm_i_move(&p, V0, K0);

	/* Load context saved on the host stack */
	for (i = 16; i < 31; ++i) {
		if (i == 24)
			i = 28;
		UASM_i_LW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
	}

	/* Restore RDHWR access */
	UASM_i_LA_mostly(&p, K0, (long)&hwrena);
	uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
	uasm_i_mtc0(&p, K0, C0_HWRENA);

	/* Restore RA, which is the address we will return to */
	UASM_i_LW(&p, RA, offsetof(struct pt_regs, regs[RA]), K1);
	uasm_i_jr(&p, RA);
	uasm_i_nop(&p);

	return p;
}