/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Generation of main entry point for the guest, exception handling.
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 *
 * Copyright (C) 2016 Imagination Technologies Ltd.
 */

#include <linux/kvm_host.h>
#include <linux/log2.h>
#include <asm/mmu_context.h>
#include <asm/msa.h>
#include <asm/setup.h>
#include <asm/tlbex.h>
#include <asm/uasm.h>

/* Register names */
#define ZERO        0
#define AT      1
#define V0      2
#define V1      3
#define A0      4
#define A1      5

#if _MIPS_SIM == _MIPS_SIM_ABI32
#define T0      8
#define T1      9
#define T2      10
#define T3      11
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */

#if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32
#define T0      12
#define T1      13
#define T2      14
#define T3      15
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */

#define S0      16
#define S1      17
#define T9      25
#define K0      26
#define K1      27
#define GP      28
#define SP      29
#define RA      31

/* Some CP0 registers */
#define C0_PWBASE   5, 5
#define C0_HWRENA   7, 0
#define C0_BADVADDR 8, 0
#define C0_BADINSTR 8, 1
#define C0_BADINSTRP    8, 2
#define C0_PGD      9, 7
#define C0_ENTRYHI  10, 0
#define C0_GUESTCTL1    10, 4
#define C0_STATUS   12, 0
#define C0_GUESTCTL0    12, 6
#define C0_CAUSE    13, 0
#define C0_EPC      14, 0
#define C0_EBASE    15, 1
#define C0_CONFIG5  16, 5
#define C0_DDATA_LO 28, 3
#define C0_ERROREPC 30, 0

#define CALLFRAME_SIZ   32

#ifdef CONFIG_64BIT
#define ST0_KX_IF_64    ST0_KX
#else
#define ST0_KX_IF_64    0
#endif

static unsigned int scratch_vcpu[2] = { C0_DDATA_LO };
static unsigned int scratch_tmp[2] = { C0_ERROREPC };

enum label_id {
    label_fpu_1 = 1,
    label_msa_1,
    label_return_to_host,
    label_kernel_asid,
    label_exit_common,
};

UASM_L_LA(_fpu_1)
UASM_L_LA(_msa_1)
UASM_L_LA(_return_to_host)
UASM_L_LA(_kernel_asid)
UASM_L_LA(_exit_common)

static void *kvm_mips_build_enter_guest(void *addr);
static void *kvm_mips_build_ret_from_exit(void *addr);
static void *kvm_mips_build_ret_to_guest(void *addr);
static void *kvm_mips_build_ret_to_host(void *addr);

/*
 * The version of this function in tlbex.c uses current_cpu_type(), but for KVM
 * we assume symmetry.
 */
static int c0_kscratch(void)
{
    return 31;
}

/**
 * kvm_mips_entry_setup() - Perform global setup for entry code.
 *
 * Perform global setup for entry code, such as choosing a scratch register.
 *
 * Returns: 0 on success.
 *      -errno on failure.
 */
int kvm_mips_entry_setup(void)
{
    /*
     * We prefer to use KScratchN registers if they are available over the
     * defaults above, which may not work on all cores.
     */
    unsigned int kscratch_mask = cpu_data[0].kscratch_mask;

    if (pgd_reg != -1)
        kscratch_mask &= ~BIT(pgd_reg);

    /* Pick a scratch register for storing VCPU */
    if (kscratch_mask) {
        scratch_vcpu[0] = c0_kscratch();
        scratch_vcpu[1] = ffs(kscratch_mask) - 1;
        kscratch_mask &= ~BIT(scratch_vcpu[1]);
    }

    /* Pick a scratch register to use as a temp for saving state */
    if (kscratch_mask) {
        scratch_tmp[0] = c0_kscratch();
        scratch_tmp[1] = ffs(kscratch_mask) - 1;
        kscratch_mask &= ~BIT(scratch_tmp[1]);
    }

    return 0;
}
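
/*
 * Worked example (hypothetical values, for illustration only): on a core
 * that implements KScratch2-KScratch7 (kscratch_mask == 0xfc) and where
 * pgd_reg has already claimed KScratch2, kvm_mips_entry_setup() above
 * would pick scratch_vcpu = { 31, 3 } (CP0 register 31 select 3, i.e.
 * KScratch3) and scratch_tmp = { 31, 4 } (KScratch4), replacing the
 * DDataLo/ErrorEPC defaults.
 */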

static void kvm_mips_build_save_scratch(u32 **p, unsigned int tmp,
                    unsigned int frame)
{
    /* Save the VCPU scratch register value in cp0_epc of the stack frame */
    UASM_i_MFC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);
    UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);

    /* Save the temp scratch register value in cp0_cause of stack frame */
    if (scratch_tmp[0] == c0_kscratch()) {
        UASM_i_MFC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
        UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
    }
}

static void kvm_mips_build_restore_scratch(u32 **p, unsigned int tmp,
                       unsigned int frame)
{
    /*
     * Restore host scratch register values saved by
     * kvm_mips_build_save_scratch().
     */
    UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);
    UASM_i_MTC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);

    if (scratch_tmp[0] == c0_kscratch()) {
        UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
        UASM_i_MTC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
    }
}

/**
 * build_set_exc_base() - Assemble code to write exception base address.
 * @p:      Code buffer pointer.
 * @reg:    Source register (generated code may set WG bit in @reg).
 *
 * Assemble code to modify the exception base address in the EBase register,
 * using the appropriately sized access and setting the WG bit if necessary.
 */
static inline void build_set_exc_base(u32 **p, unsigned int reg)
{
    if (cpu_has_ebase_wg) {
        /* Set WG so that all the bits get written */
        uasm_i_ori(p, reg, reg, MIPS_EBASE_WG);
        UASM_i_MTC0(p, reg, C0_EBASE);
    } else {
        uasm_i_mtc0(p, reg, C0_EBASE);
    }
}
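
/*
 * For reference, build_set_exc_base() above emits roughly the following
 * (64-bit case shown; UASM_i_MTC0 becomes a plain mtc0 on 32-bit kernels):
 *
 *   ori   reg, reg, MIPS_EBASE_WG   # only when cpu_has_ebase_wg
 *   dmtc0 reg, $15, 1               # CP0 EBase
 */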

/**
 * kvm_mips_build_vcpu_run() - Assemble function to start running a guest VCPU.
 * @addr:   Address to start writing code.
 *
 * Assemble the start of the vcpu_run function to run a guest VCPU. The function
 * conforms to the following prototype:
 *
 * int vcpu_run(struct kvm_vcpu *vcpu);
 *
 * The exit from the guest and return to the caller is handled by the code
 * generated by kvm_mips_build_ret_to_host().
 *
 * Returns: Next address after end of written function.
 */
void *kvm_mips_build_vcpu_run(void *addr)
{
    u32 *p = addr;
    unsigned int i;

    /*
     * A0: vcpu
     */

    /* k0/k1 not being used in host kernel context */
    UASM_i_ADDIU(&p, K1, SP, -(int)sizeof(struct pt_regs));
    for (i = 16; i < 32; ++i) {
        if (i == 24)
            i = 28;
        UASM_i_SW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
    }

    /* Save host status */
    uasm_i_mfc0(&p, V0, C0_STATUS);
    UASM_i_SW(&p, V0, offsetof(struct pt_regs, cp0_status), K1);

    /* Save scratch registers, will be used to store pointer to vcpu etc */
    kvm_mips_build_save_scratch(&p, V1, K1);

    /* VCPU scratch register has pointer to vcpu */
    UASM_i_MTC0(&p, A0, scratch_vcpu[0], scratch_vcpu[1]);

    /* Offset into vcpu->arch */
    UASM_i_ADDIU(&p, K1, A0, offsetof(struct kvm_vcpu, arch));

    /*
     * Save the host stack to VCPU, used for exception processing
     * when we exit from the Guest
     */
    UASM_i_SW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);

    /* Save the kernel gp as well */
    UASM_i_SW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);

    /*
     * Setup status register for running the guest in UM, interrupts
     * are disabled
     */
    UASM_i_LA(&p, K0, ST0_EXL | KSU_USER | ST0_BEV | ST0_KX_IF_64);
    uasm_i_mtc0(&p, K0, C0_STATUS);
    uasm_i_ehb(&p);

    /* load up the new EBASE */
    UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
    build_set_exc_base(&p, K0);

    /*
     * Now that the new EBASE has been loaded, unset BEV, set
     * interrupt mask as it was but make sure that timer interrupts
     * are enabled
     */
    uasm_i_addiu(&p, K0, ZERO, ST0_EXL | KSU_USER | ST0_IE | ST0_KX_IF_64);
    uasm_i_andi(&p, V0, V0, ST0_IM);
    uasm_i_or(&p, K0, K0, V0);
    uasm_i_mtc0(&p, K0, C0_STATUS);
    uasm_i_ehb(&p);

    p = kvm_mips_build_enter_guest(p);

    return p;
}
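
/*
 * Rough shape of the prologue generated by kvm_mips_build_vcpu_run() above
 * (not literal output; register widths and exact opcodes depend on the
 * kernel configuration):
 *
 *   addiu k1, sp, -sizeof(struct pt_regs)  # carve a pt_regs frame
 *   sw    s0..s7, gp, sp, fp, ra, <frame>  # callee-saved host registers
 *   mfc0  v0, CP0_Status; sw v0, <frame>   # host Status
 *   <save host scratch CP0 state>          # kvm_mips_build_save_scratch()
 *   mtc0  a0, <scratch_vcpu>               # stash vcpu pointer
 *   addiu k1, a0, offsetof(struct kvm_vcpu, arch)
 *   sw    sp/gp, host_stack/host_gp(k1)
 *   <switch Status and EBase, then fall into the enter_guest code>
 */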

/**
 * kvm_mips_build_enter_guest() - Assemble code to resume guest execution.
 * @addr:   Address to start writing code.
 *
 * Assemble the code to resume guest execution. This code is common between the
 * initial entry into the guest from the host, and returning from the exit
 * handler back to the guest.
 *
 * Returns: Next address after end of written function.
 */
static void *kvm_mips_build_enter_guest(void *addr)
{
    u32 *p = addr;
    unsigned int i;
    struct uasm_label labels[2];
    struct uasm_reloc relocs[2];
    struct uasm_label __maybe_unused *l = labels;
    struct uasm_reloc __maybe_unused *r = relocs;

    memset(labels, 0, sizeof(labels));
    memset(relocs, 0, sizeof(relocs));

    /* Set Guest EPC */
    UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1);
    UASM_i_MTC0(&p, T0, C0_EPC);

    /* Save normal linux process pgd (VZ guarantees pgd_reg is set) */
    if (cpu_has_ldpte)
        UASM_i_MFC0(&p, K0, C0_PWBASE);
    else
        UASM_i_MFC0(&p, K0, c0_kscratch(), pgd_reg);
    UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_pgd), K1);

    /*
     * Set up KVM GPA pgd.
     * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
     * - call tlbmiss_handler_setup_pgd(mm->pgd)
     * - write mm->pgd into CP0_PWBase
     *
     * We keep S0 pointing at struct kvm so we can load the ASID below.
     */
    UASM_i_LW(&p, S0, (int)offsetof(struct kvm_vcpu, kvm) -
              (int)offsetof(struct kvm_vcpu, arch), K1);
    UASM_i_LW(&p, A0, offsetof(struct kvm, arch.gpa_mm.pgd), S0);
    UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
    uasm_i_jalr(&p, RA, T9);
    /* delay slot */
    if (cpu_has_htw)
        UASM_i_MTC0(&p, A0, C0_PWBASE);
    else
        uasm_i_nop(&p);

    /* Set GM bit to setup eret to VZ guest context */
    uasm_i_addiu(&p, V1, ZERO, 1);
    uasm_i_mfc0(&p, K0, C0_GUESTCTL0);
    uasm_i_ins(&p, K0, V1, MIPS_GCTL0_GM_SHIFT, 1);
    uasm_i_mtc0(&p, K0, C0_GUESTCTL0);

    if (cpu_has_guestid) {
        /*
         * Set root mode GuestID, so that root TLB refill handler can
         * use the correct GuestID in the root TLB.
         */

        /* Get current GuestID */
        uasm_i_mfc0(&p, T0, C0_GUESTCTL1);
        /* Set GuestCtl1.RID = GuestCtl1.ID */
        uasm_i_ext(&p, T1, T0, MIPS_GCTL1_ID_SHIFT,
               MIPS_GCTL1_ID_WIDTH);
        uasm_i_ins(&p, T0, T1, MIPS_GCTL1_RID_SHIFT,
               MIPS_GCTL1_RID_WIDTH);
        uasm_i_mtc0(&p, T0, C0_GUESTCTL1);

        /* GuestID handles dealiasing so we don't need to touch ASID */
        goto skip_asid_restore;
    }

    /* Root ASID Dealias (RAD) */

    /* Save host ASID */
    UASM_i_MFC0(&p, K0, C0_ENTRYHI);
    UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
          K1);

    /* Set the root ASID for the Guest */
    UASM_i_ADDIU(&p, T1, S0,
             offsetof(struct kvm, arch.gpa_mm.context.asid));

    /* t1: contains the base of the ASID array, need to get the cpu id */
    /* smp_processor_id */
    uasm_i_lw(&p, T2, offsetof(struct thread_info, cpu), GP);
    /* index the ASID array */
    uasm_i_sll(&p, T2, T2, ilog2(sizeof(long)));
    UASM_i_ADDU(&p, T3, T1, T2);
    UASM_i_LW(&p, K0, 0, T3);
#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
    /*
     * reuse ASID array offset
     * cpuinfo_mips is a multiple of sizeof(long)
     */
    uasm_i_addiu(&p, T3, ZERO, sizeof(struct cpuinfo_mips)/sizeof(long));
    uasm_i_mul(&p, T2, T2, T3);

    UASM_i_LA_mostly(&p, AT, (long)&cpu_data[0].asid_mask);
    UASM_i_ADDU(&p, AT, AT, T2);
    UASM_i_LW(&p, T2, uasm_rel_lo((long)&cpu_data[0].asid_mask), AT);
    uasm_i_and(&p, K0, K0, T2);
#else
    uasm_i_andi(&p, K0, K0, MIPS_ENTRYHI_ASID);
#endif

    /* Set up KVM VZ root ASID (!guestid) */
    uasm_i_mtc0(&p, K0, C0_ENTRYHI);
skip_asid_restore:
    uasm_i_ehb(&p);

    /* Disable RDHWR access */
    uasm_i_mtc0(&p, ZERO, C0_HWRENA);

    /* load the guest context from VCPU and return */
    for (i = 1; i < 32; ++i) {
        /* Guest k0/k1 loaded later */
        if (i == K0 || i == K1)
            continue;
        UASM_i_LW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
    }

#ifndef CONFIG_CPU_MIPSR6
    /* Restore hi/lo */
    UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, hi), K1);
    uasm_i_mthi(&p, K0);

    UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, lo), K1);
    uasm_i_mtlo(&p, K0);
#endif

    /* Restore the guest's k0/k1 registers */
    UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);
    UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);

    /* Jump to guest */
    uasm_i_eret(&p);

    uasm_resolve_relocs(relocs, labels);

    return p;
}
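
/*
 * Note on the ASID lookup in kvm_mips_build_enter_guest() above (the
 * !cpu_has_guestid path): kvm->arch.gpa_mm.context.asid is an array of
 * longs indexed by CPU, so the generated code loads thread_info->cpu via
 * gp, scales it by sizeof(long), adds it to the array base in t1 and loads
 * the ASID. With CONFIG_MIPS_ASID_BITS_VARIABLE the same scaled index is
 * reused (multiplied up to sizeof(struct cpuinfo_mips)) to reach the
 * per-CPU asid_mask in cpu_data[]; otherwise the constant
 * MIPS_ENTRYHI_ASID mask is applied.
 */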

/**
 * kvm_mips_build_tlb_refill_exception() - Assemble TLB refill handler.
 * @addr:   Address to start writing code.
 * @handler:    Address of common handler (within range of @addr).
 *
 * Assemble TLB refill exception fast path handler for guest execution.
 *
 * Returns: Next address after end of written function.
 */
void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler)
{
    u32 *p = addr;
    struct uasm_label labels[2];
    struct uasm_reloc relocs[2];
#ifndef CONFIG_CPU_LOONGSON64
    struct uasm_label *l = labels;
    struct uasm_reloc *r = relocs;
#endif

    memset(labels, 0, sizeof(labels));
    memset(relocs, 0, sizeof(relocs));

    /* Save guest k1 into scratch register */
    UASM_i_MTC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);

    /* Get the VCPU pointer from the VCPU scratch register */
    UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);

    /* Save guest k0 into VCPU structure */
    UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu, arch.gprs[K0]), K1);

    /*
     * Some of the common tlbex code uses current_cpu_type(). For KVM we
     * assume symmetry and just disable preemption to silence the warning.
     */
    preempt_disable();

#ifdef CONFIG_CPU_LOONGSON64
    UASM_i_MFC0(&p, K1, C0_PGD);
    uasm_i_lddir(&p, K0, K1, 3);  /* global page dir */
#ifndef __PAGETABLE_PMD_FOLDED
    uasm_i_lddir(&p, K1, K0, 1);  /* middle page dir */
#endif
    uasm_i_ldpte(&p, K1, 0);      /* even */
    uasm_i_ldpte(&p, K1, 1);      /* odd */
    uasm_i_tlbwr(&p);
#else
    /*
     * Now for the actual refill bit. A lot of this can be common with the
     * Linux TLB refill handler, however we don't need to handle so many
     * cases. We only need to handle user mode refills, and user mode runs
     * with 32-bit addressing.
     *
     * Therefore the branch to label_vmalloc generated by build_get_pmde64()
     * that isn't resolved should never actually get taken and is harmless
     * to leave in place for now.
     */

#ifdef CONFIG_64BIT
    build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
#else
    build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
#endif

    /* we don't support huge pages yet */

    build_get_ptep(&p, K0, K1);
    build_update_entries(&p, K0, K1);
    build_tlb_write_entry(&p, &l, &r, tlb_random);
#endif

    preempt_enable();

    /* Get the VCPU pointer from the VCPU scratch register again */
    UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);

    /* Restore the guest's k0/k1 registers */
    UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu, arch.gprs[K0]), K1);
    uasm_i_ehb(&p);
    UASM_i_MFC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);

    /* Jump to guest */
    uasm_i_eret(&p);

    return p;
}
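
/*
 * On non-Loongson cores the refill body above reuses the standard Linux
 * TLB refill building blocks, so the generated fast path is approximately:
 * walk the page tables set up for the guest (pgd/pmd) for the faulting
 * address, fetch the even/odd PTE pair, write them to EntryLo0/EntryLo1
 * and issue tlbwr, with the vcpu pointer and guest k0/k1 parked in the
 * scratch registers throughout.
 */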

/**
 * kvm_mips_build_exception() - Assemble first level guest exception handler.
 * @addr:   Address to start writing code.
 * @handler:    Address of common handler (within range of @addr).
 *
 * Assemble exception vector code for guest execution. The generated vector will
 * branch to the common exception handler generated by kvm_mips_build_exit().
 *
 * Returns: Next address after end of written function.
 */
void *kvm_mips_build_exception(void *addr, void *handler)
{
    u32 *p = addr;
    struct uasm_label labels[2];
    struct uasm_reloc relocs[2];
    struct uasm_label *l = labels;
    struct uasm_reloc *r = relocs;

    memset(labels, 0, sizeof(labels));
    memset(relocs, 0, sizeof(relocs));

    /* Save guest k1 into scratch register */
    UASM_i_MTC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);

    /* Get the VCPU pointer from the VCPU scratch register */
    UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
    UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));

    /* Save guest k0 into VCPU structure */
    UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);

    /* Branch to the common handler */
    uasm_il_b(&p, &r, label_exit_common);
     uasm_i_nop(&p);

    uasm_l_exit_common(&l, handler);
    uasm_resolve_relocs(relocs, labels);

    return p;
}
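
/*
 * The vector emitted by kvm_mips_build_exception() above is only a few
 * instructions; on a 64-bit kernel it looks roughly like:
 *
 *   dmtc0  k1, <scratch_tmp>          # preserve guest k1
 *   dmfc0  k1, <scratch_vcpu>         # k1 = vcpu
 *   daddiu k1, k1, offsetof(struct kvm_vcpu, arch)
 *   sd     k0, gprs[K0](k1)           # preserve guest k0
 *   b      <common exit handler>
 *    nop
 */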

/**
 * kvm_mips_build_exit() - Assemble common guest exit handler.
 * @addr:   Address to start writing code.
 *
 * Assemble the generic guest exit handling code. This is called by the
 * exception vectors (generated by kvm_mips_build_exception()), and calls
 * kvm_mips_handle_exit(), then either resumes the guest or returns to the host
 * depending on the return value.
 *
 * Returns: Next address after end of written function.
 */
void *kvm_mips_build_exit(void *addr)
{
    u32 *p = addr;
    unsigned int i;
    struct uasm_label labels[3];
    struct uasm_reloc relocs[3];
    struct uasm_label *l = labels;
    struct uasm_reloc *r = relocs;

    memset(labels, 0, sizeof(labels));
    memset(relocs, 0, sizeof(relocs));

    /*
     * Generic Guest exception handler. We end up here when the guest
     * does something that causes a trap to kernel mode.
     *
     * Both k0/k1 registers will have already been saved (k0 into the vcpu
     * structure, and k1 into the scratch_tmp register).
     *
     * The k1 register will already contain the kvm_vcpu_arch pointer.
     */

    /* Start saving Guest context to VCPU */
    for (i = 0; i < 32; ++i) {
        /* Guest k0/k1 saved later */
        if (i == K0 || i == K1)
            continue;
        UASM_i_SW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
    }

#ifndef CONFIG_CPU_MIPSR6
    /* We need to save hi/lo and restore them on the way out */
    uasm_i_mfhi(&p, T0);
    UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, hi), K1);

    uasm_i_mflo(&p, T0);
    UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, lo), K1);
#endif

    /* Finally save guest k1 to VCPU */
    uasm_i_ehb(&p);
    UASM_i_MFC0(&p, T0, scratch_tmp[0], scratch_tmp[1]);
    UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);

    /* Now that context has been saved, we can use other registers */

    /* Restore vcpu */
    UASM_i_MFC0(&p, S0, scratch_vcpu[0], scratch_vcpu[1]);

    /*
     * Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
     * the exception
     */
    UASM_i_MFC0(&p, K0, C0_EPC);
    UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, pc), K1);

    UASM_i_MFC0(&p, K0, C0_BADVADDR);
    UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_badvaddr),
          K1);

    uasm_i_mfc0(&p, K0, C0_CAUSE);
    uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_cause), K1);

    if (cpu_has_badinstr) {
        uasm_i_mfc0(&p, K0, C0_BADINSTR);
        uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch,
                       host_cp0_badinstr), K1);
    }

    if (cpu_has_badinstrp) {
        uasm_i_mfc0(&p, K0, C0_BADINSTRP);
        uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch,
                       host_cp0_badinstrp), K1);
    }

    /* Now restore the host state just enough to run the handlers */

    /* Switch EBASE to the one used by Linux */
    /* load up the host EBASE */
    uasm_i_mfc0(&p, V0, C0_STATUS);

    uasm_i_lui(&p, AT, ST0_BEV >> 16);
    uasm_i_or(&p, K0, V0, AT);

    uasm_i_mtc0(&p, K0, C0_STATUS);
    uasm_i_ehb(&p);

    UASM_i_LA_mostly(&p, K0, (long)&ebase);
    UASM_i_LW(&p, K0, uasm_rel_lo((long)&ebase), K0);
    build_set_exc_base(&p, K0);

    if (raw_cpu_has_fpu) {
        /*
         * If FPU is enabled, save FCR31 and clear it so that later
         * ctc1's don't trigger FPE for pending exceptions.
         */
        uasm_i_lui(&p, AT, ST0_CU1 >> 16);
        uasm_i_and(&p, V1, V0, AT);
        uasm_il_beqz(&p, &r, V1, label_fpu_1);
         uasm_i_nop(&p);
        uasm_i_cfc1(&p, T0, 31);
        uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.fcr31),
              K1);
        uasm_i_ctc1(&p, ZERO, 31);
        uasm_l_fpu_1(&l, p);
    }

    if (cpu_has_msa) {
        /*
         * If MSA is enabled, save MSACSR and clear it so that later
         * instructions don't trigger MSAFPE for pending exceptions.
         */
        uasm_i_mfc0(&p, T0, C0_CONFIG5);
        uasm_i_ext(&p, T0, T0, 27, 1); /* MIPS_CONF5_MSAEN */
        uasm_il_beqz(&p, &r, T0, label_msa_1);
         uasm_i_nop(&p);
        uasm_i_cfcmsa(&p, T0, MSA_CSR);
        uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.msacsr),
              K1);
        uasm_i_ctcmsa(&p, MSA_CSR, ZERO);
        uasm_l_msa_1(&l, p);
    }

    /* Restore host ASID */
    if (!cpu_has_guestid) {
        UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
              K1);
        UASM_i_MTC0(&p, K0, C0_ENTRYHI);
    }

    /*
     * Set up normal Linux process pgd.
     * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
     * - call tlbmiss_handler_setup_pgd(mm->pgd)
     * - write mm->pgd into CP0_PWBase
     */
    UASM_i_LW(&p, A0,
          offsetof(struct kvm_vcpu_arch, host_pgd), K1);
    UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
    uasm_i_jalr(&p, RA, T9);
    /* delay slot */
    if (cpu_has_htw)
        UASM_i_MTC0(&p, A0, C0_PWBASE);
    else
        uasm_i_nop(&p);

    /* Clear GM bit so we don't enter guest mode when EXL is cleared */
    uasm_i_mfc0(&p, K0, C0_GUESTCTL0);
    uasm_i_ins(&p, K0, ZERO, MIPS_GCTL0_GM_SHIFT, 1);
    uasm_i_mtc0(&p, K0, C0_GUESTCTL0);

    /* Save GuestCtl0 so we can access GExcCode after CPU migration */
    uasm_i_sw(&p, K0,
          offsetof(struct kvm_vcpu_arch, host_cp0_guestctl0), K1);

    if (cpu_has_guestid) {
        /*
         * Clear root mode GuestID, so that root TLB operations use the
         * root GuestID in the root TLB.
         */
        uasm_i_mfc0(&p, T0, C0_GUESTCTL1);
        /* Set GuestCtl1.RID = MIPS_GCTL1_ROOT_GUESTID (i.e. 0) */
        uasm_i_ins(&p, T0, ZERO, MIPS_GCTL1_RID_SHIFT,
               MIPS_GCTL1_RID_WIDTH);
        uasm_i_mtc0(&p, T0, C0_GUESTCTL1);
    }

    /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
    uasm_i_addiu(&p, AT, ZERO, ~(ST0_EXL | KSU_USER | ST0_IE));
    uasm_i_and(&p, V0, V0, AT);
    uasm_i_lui(&p, AT, ST0_CU0 >> 16);
    uasm_i_or(&p, V0, V0, AT);
#ifdef CONFIG_64BIT
    uasm_i_ori(&p, V0, V0, ST0_SX | ST0_UX);
#endif
    uasm_i_mtc0(&p, V0, C0_STATUS);
    uasm_i_ehb(&p);

    /* Load up host GP */
    UASM_i_LW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);

    /* Need a stack before we can jump to "C" */
    UASM_i_LW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);

    /* Saved host state */
    UASM_i_ADDIU(&p, SP, SP, -(int)sizeof(struct pt_regs));

    /*
     * XXXKYMA do we need to load the host ASID, maybe not because the
     * kernel entries are marked GLOBAL, need to verify
     */

    /* Restore host scratch registers, as we'll have clobbered them */
    kvm_mips_build_restore_scratch(&p, K0, SP);

    /* Restore RDHWR access */
    UASM_i_LA_mostly(&p, K0, (long)&hwrena);
    uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
    uasm_i_mtc0(&p, K0, C0_HWRENA);

    /* Jump to handler */
    /*
     * XXXKYMA: not sure if this is safe, how large is the stack??
     * Now jump to the kvm_mips_handle_exit() to see if we can deal
     * with this in the kernel
     */
    uasm_i_move(&p, A0, S0);
    UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit);
    uasm_i_jalr(&p, RA, T9);
     UASM_i_ADDIU(&p, SP, SP, -CALLFRAME_SIZ);

    uasm_resolve_relocs(relocs, labels);

    p = kvm_mips_build_ret_from_exit(p);

    return p;
}
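
/*
 * Note: the final jalr in kvm_mips_build_exit() above calls
 * kvm_mips_handle_exit() with a0 = vcpu (copied from s0); its delay slot
 * drops sp by CALLFRAME_SIZ so the C callee gets its own small, aligned
 * call frame below the saved pt_regs area.
 */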

/**
 * kvm_mips_build_ret_from_exit() - Assemble guest exit return handler.
 * @addr:   Address to start writing code.
 *
 * Assemble the code to handle the return from kvm_mips_handle_exit(), either
 * resuming the guest or returning to the host depending on the return value.
 *
 * Returns: Next address after end of written function.
 */
static void *kvm_mips_build_ret_from_exit(void *addr)
{
    u32 *p = addr;
    struct uasm_label labels[2];
    struct uasm_reloc relocs[2];
    struct uasm_label *l = labels;
    struct uasm_reloc *r = relocs;

    memset(labels, 0, sizeof(labels));
    memset(relocs, 0, sizeof(relocs));

    /* Return from handler; make sure interrupts are disabled */
    uasm_i_di(&p, ZERO);
    uasm_i_ehb(&p);

    /*
     * XXXKYMA: k0/k1 could have been blown away if we processed
     * an exception while we were handling the exception from the
     * guest, reload k1
     */

    uasm_i_move(&p, K1, S0);
    UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));

    /*
     * Check return value, should tell us if we are returning to the
     * host (handle I/O etc.) or resuming the guest
     */
    uasm_i_andi(&p, T0, V0, RESUME_HOST);
    uasm_il_bnez(&p, &r, T0, label_return_to_host);
     uasm_i_nop(&p);

    p = kvm_mips_build_ret_to_guest(p);

    uasm_l_return_to_host(&l, p);
    p = kvm_mips_build_ret_to_host(p);

    uasm_resolve_relocs(relocs, labels);

    return p;
}
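
/*
 * In kvm_mips_build_ret_from_exit() above, the andi/bnez pair tests the
 * RESUME_HOST flag in the value returned by kvm_mips_handle_exit(): when
 * it is set, control branches to the return-to-host code placed at
 * label_return_to_host; otherwise execution falls straight through into
 * the return-to-guest code assembled immediately after the branch.
 */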

/**
 * kvm_mips_build_ret_to_guest() - Assemble code to return to the guest.
 * @addr:   Address to start writing code.
 *
 * Assemble the code to handle return from the guest exit handler
 * (kvm_mips_handle_exit()) back to the guest.
 *
 * Returns: Next address after end of written function.
 */
static void *kvm_mips_build_ret_to_guest(void *addr)
{
    u32 *p = addr;

    /* Put the saved pointer to vcpu (s0) back into the scratch register */
    UASM_i_MTC0(&p, S0, scratch_vcpu[0], scratch_vcpu[1]);

    /* Load up the Guest EBASE to minimize the window where BEV is set */
    UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);

    /* Switch EBASE back to the one used by KVM */
    uasm_i_mfc0(&p, V1, C0_STATUS);
    uasm_i_lui(&p, AT, ST0_BEV >> 16);
    uasm_i_or(&p, K0, V1, AT);
    uasm_i_mtc0(&p, K0, C0_STATUS);
    uasm_i_ehb(&p);
    build_set_exc_base(&p, T0);

    /* Setup status register for running guest in UM */
    uasm_i_ori(&p, V1, V1, ST0_EXL | KSU_USER | ST0_IE);
    UASM_i_LA(&p, AT, ~(ST0_CU0 | ST0_MX | ST0_SX | ST0_UX));
    uasm_i_and(&p, V1, V1, AT);
    uasm_i_mtc0(&p, V1, C0_STATUS);
    uasm_i_ehb(&p);

    p = kvm_mips_build_enter_guest(p);

    return p;
}
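
/*
 * Note for kvm_mips_build_ret_to_guest() above: Status.BEV is set (with a
 * hazard barrier) before EBase is switched over to the guest's exception
 * base, and the guest_ebase value is loaded into t0 early, keeping the
 * window in which BEV is set as short as possible (see the comment at the
 * load).
 */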

/**
 * kvm_mips_build_ret_to_host() - Assemble code to return to the host.
 * @addr:   Address to start writing code.
 *
 * Assemble the code to handle return from the guest exit handler
 * (kvm_mips_handle_exit()) back to the host, i.e. to the caller of the vcpu_run
 * function generated by kvm_mips_build_vcpu_run().
 *
 * Returns: Next address after end of written function.
 */
static void *kvm_mips_build_ret_to_host(void *addr)
{
    u32 *p = addr;
    unsigned int i;

    /* EBASE is already pointing to Linux */
    UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, host_stack), K1);
    UASM_i_ADDIU(&p, K1, K1, -(int)sizeof(struct pt_regs));

    /*
     * r2/v0 is the return code, shift it down by 2 (arithmetic)
     * to recover the err code
     */
    uasm_i_sra(&p, K0, V0, 2);
    uasm_i_move(&p, V0, K0);

    /* Load context saved on the host stack */
    for (i = 16; i < 31; ++i) {
        if (i == 24)
            i = 28;
        UASM_i_LW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
    }

    /* Restore RDHWR access */
    UASM_i_LA_mostly(&p, K0, (long)&hwrena);
    uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
    uasm_i_mtc0(&p, K0, C0_HWRENA);

    /* Restore RA, which is the address we will return to */
    UASM_i_LW(&p, RA, offsetof(struct pt_regs, regs[RA]), K1);
    uasm_i_jr(&p, RA);
     uasm_i_nop(&p);

    return p;
}