/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>

/* The host stack layout: */
#define HOST_R1         0 /* Implied by stwu. */
#define HOST_CALLEE_LR  4
#define HOST_RUN        8
/* r2 is special: it holds 'current', and it is made nonvolatile in the
 * kernel with the -ffixed-r2 gcc option. */
#define HOST_R2         12
#define HOST_CR         16
#define HOST_NV_GPRS    20
#define __HOST_NV_GPR(n)  (HOST_NV_GPRS + ((n - 14) * 4))
#define HOST_NV_GPR(n)  __HOST_NV_GPR(__REG_##n)
#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(R31) + 4)
#define HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */
#define HOST_STACK_LR   (HOST_STACK_SIZE + 4) /* In caller stack frame. */

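/* Per-exit-number bitmasks: which exits need the faulting instruction,
 * DEAR, or ESR captured before the host can clobber them. */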
#define NEED_INST_MASK ((1<<BOOKE_INTERRUPT_PROGRAM) | \
                        (1<<BOOKE_INTERRUPT_DTLB_MISS) | \
                        (1<<BOOKE_INTERRUPT_DEBUG))

#define NEED_DEAR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
                        (1<<BOOKE_INTERRUPT_DTLB_MISS) | \
                        (1<<BOOKE_INTERRUPT_ALIGNMENT))

#define NEED_ESR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
                       (1<<BOOKE_INTERRUPT_INST_STORAGE) | \
                       (1<<BOOKE_INTERRUPT_PROGRAM) | \
                       (1<<BOOKE_INTERRUPT_DTLB_MISS) | \
                       (1<<BOOKE_INTERRUPT_ALIGNMENT))

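/* Common handler body: stash r4 in the scratch SPRG, locate the vcpu via
 * SPRN_SPRG_THREAD, save the volatile GPRs and CTR/PC we are about to
 * clobber, record the exit number in r5, and branch to kvmppc_resume_host. */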
.macro __KVM_HANDLER ivor_nr scratch srr0
    /* Get pointer to vcpu and record exit number. */
    mtspr   \scratch, r4
    mfspr   r4, SPRN_SPRG_THREAD
    lwz     r4, THREAD_KVM_VCPU(r4)
    stw     r3, VCPU_GPR(R3)(r4)
    stw     r5, VCPU_GPR(R5)(r4)
    stw     r6, VCPU_GPR(R6)(r4)
    mfspr   r3, \scratch
    mfctr   r5
    stw     r3, VCPU_GPR(R4)(r4)
    stw     r5, VCPU_CTR(r4)
    mfspr   r3, \srr0
    lis     r6, kvmppc_resume_host@h
    stw     r3, VCPU_PC(r4)
    li      r5, \ivor_nr
    ori     r6, r6, kvmppc_resume_host@l
    mtctr   r6
    bctr
.endm

.macro KVM_HANDLER ivor_nr scratch srr0
_GLOBAL(kvmppc_handler_\ivor_nr)
    __KVM_HANDLER \ivor_nr \scratch \srr0
.endm

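/* Debug interrupts need special handling: if the interrupt was taken in the
 * KVM enter/exit path rather than in the guest, clear MSR[DE] and the DBSR
 * and return with rfci instead of exiting to the host. */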
.macro KVM_DBG_HANDLER ivor_nr scratch srr0
_GLOBAL(kvmppc_handler_\ivor_nr)
    mtspr   \scratch, r4
    mfspr   r4, SPRN_SPRG_THREAD
    lwz     r4, THREAD_KVM_VCPU(r4)
    stw     r3, VCPU_CRIT_SAVE(r4)
    mfcr    r3
    mfspr   r4, SPRN_CSRR1
    andi.   r4, r4, MSR_PR
    bne     1f
    /* debug interrupt happened in enter/exit path */
    mfspr   r4, SPRN_CSRR1
    rlwinm  r4, r4, 0, ~MSR_DE
    mtspr   SPRN_CSRR1, r4
    lis     r4, 0xffff
    ori     r4, r4, 0xffff
    mtspr   SPRN_DBSR, r4
    mfspr   r4, SPRN_SPRG_THREAD
    lwz     r4, THREAD_KVM_VCPU(r4)
    mtcr    r3
    lwz     r3, VCPU_CRIT_SAVE(r4)
    mfspr   r4, \scratch
    rfci
1:  /* debug interrupt happened in guest */
    mtcr    r3
    mfspr   r4, SPRN_SPRG_THREAD
    lwz     r4, THREAD_KVM_VCPU(r4)
    lwz     r3, VCPU_CRIT_SAVE(r4)
    mfspr   r4, \scratch
    __KVM_HANDLER \ivor_nr \scratch \srr0
.endm

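/* Address-table entries for the handler stubs below; the C setup code uses
 * these to locate each handler. */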
.macro KVM_HANDLER_ADDR ivor_nr
    .long   kvmppc_handler_\ivor_nr
.endm

.macro KVM_HANDLER_END
    .long   kvmppc_handlers_end
.endm

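/* One stub per IVOR. Critical, machine check, watchdog, and debug interrupts
 * use their own scratch SPRGs and save/restore registers (CSRR0/MCSRR0);
 * everything else uses SPRG_RSCRATCH0 and SRR0. */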
_GLOBAL(kvmppc_handlers_start)
KVM_HANDLER BOOKE_INTERRUPT_CRITICAL SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
KVM_HANDLER BOOKE_INTERRUPT_MACHINE_CHECK SPRN_SPRG_RSCRATCH_MC SPRN_MCSRR0
KVM_HANDLER BOOKE_INTERRUPT_DATA_STORAGE SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_INST_STORAGE SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_EXTERNAL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_ALIGNMENT SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_PROGRAM SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_FP_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_SYSCALL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_AP_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_DECREMENTER SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_FIT SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_WATCHDOG SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
KVM_HANDLER BOOKE_INTERRUPT_DTLB_MISS SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_ITLB_MISS SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_DBG_HANDLER BOOKE_INTERRUPT_DEBUG SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
KVM_HANDLER BOOKE_INTERRUPT_SPE_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_DATA SPRN_SPRG_RSCRATCH0 SPRN_SRR0
KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_ROUND SPRN_SPRG_RSCRATCH0 SPRN_SRR0
_GLOBAL(kvmppc_handlers_end)

/* Registers:
 *  SPRG_SCRATCH0: guest r4
 *  r4: vcpu pointer
 *  r5: KVM exit number
 */
_GLOBAL(kvmppc_resume_host)
    mfcr    r3
    stw     r3, VCPU_CR(r4)
    stw     r7, VCPU_GPR(R7)(r4)
    stw     r8, VCPU_GPR(R8)(r4)
    stw     r9, VCPU_GPR(R9)(r4)

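    /* r6 = 1 << exit number, for testing against the NEED_* masks below. */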
    li      r6, 1
    slw     r6, r6, r5

#ifdef CONFIG_KVM_EXIT_TIMING
    /* save exit time */
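    /* Read the 64-bit timebase without tearing: retry if TBU changed
     * while TBL was being read. */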
1:
    mfspr   r7, SPRN_TBRU
    mfspr   r8, SPRN_TBRL
    mfspr   r9, SPRN_TBRU
    cmpw    r9, r7
    bne     1b
    stw     r8, VCPU_TIMING_EXIT_TBL(r4)
    stw     r9, VCPU_TIMING_EXIT_TBU(r4)
#endif

    /* Save the faulting instruction and all GPRs for emulation. */
    andi.   r7, r6, NEED_INST_MASK
    beq     ..skip_inst_copy
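    /* Fetch the instruction from guest context: setting MSR[DS] makes the
     * load use the guest's data address space, so the translation matches
     * the guest mapping of the faulting PC. */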
    mfspr   r9, SPRN_SRR0
    mfmsr   r8
    ori     r7, r8, MSR_DS
    mtmsr   r7
    isync
    lwz     r9, 0(r9)
    mtmsr   r8
    isync
    stw     r9, VCPU_LAST_INST(r4)

    stw     r15, VCPU_GPR(R15)(r4)
    stw     r16, VCPU_GPR(R16)(r4)
    stw     r17, VCPU_GPR(R17)(r4)
    stw     r18, VCPU_GPR(R18)(r4)
    stw     r19, VCPU_GPR(R19)(r4)
    stw     r20, VCPU_GPR(R20)(r4)
    stw     r21, VCPU_GPR(R21)(r4)
    stw     r22, VCPU_GPR(R22)(r4)
    stw     r23, VCPU_GPR(R23)(r4)
    stw     r24, VCPU_GPR(R24)(r4)
    stw     r25, VCPU_GPR(R25)(r4)
    stw     r26, VCPU_GPR(R26)(r4)
    stw     r27, VCPU_GPR(R27)(r4)
    stw     r28, VCPU_GPR(R28)(r4)
    stw     r29, VCPU_GPR(R29)(r4)
    stw     r30, VCPU_GPR(R30)(r4)
    stw     r31, VCPU_GPR(R31)(r4)
..skip_inst_copy:

    /* Also grab DEAR and ESR before the host can clobber them. */

    andi.   r7, r6, NEED_DEAR_MASK
    beq     ..skip_dear
    mfspr   r9, SPRN_DEAR
    stw     r9, VCPU_FAULT_DEAR(r4)
..skip_dear:

    andi.   r7, r6, NEED_ESR_MASK
    beq     ..skip_esr
    mfspr   r9, SPRN_ESR
    stw     r9, VCPU_FAULT_ESR(r4)
..skip_esr:

    /* Save remaining volatile guest register state to vcpu. */
    stw     r0, VCPU_GPR(R0)(r4)
    stw     r1, VCPU_GPR(R1)(r4)
    stw     r2, VCPU_GPR(R2)(r4)
    stw     r10, VCPU_GPR(R10)(r4)
    stw     r11, VCPU_GPR(R11)(r4)
    stw     r12, VCPU_GPR(R12)(r4)
    stw     r13, VCPU_GPR(R13)(r4)
    stw     r14, VCPU_GPR(R14)(r4) /* We need a NV GPR below. */
    mflr    r3
    stw     r3, VCPU_LR(r4)
    mfxer   r3
    stw     r3, VCPU_XER(r4)

    /* Restore host stack pointer and PID before IVPR, since the host
     * exception handlers use them. */
    lwz     r1, VCPU_HOST_STACK(r4)
    lwz     r3, VCPU_HOST_PID(r4)
    mtspr   SPRN_PID, r3

#ifdef CONFIG_FSL_BOOKE
    /* We cheat and rely on Linux not using PID1, so it is always 0. */
    lis     r3, 0
    mtspr   SPRN_PID1, r3
#endif

    /* Restore host IVPR before re-enabling interrupts. We cheat and know
     * that Linux IVPR is always 0xc0000000. */
    lis     r3, 0xc000
    mtspr   SPRN_IVPR, r3

    /* Switch to kernel stack and jump to handler. */
    LOAD_REG_ADDR(r3, kvmppc_handle_exit)
    mtctr   r3
    mr      r3, r4
    lwz     r2, HOST_R2(r1)
    mr      r14, r4 /* Save vcpu pointer. */

    bctrl   /* kvmppc_handle_exit() */

    /* Restore vcpu pointer and the nonvolatiles we used. */
    mr      r4, r14
    lwz     r14, VCPU_GPR(R14)(r4)

    /* Sometimes instruction emulation must restore complete GPR state. */
    andi.   r5, r3, RESUME_FLAG_NV
    beq     ..skip_nv_load
    lwz     r15, VCPU_GPR(R15)(r4)
    lwz     r16, VCPU_GPR(R16)(r4)
    lwz     r17, VCPU_GPR(R17)(r4)
    lwz     r18, VCPU_GPR(R18)(r4)
    lwz     r19, VCPU_GPR(R19)(r4)
    lwz     r20, VCPU_GPR(R20)(r4)
    lwz     r21, VCPU_GPR(R21)(r4)
    lwz     r22, VCPU_GPR(R22)(r4)
    lwz     r23, VCPU_GPR(R23)(r4)
    lwz     r24, VCPU_GPR(R24)(r4)
    lwz     r25, VCPU_GPR(R25)(r4)
    lwz     r26, VCPU_GPR(R26)(r4)
    lwz     r27, VCPU_GPR(R27)(r4)
    lwz     r28, VCPU_GPR(R28)(r4)
    lwz     r29, VCPU_GPR(R29)(r4)
    lwz     r30, VCPU_GPR(R30)(r4)
    lwz     r31, VCPU_GPR(R31)(r4)
..skip_nv_load:

    /* Should we return to the guest? */
    andi.   r5, r3, RESUME_FLAG_HOST
    beq     lightweight_exit

    srawi   r3, r3, 2 /* Shift -ERR back down. */

heavyweight_exit:
    /* Not returning to guest. */

#ifdef CONFIG_SPE
    /* save guest SPEFSCR and load host SPEFSCR */
    mfspr   r9, SPRN_SPEFSCR
    stw     r9, VCPU_SPEFSCR(r4)
    lwz     r9, VCPU_HOST_SPEFSCR(r4)
    mtspr   SPRN_SPEFSCR, r9
#endif

    /* We already saved guest volatile register state; now save the
     * non-volatiles. */
    stw     r15, VCPU_GPR(R15)(r4)
    stw     r16, VCPU_GPR(R16)(r4)
    stw     r17, VCPU_GPR(R17)(r4)
    stw     r18, VCPU_GPR(R18)(r4)
    stw     r19, VCPU_GPR(R19)(r4)
    stw     r20, VCPU_GPR(R20)(r4)
    stw     r21, VCPU_GPR(R21)(r4)
    stw     r22, VCPU_GPR(R22)(r4)
    stw     r23, VCPU_GPR(R23)(r4)
    stw     r24, VCPU_GPR(R24)(r4)
    stw     r25, VCPU_GPR(R25)(r4)
    stw     r26, VCPU_GPR(R26)(r4)
    stw     r27, VCPU_GPR(R27)(r4)
    stw     r28, VCPU_GPR(R28)(r4)
    stw     r29, VCPU_GPR(R29)(r4)
    stw     r30, VCPU_GPR(R30)(r4)
    stw     r31, VCPU_GPR(R31)(r4)

    /* Load host non-volatile register state from host stack. */
    lwz     r14, HOST_NV_GPR(R14)(r1)
    lwz     r15, HOST_NV_GPR(R15)(r1)
    lwz     r16, HOST_NV_GPR(R16)(r1)
    lwz     r17, HOST_NV_GPR(R17)(r1)
    lwz     r18, HOST_NV_GPR(R18)(r1)
    lwz     r19, HOST_NV_GPR(R19)(r1)
    lwz     r20, HOST_NV_GPR(R20)(r1)
    lwz     r21, HOST_NV_GPR(R21)(r1)
    lwz     r22, HOST_NV_GPR(R22)(r1)
    lwz     r23, HOST_NV_GPR(R23)(r1)
    lwz     r24, HOST_NV_GPR(R24)(r1)
    lwz     r25, HOST_NV_GPR(R25)(r1)
    lwz     r26, HOST_NV_GPR(R26)(r1)
    lwz     r27, HOST_NV_GPR(R27)(r1)
    lwz     r28, HOST_NV_GPR(R28)(r1)
    lwz     r29, HOST_NV_GPR(R29)(r1)
    lwz     r30, HOST_NV_GPR(R30)(r1)
    lwz     r31, HOST_NV_GPR(R31)(r1)

    /* Return to kvm_vcpu_run(). */
    lwz     r4, HOST_STACK_LR(r1)
    lwz     r5, HOST_CR(r1)
    addi    r1, r1, HOST_STACK_SIZE
    mtlr    r4
    mtcr    r5
    /* r3 still contains the return code from kvmppc_handle_exit(). */
    blr


/* Registers:
 *  r3: vcpu pointer
 */
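/* Entry point from the host: save host state to the stack, load guest
 * non-volatile state, then fall through to lightweight_exit to enter
 * the guest. */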
_GLOBAL(__kvmppc_vcpu_run)
    stwu    r1, -HOST_STACK_SIZE(r1)
    stw     r1, VCPU_HOST_STACK(r3) /* Save stack pointer to vcpu. */

    /* Save host state to stack. */
    mr      r4, r3
    mflr    r3
    stw     r3, HOST_STACK_LR(r1)
    mfcr    r5
    stw     r5, HOST_CR(r1)

    /* Save host non-volatile register state to stack. */
    stw     r14, HOST_NV_GPR(R14)(r1)
    stw     r15, HOST_NV_GPR(R15)(r1)
    stw     r16, HOST_NV_GPR(R16)(r1)
    stw     r17, HOST_NV_GPR(R17)(r1)
    stw     r18, HOST_NV_GPR(R18)(r1)
    stw     r19, HOST_NV_GPR(R19)(r1)
    stw     r20, HOST_NV_GPR(R20)(r1)
    stw     r21, HOST_NV_GPR(R21)(r1)
    stw     r22, HOST_NV_GPR(R22)(r1)
    stw     r23, HOST_NV_GPR(R23)(r1)
    stw     r24, HOST_NV_GPR(R24)(r1)
    stw     r25, HOST_NV_GPR(R25)(r1)
    stw     r26, HOST_NV_GPR(R26)(r1)
    stw     r27, HOST_NV_GPR(R27)(r1)
    stw     r28, HOST_NV_GPR(R28)(r1)
    stw     r29, HOST_NV_GPR(R29)(r1)
    stw     r30, HOST_NV_GPR(R30)(r1)
    stw     r31, HOST_NV_GPR(R31)(r1)

    /* Load guest non-volatiles. */
    lwz     r14, VCPU_GPR(R14)(r4)
    lwz     r15, VCPU_GPR(R15)(r4)
    lwz     r16, VCPU_GPR(R16)(r4)
    lwz     r17, VCPU_GPR(R17)(r4)
    lwz     r18, VCPU_GPR(R18)(r4)
    lwz     r19, VCPU_GPR(R19)(r4)
    lwz     r20, VCPU_GPR(R20)(r4)
    lwz     r21, VCPU_GPR(R21)(r4)
    lwz     r22, VCPU_GPR(R22)(r4)
    lwz     r23, VCPU_GPR(R23)(r4)
    lwz     r24, VCPU_GPR(R24)(r4)
    lwz     r25, VCPU_GPR(R25)(r4)
    lwz     r26, VCPU_GPR(R26)(r4)
    lwz     r27, VCPU_GPR(R27)(r4)
    lwz     r28, VCPU_GPR(R28)(r4)
    lwz     r29, VCPU_GPR(R29)(r4)
    lwz     r30, VCPU_GPR(R30)(r4)
    lwz     r31, VCPU_GPR(R31)(r4)

#ifdef CONFIG_SPE
    /* save host SPEFSCR and load guest SPEFSCR */
    mfspr   r3, SPRN_SPEFSCR
    stw     r3, VCPU_HOST_SPEFSCR(r4)
    lwz     r3, VCPU_SPEFSCR(r4)
    mtspr   SPRN_SPEFSCR, r3
#endif

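/* lightweight_exit: re-enter the guest directly after handling an exit,
 * without returning to kvm_vcpu_run(); guest non-volatiles are still live
 * in registers. */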
lightweight_exit:
    stw     r2, HOST_R2(r1)

    mfspr   r3, SPRN_PID
    stw     r3, VCPU_HOST_PID(r4)
    lwz     r3, VCPU_SHADOW_PID(r4)
    mtspr   SPRN_PID, r3

#ifdef CONFIG_FSL_BOOKE
    lwz     r3, VCPU_SHADOW_PID1(r4)
    mtspr   SPRN_PID1, r3
#endif

    /* Load some guest volatiles. */
    lwz     r0, VCPU_GPR(R0)(r4)
    lwz     r2, VCPU_GPR(R2)(r4)
    lwz     r9, VCPU_GPR(R9)(r4)
    lwz     r10, VCPU_GPR(R10)(r4)
    lwz     r11, VCPU_GPR(R11)(r4)
    lwz     r12, VCPU_GPR(R12)(r4)
    lwz     r13, VCPU_GPR(R13)(r4)
    lwz     r3, VCPU_LR(r4)
    mtlr    r3
    lwz     r3, VCPU_XER(r4)
    mtxer   r3

    /* Switch the IVPR. XXX If we take a TLB miss after this we're screwed,
     * so how do we make sure vcpu won't fault? */
    lis     r8, kvmppc_booke_handlers@ha
    lwz     r8, kvmppc_booke_handlers@l(r8)
    mtspr   SPRN_IVPR, r8

    lwz     r5, VCPU_SHARED(r4)

    /* Can't switch the stack pointer until after IVPR is switched,
     * because host interrupt handlers would get confused. */
    lwz     r1, VCPU_GPR(R1)(r4)

    /*
     * Host interrupt handlers may have clobbered these
     * guest-readable SPRGs, or the guest kernel may have
     * written directly to the shared area, so we
     * need to reload them here with the guest's values.
     */
    PPC_LD(r3, VCPU_SHARED_SPRG4, r5)
    mtspr   SPRN_SPRG4W, r3
    PPC_LD(r3, VCPU_SHARED_SPRG5, r5)
    mtspr   SPRN_SPRG5W, r3
    PPC_LD(r3, VCPU_SHARED_SPRG6, r5)
    mtspr   SPRN_SPRG6W, r3
    PPC_LD(r3, VCPU_SHARED_SPRG7, r5)
    mtspr   SPRN_SPRG7W, r3

#ifdef CONFIG_KVM_EXIT_TIMING
    /* save enter time */
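    /* Read the 64-bit timebase consistently, as on the exit path above. */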
1:
    mfspr   r6, SPRN_TBRU
    mfspr   r7, SPRN_TBRL
    mfspr   r8, SPRN_TBRU
    cmpw    r8, r6
    bne     1b
    stw     r7, VCPU_TIMING_LAST_ENTER_TBL(r4)
    stw     r8, VCPU_TIMING_LAST_ENTER_TBU(r4)
#endif

    /* Finish loading guest volatiles and jump to guest. */
    lwz     r3, VCPU_CTR(r4)
    lwz     r5, VCPU_CR(r4)
    lwz     r6, VCPU_PC(r4)
    lwz     r7, VCPU_SHADOW_MSR(r4)
    mtctr   r3
    mtcr    r5
    mtsrr0  r6
    mtsrr1  r7
    lwz     r5, VCPU_GPR(R5)(r4)
    lwz     r6, VCPU_GPR(R6)(r4)
    lwz     r7, VCPU_GPR(R7)(r4)
    lwz     r8, VCPU_GPR(R8)(r4)

    /* Clear any debug events which occurred since we disabled MSR[DE].
     * XXX This gives us a 3-instruction window in which a breakpoint
     * intended for guest context could fire in the host instead. */
    lis     r3, 0xffff
    ori     r3, r3, 0xffff
    mtspr   SPRN_DBSR, r3

    lwz     r3, VCPU_GPR(R3)(r4)
    lwz     r4, VCPU_GPR(R4)(r4)
    rfi

    .data
    .align  4
    .globl  kvmppc_booke_handler_addr
kvmppc_booke_handler_addr:
KVM_HANDLER_ADDR BOOKE_INTERRUPT_CRITICAL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_MACHINE_CHECK
KVM_HANDLER_ADDR BOOKE_INTERRUPT_DATA_STORAGE
KVM_HANDLER_ADDR BOOKE_INTERRUPT_INST_STORAGE
KVM_HANDLER_ADDR BOOKE_INTERRUPT_EXTERNAL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_ALIGNMENT
KVM_HANDLER_ADDR BOOKE_INTERRUPT_PROGRAM
KVM_HANDLER_ADDR BOOKE_INTERRUPT_FP_UNAVAIL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_SYSCALL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_AP_UNAVAIL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_DECREMENTER
KVM_HANDLER_ADDR BOOKE_INTERRUPT_FIT
KVM_HANDLER_ADDR BOOKE_INTERRUPT_WATCHDOG
KVM_HANDLER_ADDR BOOKE_INTERRUPT_DTLB_MISS
KVM_HANDLER_ADDR BOOKE_INTERRUPT_ITLB_MISS
KVM_HANDLER_ADDR BOOKE_INTERRUPT_DEBUG
KVM_HANDLER_ADDR BOOKE_INTERRUPT_SPE_UNAVAIL
KVM_HANDLER_ADDR BOOKE_INTERRUPT_SPE_FP_DATA
KVM_HANDLER_ADDR BOOKE_INTERRUPT_SPE_FP_ROUND
KVM_HANDLER_END /* Always keep this entry last. */

#ifdef CONFIG_SPE
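/* Registers:
 *  r3: vcpu pointer (return immediately if NULL)
 * Saves the guest's 32 upper GPR halves (EVRs) and the 64-bit SPE
 * accumulator; evmwumiaa with zeroed operands copies ACC into evr6. */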
_GLOBAL(kvmppc_save_guest_spe)
    cmpi    0, r3, 0
    beqlr-
    SAVE_32EVRS(0, r4, r3, VCPU_EVR)
    evxor   evr6, evr6, evr6
    evmwumiaa evr6, evr6, evr6
    li      r4, VCPU_ACC
    evstddx evr6, r4, r3        /* save acc */
    blr

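/* Registers:
 *  r3: vcpu pointer (return immediately if NULL)
 * Restores the accumulator (evmra moves evr6 into ACC) and the 32 EVRs. */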
_GLOBAL(kvmppc_load_guest_spe)
    cmpi    0, r3, 0
    beqlr-
    li      r4, VCPU_ACC
    evlddx  evr6, r4, r3
    evmra   evr6, evr6          /* load acc */
    REST_32EVRS(0, r4, r3, VCPU_EVR)
    blr
#endif