/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

/* Real mode helpers */

#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>

#if defined(CONFIG_PPC_BOOK3S_64)

#define GET_SHADOW_VCPU(reg)    \
    mr  reg, r13

#elif defined(CONFIG_PPC_BOOK3S_32)

#define GET_SHADOW_VCPU(reg)                \
    tophys(reg, r2);                \
    lwz     reg, (THREAD + THREAD_KVM_SVCPU)(reg);  \
    tophys(reg, reg)

#endif
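/*
 * Note on the GET_SHADOW_VCPU() variants above: on 64-bit hosts the shadow
 * vcpu state lives in the PACA, which r13 already points to; on 32-bit
 * hosts it is looked up from the current task's thread struct via r2, with
 * the pointers run through tophys() because we execute with MSR[DR] off.
 */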

/* Disable for nested KVM */
#define USE_QUICK_LAST_INST
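/*
 * With this set, ld_last_inst below fetches the faulting guest instruction
 * directly by briefly turning MSR[DR] back on; otherwise SVCPU_LAST_INST is
 * left as KVM_INST_FETCH_FAILED and the instruction has to be fetched later,
 * outside real mode.
 */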


/* Get helper functions for subarch specific functionality */

#if defined(CONFIG_PPC_BOOK3S_64)
#include "book3s_64_slb.S"
#elif defined(CONFIG_PPC_BOOK3S_32)
#include "book3s_32_sr.S"
#endif

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_handler_trampoline_enter
kvmppc_handler_trampoline_enter:

    /* Required state:
     *
     * MSR = ~(IR|DR)
     * R1 = host R1
     * R2 = host R2
     * R4 = guest shadow MSR
     * R5 = normal host MSR
     * R6 = current host MSR (EE, IR, DR off)
     * LR = highmem guest exit code
     * all other volatile GPRS = free
     * SVCPU[CR] = guest CR
     * SVCPU[XER] = guest XER
     * SVCPU[CTR] = guest CTR
     * SVCPU[LR] = guest LR
     */

    /* r3 = shadow vcpu */
    GET_SHADOW_VCPU(r3)

    /* Save guest exit handler address and MSR */
    mflr    r0
    PPC_STL r0, HSTATE_VMHANDLER(r3)
    PPC_STL r5, HSTATE_HOST_MSR(r3)

    /* Save R1/R2 in the PACA (64-bit) or shadow_vcpu (32-bit) */
    PPC_STL r1, HSTATE_HOST_R1(r3)
    PPC_STL r2, HSTATE_HOST_R2(r3)

    /* Activate guest mode, so faults get handled by KVM */
    li  r11, KVM_GUEST_MODE_GUEST
    stb r11, HSTATE_IN_GUEST(r3)

    /* Switch to guest segment. This is subarch specific. */
    LOAD_GUEST_SEGMENTS

#ifdef CONFIG_PPC_BOOK3S_64
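    /* FSCR gates the availability of facilities such as TAR on ISA 2.07
     * (POWER8) and later, so the guest runs on its shadow FSCR while the
     * host value is stashed for the way out. */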
BEGIN_FTR_SECTION
    /* Save host FSCR */
    mfspr   r8, SPRN_FSCR
    std r8, HSTATE_HOST_FSCR(r13)
    /* Set FSCR during guest execution */
    ld  r9, SVCPU_SHADOW_FSCR(r13)
    mtspr   SPRN_FSCR, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

    /* Some guests may need to have dcbz set to 32 byte length.
     *
     * Usually we ensure that by patching the guest's instructions
     * to trap on dcbz and emulate it in the hypervisor.
     *
     * If we can, we should tell the CPU to use 32 byte dcbz though,
     * because that's a lot faster.
     */
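    /* HSTATE_RESTORE_HID5 is set when this vcpu wants 32-byte dcbz; the
     * HID5 change made below is undone at no_dcbz32_off on the way out. */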
    lbz r0, HSTATE_RESTORE_HID5(r3)
    cmpwi   r0, 0
    beq no_dcbz32_on

    mfspr   r0,SPRN_HID5
    ori     r0, r0, 0x80        /* XXX HID5_dcbz32 = 0x80 */
    mtspr   SPRN_HID5,r0
no_dcbz32_on:

#endif /* CONFIG_PPC_BOOK3S_64 */

    /* Enter guest */

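    /* Restore CTR/LR/CR/XER first, while there are still free GPRs to
     * stage them through; the guest GPRs themselves come last. */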
    PPC_LL  r8, SVCPU_CTR(r3)
    PPC_LL  r9, SVCPU_LR(r3)
    lwz r10, SVCPU_CR(r3)
    PPC_LL  r11, SVCPU_XER(r3)

    mtctr   r8
    mtlr    r9
    mtcr    r10
    mtxer   r11

    /* Move SRR0 and SRR1 into the respective regs */
    PPC_LL  r9, SVCPU_PC(r3)
    /* First clear RI in our current MSR value */
    li  r0, MSR_RI
    andc    r6, r6, r0
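    /* With MSR[RI] clear, an interrupt taken from here on would be
     * unrecoverable: SRR0/SRR1 are about to hold guest state until the
     * rfi below. */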

    PPC_LL  r0, SVCPU_R0(r3)
    PPC_LL  r1, SVCPU_R1(r3)
    PPC_LL  r2, SVCPU_R2(r3)
    PPC_LL  r5, SVCPU_R5(r3)
    PPC_LL  r7, SVCPU_R7(r3)
    PPC_LL  r8, SVCPU_R8(r3)
    PPC_LL  r10, SVCPU_R10(r3)
    PPC_LL  r11, SVCPU_R11(r3)
    PPC_LL  r12, SVCPU_R12(r3)
    PPC_LL  r13, SVCPU_R13(r3)

    MTMSR_EERI(r6)
    mtsrr0  r9
    mtsrr1  r4

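    /* r4, r6 and r9 have been consumed above, so their guest values can be
     * loaded now; r3 goes last because it holds the svcpu pointer. */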
    PPC_LL  r4, SVCPU_R4(r3)
    PPC_LL  r6, SVCPU_R6(r3)
    PPC_LL  r9, SVCPU_R9(r3)
    PPC_LL  r3, (SVCPU_R3)(r3)

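    /* rfi[d] loads the PC from SRR0 and the MSR from SRR1 atomically, so
     * the guest context and guest PC take effect together. */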
    RFI_TO_GUEST
kvmppc_handler_trampoline_enter_end:



/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_interrupt_pr
kvmppc_interrupt_pr:
    /* 64-bit entry. Register usage at this point:
     *
     * SPRG_SCRATCH0   = guest R13
     * R9              = HSTATE_IN_GUEST
     * R12             = (guest CR << 32) | exit handler id
     * R13             = PACA
     * HSTATE.SCRATCH0 = guest R12
     * HSTATE.SCRATCH2 = guest R9
     */
#ifdef CONFIG_PPC64
    /* Match 32-bit entry */
    ld  r9,HSTATE_SCRATCH2(r13)
    rotldi  r12, r12, 32          /* Flip R12 halves for stw */
    stw r12, HSTATE_SCRATCH1(r13) /* CR is now in the low half */
    srdi    r12, r12, 32          /* shift trap into low half */
#endif
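    /* Worked example: with guest CR = 0x44000028 and trap 0x300, r12 is
     * 0x4400002800000300 on entry; rotldi makes it 0x0000030044000028, the
     * stw saves the low (CR) half, and srdi leaves r12 = 0x300. */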

.global kvmppc_handler_trampoline_exit
kvmppc_handler_trampoline_exit:
    /* Register usage at this point:
     *
     * SPRG_SCRATCH0   = guest R13
     * R12             = exit handler id
     * R13             = shadow vcpu (32-bit) or PACA (64-bit)
     * HSTATE.SCRATCH0 = guest R12
     * HSTATE.SCRATCH1 = guest CR
     */

    /* Save registers */

    PPC_STL r0, SVCPU_R0(r13)
    PPC_STL r1, SVCPU_R1(r13)
    PPC_STL r2, SVCPU_R2(r13)
    PPC_STL r3, SVCPU_R3(r13)
    PPC_STL r4, SVCPU_R4(r13)
    PPC_STL r5, SVCPU_R5(r13)
    PPC_STL r6, SVCPU_R6(r13)
    PPC_STL r7, SVCPU_R7(r13)
    PPC_STL r8, SVCPU_R8(r13)
    PPC_STL r9, SVCPU_R9(r13)
    PPC_STL r10, SVCPU_R10(r13)
    PPC_STL r11, SVCPU_R11(r13)

    /* Restore R1/R2 so we can handle faults */
    PPC_LL  r1, HSTATE_HOST_R1(r13)
    PPC_LL  r2, HSTATE_HOST_R2(r13)

    /* Save guest PC and MSR */
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
    andi.   r0, r12, 0x2
    cmpwi   cr1, r0, 0
    beq 1f
    mfspr   r3,SPRN_HSRR0
    mfspr   r4,SPRN_HSRR1
    andi.   r12,r12,0x3ffd
    b   2f
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
#endif
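    /* Bit 0x2 in the trap number marks an HV interrupt, which is delivered
     * through HSRR0/HSRR1 rather than SRR0/SRR1; the andi. masks it back
     * out of r12, and cr1 remembers the result for the HSRR restore path
     * near the end of this function. */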
1:  mfsrr0  r3
    mfsrr1  r4
2:
    PPC_STL r3, SVCPU_PC(r13)
    PPC_STL r4, SVCPU_SHADOW_SRR1(r13)

    /* Get scratch'ed off registers */
    GET_SCRATCH0(r9)
    PPC_LL  r8, HSTATE_SCRATCH0(r13)
    lwz r7, HSTATE_SCRATCH1(r13)

    PPC_STL r9, SVCPU_R13(r13)
    PPC_STL r8, SVCPU_R12(r13)
    stw r7, SVCPU_CR(r13)

    /* Save more register state */

    mfxer   r5
    mfdar   r6
    mfdsisr r7
    mfctr   r8
    mflr    r9

    PPC_STL r5, SVCPU_XER(r13)
    PPC_STL r6, SVCPU_FAULT_DAR(r13)
    stw r7, SVCPU_FAULT_DSISR(r13)
    PPC_STL r8, SVCPU_CTR(r13)
    PPC_STL r9, SVCPU_LR(r13)

    /*
     * To easily get the last instruction, the one we took the #vmexit
     * at, we exploit the fact that the guest's virtual memory layout is
     * still mapped here, so we can simply load from the guest's PC
     * address.
     */

    /* We only load the last instruction when it's safe */
    cmpwi   r12, BOOK3S_INTERRUPT_DATA_STORAGE
    beq ld_last_inst
    cmpwi   r12, BOOK3S_INTERRUPT_PROGRAM
    beq ld_last_inst
    cmpwi   r12, BOOK3S_INTERRUPT_SYSCALL
    beq ld_last_prev_inst
    cmpwi   r12, BOOK3S_INTERRUPT_ALIGNMENT
    beq-    ld_last_inst
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
    cmpwi   r12, BOOK3S_INTERRUPT_H_EMUL_ASSIST
    beq-    ld_last_inst
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
BEGIN_FTR_SECTION
    cmpwi   r12, BOOK3S_INTERRUPT_FAC_UNAVAIL
    beq-    ld_last_inst
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
#endif

    b   no_ld_last_inst

ld_last_prev_inst:
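    /* On a syscall the guest PC already points past the sc instruction,
     * so step back one instruction to fetch the sc itself. */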
    addi    r3, r3, -4

ld_last_inst:
    /* Save off the guest instruction we're at */

    /* In case lwz faults */
    li  r0, KVM_INST_FETCH_FAILED

#ifdef USE_QUICK_LAST_INST

    /* Set guest mode to 'jump over instruction' so if lwz faults
     * we'll just continue at the next IP. */
    li  r9, KVM_GUEST_MODE_SKIP
    stb r9, HSTATE_IN_GUEST(r13)

    /*    1) enable paging for data */
    mfmsr   r9
    ori r11, r9, MSR_DR         /* Enable paging for data */
    mtmsr   r11
    sync
    /*    2) fetch the instruction */
    lwz r0, 0(r3)
    /*    3) disable paging again */
    mtmsr   r9
    sync

#endif
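    /* Either way, r0 now holds the fetched instruction, or it still holds
     * KVM_INST_FETCH_FAILED if the lwz faulted (the host fault handler sees
     * KVM_GUEST_MODE_SKIP and resumes past the load) or if the quick path
     * is compiled out. */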
    stw r0, SVCPU_LAST_INST(r13)

no_ld_last_inst:

    /* Unset guest mode */
    li  r9, KVM_GUEST_MODE_NONE
    stb r9, HSTATE_IN_GUEST(r13)

    /* Switch back to host MMU */
    LOAD_HOST_SEGMENTS

#ifdef CONFIG_PPC_BOOK3S_64

    lbz r5, HSTATE_RESTORE_HID5(r13)
    cmpwi   r5, 0
    beq no_dcbz32_off

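    /* Undo the entry-side HID5 change: rldimi inserts the zero bits from
     * r4 over the HID5_dcbz32 field (0x80), disabling 32-byte dcbz. */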
    li  r4, 0
    mfspr   r5,SPRN_HID5
    rldimi  r5,r4,6,56
    mtspr   SPRN_HID5,r5

no_dcbz32_off:

BEGIN_FTR_SECTION
    /* Save guest FSCR on a FAC_UNAVAIL interrupt */
    cmpwi   r12, BOOK3S_INTERRUPT_FAC_UNAVAIL
    bne+    no_fscr_save
    mfspr   r7, SPRN_FSCR
    std r7, SVCPU_SHADOW_FSCR(r13)
no_fscr_save:
    /* Restore host FSCR */
    ld  r8, HSTATE_HOST_FSCR(r13)
    mtspr   SPRN_FSCR, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

#endif /* CONFIG_PPC_BOOK3S_64 */

    /*
     * For some interrupts, we need to call the real Linux
     * handler, so it can do work for us. This has to happen
     * as if the interrupt arrived from the kernel though,
     * so let's fake it here where most state is restored.
     *
     * Having set up SRR0/1 with the address where we want
     * to continue with relocation on (potentially in module
     * space), we either just go straight there with rfi[d],
     * or we jump to an interrupt handler if there is an
     * interrupt to be handled first.  In the latter case,
     * the rfi[d] at the end of the interrupt handler will
     * get us back to where we want to continue.
     */

    /* Register usage at this point:
     *
     * R1       = host R1
     * R2       = host R2
     * R10      = raw exit handler id
     * R12      = exit handler id
     * R13      = shadow vcpu (32-bit) or PACA (64-bit)
     * SVCPU.*  = guest *
     *
     */

    PPC_LL  r6, HSTATE_HOST_MSR(r13)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
    /*
     * We must not change the MSR[TS] bits via the rfi below: the actual
     * TM handling is done in the host, with DR/IR back on, after
     * HSTATE_VMHANDLER.  Since MSR_TM may be set in HOST_MSR, the rfid
     * would not suppress such a transition and could raise an exception.
     * So copy the current MSR[TS] state into the MSR value we are about
     * to load.
     */
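    /* rldicl rotates the MSR so the two TS bits land at the bottom of r7
     * and clears everything else; rldimi then inserts them back into r6 at
     * the TS bit positions. */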
    mfmsr   r7
    rldicl  r7, r7, 64 - MSR_TS_S_LG, 62
    rldimi  r6, r7, MSR_TS_S_LG, 63 - MSR_TS_T_LG
#endif
    PPC_LL  r8, HSTATE_VMHANDLER(r13)

#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
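    /* cr1 still holds the HV-interrupt test from the exit path above; for
     * HV interrupts the handler MSR and address also go into HSRR0/HSRR1. */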
    beq cr1, 1f
    mtspr   SPRN_HSRR1, r6
    mtspr   SPRN_HSRR0, r8
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
#endif
1:  /* Restore host msr -> SRR1 */
    mtsrr1  r6
    /* Load highmem handler address */
    mtsrr0  r8

    /* RFI into the highmem handler, or jump to interrupt handler */
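    /* beqa branches to an absolute address; each BOOK3S_INTERRUPT_* value
     * equals its exception vector address, so this jumps straight into the
     * corresponding host interrupt handler, whose final rfi[d] then uses
     * the SRR0/SRR1 set up above. */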
    cmpwi   r12, BOOK3S_INTERRUPT_EXTERNAL
    beqa    BOOK3S_INTERRUPT_EXTERNAL
    cmpwi   r12, BOOK3S_INTERRUPT_DECREMENTER
    beqa    BOOK3S_INTERRUPT_DECREMENTER
    cmpwi   r12, BOOK3S_INTERRUPT_PERFMON
    beqa    BOOK3S_INTERRUPT_PERFMON
    cmpwi   r12, BOOK3S_INTERRUPT_DOORBELL
    beqa    BOOK3S_INTERRUPT_DOORBELL

    RFI_TO_KERNEL
kvmppc_handler_trampoline_exit_end: