Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0-only */
0002 #include <asm/asm-offsets.h>
0003 #include <asm/cache.h>
0004 #include <asm/code-patching-asm.h>
0005 #include <asm/exception-64s.h>
0006 #include <asm/export.h>
0007 #include <asm/kvm_asm.h>
0008 #include <asm/kvm_book3s_asm.h>
0009 #include <asm/mmu.h>
0010 #include <asm/ppc_asm.h>
0011 #include <asm/ptrace.h>
0012 #include <asm/reg.h>
0013 #include <asm/ultravisor-api.h>
0014 
0015 /*
0016  * These are branched to from interrupt handlers in exception-64s.S which set
0017  * IKVM_REAL or IKVM_VIRT, if HSTATE_IN_GUEST was found to be non-zero.
0018  */
0019 
0020 /*
0021  * This is a hcall, so register convention is as
0022  * Documentation/powerpc/papr_hcalls.rst.
0023  *
0024  * This may also be a syscall from PR-KVM userspace that is to be
0025  * reflected to the PR guest kernel, so registers may be set up for
0026  * a system call rather than hcall. We don't currently clobber
0027  * anything here, but the 0xc00 handler has already clobbered CTR
0028  * and CR0, so PR-KVM can not support a guest kernel that preserves
0029  * those registers across its system calls.
0030  *
0031  * The state of registers is as kvmppc_interrupt, except CFAR is not
0032  * saved, R13 is not in SCRATCH0, and R10 does not contain the trap.
0033  */
0034 .global kvmppc_hcall
0035 .balign IFETCH_ALIGN_BYTES
0036 kvmppc_hcall:
0037 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
     /*
      * Guests entered via the P9 path (KVM_GUEST_MODE_HV_P9) take the
      * dedicated hcall exit and never go through the common path below.
      */
0038     lbz r10,HSTATE_IN_GUEST(r13)
0039     cmpwi   r10,KVM_GUEST_MODE_HV_P9
0040     beq kvmppc_p9_exit_hcall
0041 #endif
     /*
      * Normalise register state to the kvmppc_interrupt convention:
      * put guest r13 back into SPR SCRATCH0 (from the EXGEN save area)
      * and place the trap vector (0xc00, system call) in r10.
      */
0042     ld  r10,PACA_EXGEN+EX_R13(r13)
0043     SET_SCRATCH0(r10)
0044     li  r10,0xc00
0045     /* Now we look like kvmppc_interrupt */
     /* Hcalls always use the EXGEN save area; join the common path. */
0046     li  r11,PACA_EXGEN
0047     b   .Lgot_save_area
0048 
0049 /*
0050  * KVM interrupt entry occurs after GEN_INT_ENTRY runs, and follows that
0051  * call convention:
0052  *
0053  * guest R9-R13, CTR, CFAR, PPR saved in PACA EX_xxx save area
0054  * guest (H)DAR, (H)DSISR are also in the save area for relevant interrupts
0055  * guest R13 also saved in SCRATCH0
0056  * R13      = PACA
0057  * R11      = (H)SRR0
0058  * R12      = (H)SRR1
0059  * R9       = guest CR
0060  * PPR is set to medium
0061  *
0062  * With the addition for KVM:
0063  * R10      = trap vector
0064  */
0065 .global kvmppc_interrupt
0066 .balign IFETCH_ALIGN_BYTES
0067 kvmppc_interrupt:
0068 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
     /*
      * Stash the trap vector so r10 can be used to test for a P9-path
      * guest; restore it if we stay on the common path.
      */
0069     std r10,HSTATE_SCRATCH0(r13)
0070     lbz r10,HSTATE_IN_GUEST(r13)
0071     cmpwi   r10,KVM_GUEST_MODE_HV_P9
0072     beq kvmppc_p9_exit_interrupt
0073     ld  r10,HSTATE_SCRATCH0(r13)
0074 #endif
     /*
      * Select the PACA save area this interrupt used, based on the trap
      * vector in r10: above 0x200 -> EXGEN, exactly 0x200 (machine
      * check) -> EXMC, below 0x200 (system reset) -> EXNMI.
      */
0075     li  r11,PACA_EXGEN
0076     cmpdi   r10,0x200
0077     bgt+    .Lgot_save_area
0078     li  r11,PACA_EXMC
0079     beq .Lgot_save_area
0080     li  r11,PACA_EXNMI
0081 .Lgot_save_area:
     /* r11 = absolute address of the chosen EX_xxx save area. */
0082     add r11,r11,r13
0083 BEGIN_FTR_SECTION
0084     ld  r12,EX_CFAR(r11)
0085     std r12,HSTATE_CFAR(r13)
0086 END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
0087     ld  r12,EX_CTR(r11)
0088     mtctr   r12
0089 BEGIN_FTR_SECTION
0090     ld  r12,EX_PPR(r11)
0091     std r12,HSTATE_PPR(r13)
0092 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
     /* Move guest r12 to shadow scratch, then pack r12 = (CR<<32)|trap. */
0093     ld  r12,EX_R12(r11)
0094     std r12,HSTATE_SCRATCH0(r13)
0095     sldi    r12,r9,32
0096     or  r12,r12,r10
     /* Reload guest r9-r11; r11 is consumed last since it is the base. */
0097     ld  r9,EX_R9(r11)
0098     ld  r10,EX_R10(r11)
0099     ld  r11,EX_R11(r11)
0100 
0101     /*
0102      * Hcalls and other interrupts come here after normalising register
0103      * contents and save locations:
0104      *
0105      * R12      = (guest CR << 32) | interrupt vector
0106      * R13      = PACA
0107      * guest R12 saved in shadow HSTATE_SCRATCH0
0108      * guest R13 saved in SPRN_SCRATCH0
0109      */
0110     std r9,HSTATE_SCRATCH2(r13)
0111     lbz r9,HSTATE_IN_GUEST(r13)
0112     cmpwi   r9,KVM_GUEST_MODE_SKIP
0113     beq-    .Lmaybe_skip
0114 .Lno_skip:
     /* Dispatch to the HV or PR interrupt handler per guest mode. */
0115 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
0116 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
0117     cmpwi   r9,KVM_GUEST_MODE_GUEST
0118     beq kvmppc_interrupt_pr
0119 #endif
0120     b   kvmppc_interrupt_hv
0121 #else
0122     b   kvmppc_interrupt_pr
0123 #endif
0124 
0125 /*
0126  * "Skip" interrupts are part of a trick KVM uses a with hash guests to load
0127  * the faulting instruction in guest memory from the hypervisor without
0128  * walking page tables.
0129  *
0130  * When the guest takes a fault that requires the hypervisor to load the
0131  * instruction (e.g., MMIO emulation), KVM is running in real-mode with HV=1
0132  * and the guest MMU context loaded. It sets KVM_GUEST_MODE_SKIP, and sets
0133  * MSR[DR]=1 while leaving MSR[IR]=0, so it continues to fetch HV instructions
0134  * but loads and stores will access the guest context. This is used to load
0135  * the faulting instruction using the faulting guest effective address.
0136  *
0137  * However the guest context may not be able to translate, or it may cause a
0138  * machine check or other issue, which results in a fault in the host
0139  * (even with KVM-HV).
0140  *
0141  * These faults come here because KVM_GUEST_MODE_SKIP was set, so if they
0142  * are (or are likely) caused by that load, the instruction is skipped by
0143  * just returning with the PC advanced +4, where it is noticed the load did
0144  * not execute and it goes to the slow path which walks the page tables to
0145  * read guest memory.
0146  */
0147 .Lmaybe_skip:
     /* Only skip for faults the in-HV guest-context load can raise. */
0148     cmpwi   r12,BOOK3S_INTERRUPT_MACHINE_CHECK
0149     beq 1f
0150     cmpwi   r12,BOOK3S_INTERRUPT_DATA_STORAGE
0151     beq 1f
0152     cmpwi   r12,BOOK3S_INTERRUPT_DATA_SEGMENT
0153     beq 1f
0154 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
0155     /* HSRR interrupts get 2 added to interrupt number */
0156     cmpwi   r12,BOOK3S_INTERRUPT_H_DATA_STORAGE | 0x2
0157     beq 2f
0158 #endif
0159     b   .Lno_skip
     /*
      * SRR-delivered fault: advance SRR0 past the faulting load,
      * restore scratched r9/r12/r13 and return with rfid.
      */
0160 1:  mfspr   r9,SPRN_SRR0
0161     addi    r9,r9,4
0162     mtspr   SPRN_SRR0,r9
0163     ld  r12,HSTATE_SCRATCH0(r13)
0164     ld  r9,HSTATE_SCRATCH2(r13)
0165     GET_SCRATCH0(r13)
0166     RFI_TO_KERNEL
0167 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
     /* HSRR-delivered fault: same skip, but via HSRR0 and hrfid. */
0168 2:  mfspr   r9,SPRN_HSRR0
0169     addi    r9,r9,4
0170     mtspr   SPRN_HSRR0,r9
0171     ld  r12,HSTATE_SCRATCH0(r13)
0172     ld  r9,HSTATE_SCRATCH2(r13)
0173     GET_SCRATCH0(r13)
0174     HRFI_TO_KERNEL
0175 #endif
0176 
0177 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
0178 
0179 /* Stack frame offsets for kvmppc_p9_enter_guest */
0180 #define SFS         (144 + STACK_FRAME_MIN_SIZE)
0181 #define STACK_SLOT_NVGPRS   (SFS - 144) /* 18 gprs */
0182 
0183 /*
0184  * void kvmppc_p9_enter_guest(struct vcpu *vcpu);
0185  *
0186  * Enter the guest on a ISAv3.0 or later system.
0187  */
0188 .balign IFETCH_ALIGN_BYTES
0189 _GLOBAL(kvmppc_p9_enter_guest)
0190 EXPORT_SYMBOL_GPL(kvmppc_p9_enter_guest)
     /* Standard prologue: save LR, allocate the SFS-byte frame. */
0191     mflr    r0
0192     std r0,PPC_LR_STKOFF(r1)
0193     stdu    r1,-SFS(r1)
0194 
     /* Record the host stack pointer so the exit path can recover it. */
0195     std r1,HSTATE_HOST_R1(r13)
0196 
     /* Save host CR in the caller frame's CR slot (SFS+8 above our frame). */
0197     mfcr    r4
0198     stw r4,SFS+8(r1)
0199 
     /* Save host non-volatile GPRs r14-r31 into the stack slots. */
0200     reg = 14
0201     .rept   18
0202     std reg,STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
0203     reg = reg + 1
0204     .endr
0205 
     /* Load guest LR, CTR, XER from the vcpu. */
0206     ld  r4,VCPU_LR(r3)
0207     mtlr    r4
0208     ld  r4,VCPU_CTR(r3)
0209     mtctr   r4
0210     ld  r4,VCPU_XER(r3)
0211     mtspr   SPRN_XER,r4
0212 
     /*
      * Stage the guest CR in r1: the host stack pointer is already
      * saved in HSTATE_HOST_R1, and every other GPR is about to be
      * loaded with guest values, so r1 is the only free register.
      */
0213     ld  r1,VCPU_CR(r3)
0214 
0215 BEGIN_FTR_SECTION
0216     ld  r4,VCPU_CFAR(r3)
0217     mtspr   SPRN_CFAR,r4
0218 END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
0219 BEGIN_FTR_SECTION
0220     ld  r4,VCPU_PPR(r3)
0221     mtspr   SPRN_PPR,r4
0222 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
0223 
     /* Load guest r4-r31 (r0-r3 are loaded last, below). */
0224     reg = 4
0225     .rept   28
0226     ld  reg,__VCPU_GPR(reg)(r3)
0227     reg = reg + 1
0228     .endr
0229 
     /*
      * Secure (ultravisor) guest? The test clobbers r4, so guest r4 is
      * reloaded before the branch; CR is not yet live (still in r1).
      */
0230     ld  r4,VCPU_KVM(r3)
0231     lbz r4,KVM_SECURE_GUEST(r4)
0232     cmpdi   r4,0
0233     ld  r4,VCPU_GPR(R4)(r3)
0234     bne .Lret_to_ultra
0235 
     /* Install guest CR, then load the remaining GPRs (r3 last). */
0236     mtcr    r1
0237 
0238     ld  r0,VCPU_GPR(R0)(r3)
0239     ld  r1,VCPU_GPR(R1)(r3)
0240     ld  r2,VCPU_GPR(R2)(r3)
0241     ld  r3,VCPU_GPR(R3)(r3)
0242 
0243     HRFI_TO_GUEST
0244     b   .
0245 
0246     /*
0247      * Use UV_RETURN ultracall to return control back to the Ultravisor
0248      * after processing an hypercall or interrupt that was forwarded
0249      * (a.k.a. reflected) to the Hypervisor.
0250      *
0251      * All registers have already been reloaded except the ucall requires:
0252      *   R0 = hcall result
0253      *   R2 = SRR1, so UV can detect a synthesized interrupt (if any)
0254      *   R3 = UV_RETURN
0255      */
0256 .Lret_to_ultra:
0257     mtcr    r1
0258     ld  r1,VCPU_GPR(R1)(r3)
0259 
     /* Guest r3 (hcall result) goes in r0 per the ucall convention. */
0260     ld  r0,VCPU_GPR(R3)(r3)
0261     mfspr   r2,SPRN_SRR1
0262     LOAD_REG_IMMEDIATE(r3, UV_RETURN)
0263     sc  2
0264 
0265 /*
0266  * kvmppc_p9_exit_hcall and kvmppc_p9_exit_interrupt are branched to from
0267  * above if the interrupt was taken for a guest that was entered via
0268  * kvmppc_p9_enter_guest().
0269  *
0270  * The exit code recovers the host stack and vcpu pointer, saves all guest GPRs
0271  * and CR, LR, XER as well as guest MSR and NIA into the VCPU, then re-
0272  * establishes the host stack and registers to return from the
0273  * kvmppc_p9_enter_guest() function, which saves CTR and other guest registers
0274  * (SPRs and FP, VEC, etc).
0275  */
0276 .balign IFETCH_ALIGN_BYTES
0277 kvmppc_p9_exit_hcall:
     /*
      * Synthesise the interrupt-entry state for an hcall: NIA/MSR from
      * SRR0/1 and trap 0xc00 in the scratch slot, then fall through.
      */
0278     mfspr   r11,SPRN_SRR0
0279     mfspr   r12,SPRN_SRR1
0280     li  r10,0xc00
0281     std r10,HSTATE_SCRATCH0(r13)
0282 
0283 .balign IFETCH_ALIGN_BYTES
0284 kvmppc_p9_exit_interrupt:
0285     /*
0286      * If set to KVM_GUEST_MODE_HV_P9 but we're still in the
0287      * hypervisor, that means we can't return from the entry stack.
0288      */
     /* Extract MSR[HV] from (H)SRR1 in r12; non-zero means bad state. */
0289     rldicl. r10,r12,64-MSR_HV_LG,63
0290     bne-    kvmppc_p9_bad_interrupt
0291 
     /* Recover the host stack and vcpu pointer saved at guest entry. */
0292     std     r1,HSTATE_SCRATCH1(r13)
0293     std     r3,HSTATE_SCRATCH2(r13)
0294     ld  r1,HSTATE_HOST_R1(r13)
0295     ld  r3,HSTATE_KVM_VCPU(r13)
0296 
0297     std r9,VCPU_CR(r3)
0298 
     /* NOTE(review): this local label appears unreferenced in the code
      * visible here — possibly vestigial; confirm against full file. */
0299 1:
0300     std r11,VCPU_PC(r3)
0301     std r12,VCPU_MSR(r3)
0302 
     /* Save guest non-volatile GPRs r14-r31 into the vcpu. */
0303     reg = 14
0304     .rept   18
0305     std reg,__VCPU_GPR(reg)(r3)
0306     reg = reg + 1
0307     .endr
0308 
0309     /* r1, r3, r9-r13 are saved to vcpu by C code */
0310     std r0,VCPU_GPR(R0)(r3)
0311     std r2,VCPU_GPR(R2)(r3)
     /* Save guest r4-r8. */
0312     reg = 4
0313     .rept   5
0314     std reg,__VCPU_GPR(reg)(r3)
0315     reg = reg + 1
0316     .endr
0317 
     /* Re-establish the host TOC pointer. */
0318     ld  r2,PACATOC(r13)
0319 
0320     mflr    r4
0321     std r4,VCPU_LR(r3)
0322     mfspr   r4,SPRN_XER
0323     std r4,VCPU_XER(r3)
0324 
     /* Restore host non-volatile GPRs and CR saved at guest entry. */
0325     reg = 14
0326     .rept   18
0327     ld  reg,STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
0328     reg = reg + 1
0329     .endr
0330 
0331     lwz r4,SFS+8(r1)
0332     mtcr    r4
0333 
0334     /*
0335      * Flush the link stack here, before executing the first blr on the
0336      * way out of the guest.
0337      *
0338      * The link stack won't match coming out of the guest anyway so the
0339      * only cost is the flush itself. The call clobbers r0.
0340      */
     /* The nop is runtime-patched into a call on CPUs that need it. */
0341 1:  nop
0342     patch_site 1b patch__call_kvm_flush_link_stack_p9
0343 
     /* Tear down the frame and return to kvmppc_p9_enter_guest's caller. */
0344     addi    r1,r1,SFS
0345     ld  r0,PPC_LR_STKOFF(r1)
0346     mtlr    r0
0347     blr
0348 
0349 /*
0350  * Took an interrupt somewhere right before HRFID to guest, so registers are
0351  * in a bad way. Return things hopefully enough to run host virtual code and
0352  * run the Linux interrupt handler (SRESET or MCE) to print something useful.
0353  *
0354  * We could be really clever and save all host registers in known locations
0355  * before setting HSTATE_IN_GUEST, then restoring them all here, and setting
0356  * return address to a fixup that sets them up again. But that's a lot of
0357  * effort for a small bit of code. Lots of other things to do first.
0358  */
0359 kvmppc_p9_bad_interrupt:
0360 BEGIN_MMU_FTR_SECTION
0361     /*
0362      * Hash host doesn't try to recover MMU (requires host SLB reload)
0363      */
0364     b   .
0365 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
0366     /*
0367      * Clean up guest registers to give host a chance to run.
0368      */
     /* Zero the protection/debug SPRs the guest may have set. */
0369     li  r10,0
0370     mtspr   SPRN_AMR,r10
0371     mtspr   SPRN_IAMR,r10
0372     mtspr   SPRN_CIABR,r10
0373     mtspr   SPRN_DAWRX0,r10
0374 BEGIN_FTR_SECTION
0375     mtspr   SPRN_DAWRX1,r10
0376 END_FTR_SECTION_IFSET(CPU_FTR_DAWR1)
0377 
0378     /*
0379      * Switch to host MMU mode (don't have the real host PID but we aren't
0380      * going back to userspace).
0381      */
0382     hwsync
0383     isync
0384 
     /* PID = 0 (r10 is still zero here). */
0385     mtspr   SPRN_PID,r10
0386 
     /* Restore the host LPID from the kvm struct. */
0387     ld  r10, HSTATE_KVM_VCPU(r13)
0388     ld  r10, VCPU_KVM(r10)
0389     lwz r10, KVM_HOST_LPID(r10)
0390     mtspr   SPRN_LPID,r10
0391 
     /* Restore the host LPCR from the kvm struct. */
0392     ld  r10, HSTATE_KVM_VCPU(r13)
0393     ld  r10, VCPU_KVM(r10)
0394     ld  r10, KVM_HOST_LPCR(r10)
0395     mtspr   SPRN_LPCR,r10
0396 
0397     isync
0398 
0399     /*
0400      * Set GUEST_MODE_NONE so the handler won't branch to KVM, and clear
0401      * MSR_RI in r12 ([H]SRR1) so the handler won't try to return.
0402      */
0403     li  r10,KVM_GUEST_MODE_NONE
0404     stb r10,HSTATE_IN_GUEST(r13)
0405     li  r10,MSR_RI
0406     andc    r12,r12,r10
0407 
0408     /*
0409      * Go back to interrupt handler. MCE and SRESET have their specific
0410      * PACA save area so they should be used directly. They set up their
0411      * own stack. The other handlers all use EXGEN. They will use the
0412      * guest r1 if it looks like a kernel stack, so just load the
0413      * emergency stack and go to program check for all other interrupts.
0414      */
     /* Dispatch on the trap vector saved at exit entry. */
0415     ld  r10,HSTATE_SCRATCH0(r13)
0416     cmpwi   r10,BOOK3S_INTERRUPT_MACHINE_CHECK
0417     beq .Lcall_machine_check_common
0418 
0419     cmpwi   r10,BOOK3S_INTERRUPT_SYSTEM_RESET
0420     beq .Lcall_system_reset_common
0421 
     /* Any other trap here is unrecoverable: spin. */
0422     b   .
0423 
0424 .Lcall_machine_check_common:
0425     b   machine_check_common
0426 
0427 .Lcall_system_reset_common:
0428     b   system_reset_common
0429 #endif