/* arch/powerpc/kvm/tm.S — source text extracted from an LXR web viewer;
 * navigation residue removed. */
0001 /* SPDX-License-Identifier: GPL-2.0-only */
0002 /*
0003  *
0004  * Derived from book3s_hv_rmhandlers.S, which is:
0005  *
0006  * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
0007  */
0008 
0009 #include <asm/reg.h>
0010 #include <asm/ppc_asm.h>
0011 #include <asm/asm-offsets.h>
0012 #include <asm/export.h>
0013 #include <asm/tm.h>
0014 #include <asm/cputable.h>
0015 
0016 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
0017 #define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
0018 
/*
 * Save transactional state and TM-related registers.
 * Called with:
 * - r3 pointing to the vcpu struct
 * - r4 containing the MSR with current TS bits:
 *  (For HV KVM, it is VCPU_MSR ; For PR KVM, it is host MSR).
 * - r5 containing a flag indicating that non-volatile registers
 *  must be preserved.
 * If r5 == 0, this can modify all checkpointed registers, but
 * restores r1, r2 before exit.  If r5 != 0, this restores the
 * MSR TM/FP/VEC/VSX bits to their state on entry.
 *
 * Performs treclaim., which dumps the checkpointed register state into
 * the live registers, and stores that state into the vcpu's _TM fields.
 * During the sequence MSR[RI] is cleared (r1/r13 are not valid), so the
 * stack pointer and the vcpu pointer are parked in HSTATE scratch slots.
 */
_GLOBAL(__kvmppc_save_tm)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -SWITCH_FRAME_SIZE(r1)

	mr	r9, r3			/* r9 = vcpu; r3 is needed for treclaim/FP calls */
	cmpdi	cr7, r5, 0		/* cr7.eq = "r5 == 0" (no NV preservation); tested below */

	/* Turn on TM so the TM SPRs/instructions are usable; also FP/VEC/VSX. */
	mfmsr	r8
	mr	r10, r8			/* r10 = MSR on entry, basis for the final MSR value */
	li	r0, 1
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	ori	r8, r8, MSR_FP
	oris	r8, r8, (MSR_VEC | MSR_VSX)@h
	mtmsrd	r8

	/* Extract MSR[TS] from the guest MSR in r4; zero means no transaction. */
	rldicl.	r4, r4, 64 - MSR_TS_S_LG, 62
	beq	1f	/* TM not active in guest. */

	/* Park r1 (stack) and r3 (vcpu) where they survive the RI=0 window. */
	std	r1, HSTATE_SCRATCH2(r13)
	std	r3, HSTATE_SCRATCH1(r13)

	/* Save CR on the stack - even if r5 == 0 we need to get cr7 back. */
	mfcr	r6
	SAVE_GPR(6, r1)

	/* Save DSCR so we can restore it to avoid running with user value */
	mfspr	r7, SPRN_DSCR
	SAVE_GPR(7, r1)

	/*
	 * We are going to do treclaim., which will modify all checkpointed
	 * registers.  Save the non-volatile registers on the stack if
	 * preservation of non-volatile state has been requested.
	 */
	beq	cr7, 3f
	SAVE_NVGPRS(r1)

	/* MSR[TS] will be 0 (non-transactional) once we do treclaim. */
	li	r0, 0
	rldimi	r10, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
	SAVE_GPR(10, r1)	/* final MSR value */
3:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
BEGIN_FTR_SECTION
	/* Emulation of the treclaim instruction needs TEXASR before treclaim */
	mfspr	r6, SPRN_TEXASR
	std	r6, VCPU_ORIG_TEXASR(r3)
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
#endif

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
	li	r5, 0
	mtmsrd	r5, 1

	li	r3, TM_CAUSE_KVM_RESCHED	/* failure cause recorded in TEXASR */

	/* All GPRs are volatile at this point. */
	TRECLAIM(R3)

	/* Temporarily store r13 and r9 so we have some regs to play with */
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r9, PACATMSCRATCH(r13)		/* checkpointed r9, saved properly below */
	ld	r9, HSTATE_SCRATCH1(r13)	/* r9 = vcpu again */

	/* Save away PPR soon so we don't run with user value. */
	std	r0, VCPU_GPRS_TM(0)(r9)		/* checkpointed r0 first — r0 is reused next */
	mfspr	r0, SPRN_PPR
	HMT_MEDIUM

	/* Reload stack pointer. */
	std	r1, VCPU_GPRS_TM(1)(r9)		/* checkpointed r1 before clobbering it */
	ld	r1, HSTATE_SCRATCH2(r13)

	/* Set MSR RI now we have r1 and r13 back. */
	std	r2, VCPU_GPRS_TM(2)(r9)		/* checkpointed r2 before clobbering it */
	li	r2, MSR_RI
	mtmsrd	r2, 1

	/* Reload TOC pointer. */
	ld	r2, PACATOC(r13)

	/* Save all but r0-r2, r9 & r13 — those were clobbered/parked above. */
	reg = 3
	.rept	29
	.if (reg != 9) && (reg != 13)
	std	reg, VCPU_GPRS_TM(reg)(r9)
	.endif
	reg = reg + 1
	.endr
	/* ... now save r13 */
	GET_SCRATCH0(r4)
	std	r4, VCPU_GPRS_TM(13)(r9)
	/* ... and save r9 */
	ld	r4, PACATMSCRATCH(r13)
	std	r4, VCPU_GPRS_TM(9)(r9)

	/* Restore host DSCR and CR values, after saving guest values */
	mfcr	r6
	mfspr	r7, SPRN_DSCR
	stw	r6, VCPU_CR_TM(r9)
	std	r7, VCPU_DSCR_TM(r9)
	REST_GPR(6, r1)				/* host CR saved before treclaim */
	REST_GPR(7, r1)				/* host DSCR saved before treclaim */
	mtcr	r6				/* also brings cr7 (preserve flag) back */
	mtspr	SPRN_DSCR, r7

	/* Save away checkpointed SPRs. */
	std	r0, VCPU_PPR_TM(r9)		/* r0 has held checkpointed PPR since above */
	mflr	r5
	mfctr	r7
	mfspr	r8, SPRN_AMR
	mfspr	r10, SPRN_TAR
	mfxer	r11
	std	r5, VCPU_LR_TM(r9)
	std	r7, VCPU_CTR_TM(r9)
	std	r8, VCPU_AMR_TM(r9)
	std	r10, VCPU_TAR_TM(r9)
	std	r11, VCPU_XER_TM(r9)

	/* Save FP/VSX. */
	addi	r3, r9, VCPU_FPRS_TM
	bl	store_fp_state
	addi	r3, r9, VCPU_VRS_TM
	bl	store_vr_state
	mfspr	r6, SPRN_VRSAVE
	stw	r6, VCPU_VRSAVE_TM(r9)

	/* Restore non-volatile registers if requested to */
	beq	cr7, 1f
	REST_NVGPRS(r1)
	REST_GPR(10, r1)			/* final MSR value computed earlier */
1:
	/*
	 * We need to save these SPRs after the treclaim so that the software
	 * error code is recorded correctly in the TEXASR.  Also the user may
	 * change these outside of a transaction, so they must always be
	 * context switched.
	 */
	mfspr	r7, SPRN_TEXASR
	std	r7, VCPU_TEXASR(r9)
	mfspr	r5, SPRN_TFHAR
	mfspr	r6, SPRN_TFIAR
	std	r5, VCPU_TFHAR(r9)
	std	r6, VCPU_TFIAR(r9)

	/* Restore MSR state if requested (r5 != 0 on entry) */
	beq	cr7, 2f
	mtmsrd	r10, 0
2:
	addi	r1, r1, SWITCH_FRAME_SIZE
	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr
0187 
/*
 * _kvmppc_save_tm_pr() is a wrapper around __kvmppc_save_tm(), so that it can
 * be invoked from C function by PR KVM only.
 *
 * In:  r3 = vcpu, r4 = guest MSR (TS bits) — passed straight through.
 * Always requests preservation of non-volatile registers (r5 = 1), as
 * required when called from C.  TAR is saved/restored around the call
 * because __kvmppc_save_tm clobbers it (it reads the checkpointed TAR).
 */
_GLOBAL(_kvmppc_save_tm_pr)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -PPC_MIN_STKFRM(r1)

	/* Save TAR in the top slot of our frame so it can be recovered later. */
	mfspr	r8, SPRN_TAR
	std	r8, PPC_MIN_STKFRM-8(r1)

	li	r5, 1		/* preserve non-volatile registers */
	bl	__kvmppc_save_tm

	ld	r8, PPC_MIN_STKFRM-8(r1)
	mtspr	SPRN_TAR, r8

	addi	r1, r1, PPC_MIN_STKFRM
	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr

EXPORT_SYMBOL_GPL(_kvmppc_save_tm_pr);
0212 
/*
 * Restore transactional state and TM-related registers.
 * Called with:
 *  - r3 pointing to the vcpu struct.
 *  - r4 is the guest MSR with desired TS bits:
 *  For HV KVM, it is VCPU_MSR
 *  For PR KVM, it is provided by caller
 * - r5 containing a flag indicating that non-volatile registers
 *  must be preserved.
 * If r5 == 0, this potentially modifies all checkpointed registers, but
 * restores r1, r2 from the PACA before exit.
 * If r5 != 0, this restores the MSR TM/FP/VEC/VSX bits to their state on entry.
 *
 * Mirror of __kvmppc_save_tm: loads the vcpu's _TM register images into the
 * live registers and executes trechkpt. to re-establish the checkpointed
 * state.  MSR[RI] is cleared across the GPR-reload/trechkpt window, during
 * which the host stack pointer lives in HSTATE_SCRATCH2.
 */
_GLOBAL(__kvmppc_restore_tm)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)

	cmpdi	cr7, r5, 0		/* cr7.eq = "r5 == 0"; tested at 4:/5:/9: */

	/* Turn on TM/FP/VSX/VMX so we can restore them. */
	mfmsr	r5
	mr	r10, r5			/* r10 = MSR on entry, basis for the final MSR value */
	li	r6, MSR_TM >> 32	/* build the (high-word) MSR_TM bit */
	sldi	r6, r6, 32
	or	r5, r5, r6
	ori	r5, r5, MSR_FP
	oris	r5, r5, (MSR_VEC | MSR_VSX)@h
	mtmsrd	r5

	/*
	 * The user may change these outside of a transaction, so they must
	 * always be context switched.
	 */
	ld	r5, VCPU_TFHAR(r3)
	ld	r6, VCPU_TFIAR(r3)
	ld	r7, VCPU_TEXASR(r3)
	mtspr	SPRN_TFHAR, r5
	mtspr	SPRN_TFIAR, r6
	mtspr	SPRN_TEXASR, r7

	/* Check MSR[TS] in the requested guest MSR; zero means no transaction. */
	mr	r5, r4
	rldicl.	r5, r5, 64 - MSR_TS_S_LG, 62
	beq	9f		/* TM not active in guest */

	/* Make sure the failure summary is set, otherwise we'll program check
	 * when we trechkpt.  It's possible that this might have been not set
	 * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
	 * host.
	 */
	oris	r7, r7, (TEXASR_FS)@h
	mtspr	SPRN_TEXASR, r7

	/*
	 * Make a stack frame and save non-volatile registers if requested;
	 * park the new r1 where it survives the RI=0 window.
	 */
	stdu	r1, -SWITCH_FRAME_SIZE(r1)
	std	r1, HSTATE_SCRATCH2(r13)

	/* Save host TOC, CR and DSCR — all clobbered by the reload below. */
	mfcr	r6
	mfspr	r7, SPRN_DSCR
	SAVE_GPR(2, r1)
	SAVE_GPR(6, r1)
	SAVE_GPR(7, r1)

	beq	cr7, 4f
	SAVE_NVGPRS(r1)

	/* MSR[TS] will be 1 (suspended) once we do trechkpt */
	li	r0, 1
	rldimi	r10, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
	SAVE_GPR(10, r1)	/* final MSR value */
4:
	/*
	 * We need to load up the checkpointed state for the guest.
	 * We need to do this early as it will blow away any GPRs, VSRs and
	 * some SPRs.
	 */

	mr	r31, r3			/* keep vcpu in r31 across the helper calls */
	addi	r3, r31, VCPU_FPRS_TM
	bl	load_fp_state
	addi	r3, r31, VCPU_VRS_TM
	bl	load_vr_state
	mr	r3, r31
	lwz	r7, VCPU_VRSAVE_TM(r3)
	mtspr	SPRN_VRSAVE, r7

	/* Checkpointed LR/CR/CTR/AMR/TAR/XER. */
	ld	r5, VCPU_LR_TM(r3)
	lwz	r6, VCPU_CR_TM(r3)
	ld	r7, VCPU_CTR_TM(r3)
	ld	r8, VCPU_AMR_TM(r3)
	ld	r9, VCPU_TAR_TM(r3)
	ld	r10, VCPU_XER_TM(r3)
	mtlr	r5
	mtcr	r6			/* clobbers cr7 — final flag kept on stack */
	mtctr	r7
	mtspr	SPRN_AMR, r8
	mtspr	SPRN_TAR, r9
	mtxer	r10

	/*
	 * Load up PPR and DSCR values but don't put them in the actual SPRs
	 * till the last moment to avoid running with userspace PPR and DSCR for
	 * too long.
	 */
	ld	r29, VCPU_DSCR_TM(r3)
	ld	r30, VCPU_PPR_TM(r3)

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
	li	r5, 0
	mtmsrd	r5, 1

	/* Load GPRs r0-r28 (r29-r31 are still in use as scratch) */
	reg = 0
	.rept	29
	ld	reg, VCPU_GPRS_TM(reg)(r31)
	reg = reg + 1
	.endr

	mtspr	SPRN_DSCR, r29
	mtspr	SPRN_PPR, r30

	/* Load final GPRs */
	ld	29, VCPU_GPRS_TM(29)(r31)
	ld	30, VCPU_GPRS_TM(30)(r31)
	ld	31, VCPU_GPRS_TM(31)(r31)

	/* TM checkpointed state is now setup.  All GPRs are now volatile. */
	TRECHKPT

	/* Now let's get back the state we need. */
	HMT_MEDIUM
	GET_PACA(r13)
	ld	r1, HSTATE_SCRATCH2(r13)
	REST_GPR(7, r1)			/* host DSCR */
	mtspr	SPRN_DSCR, r7

	/* Set the MSR RI since we have our registers back. */
	li	r5, MSR_RI
	mtmsrd	r5, 1

	/* Restore TOC pointer and CR (including cr7, the preserve flag) */
	REST_GPR(2, r1)
	REST_GPR(6, r1)
	mtcr	r6

	/* Restore non-volatile registers if requested to. */
	beq	cr7, 5f
	REST_GPR(10, r1)		/* final MSR value */
	REST_NVGPRS(r1)

5:	addi	r1, r1, SWITCH_FRAME_SIZE
	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0

9:	/* Restore MSR bits if requested (r5 != 0 on entry) */
	beqlr	cr7
	mtmsrd	r10, 0
	blr
0372 
/*
 * _kvmppc_restore_tm_pr() is a wrapper around __kvmppc_restore_tm(), so that it
 * can be invoked from C function by PR KVM only.
 *
 * In:  r3 = vcpu, r4 = guest MSR (TS bits) — passed straight through.
 * Always requests preservation of non-volatile registers (r5 = 1), as
 * required when called from C.  TAR is saved/restored around the call
 * because __kvmppc_restore_tm loads the checkpointed TAR into the SPR.
 */
_GLOBAL(_kvmppc_restore_tm_pr)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -PPC_MIN_STKFRM(r1)

	/* save TAR so that it can be recovered later */
	mfspr	r8, SPRN_TAR
	std	r8, PPC_MIN_STKFRM-8(r1)

	li	r5, 1		/* preserve non-volatile registers */
	bl	__kvmppc_restore_tm

	ld	r8, PPC_MIN_STKFRM-8(r1)
	mtspr	SPRN_TAR, r8

	addi	r1, r1, PPC_MIN_STKFRM
	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr

EXPORT_SYMBOL_GPL(_kvmppc_restore_tm_pr);
0398 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */