Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 /*
0003  * Transactional memory support routines to reclaim and recheckpoint
0004  * transactional process state.
0005  *
0006  * Copyright 2012 Matt Evans & Michael Neuling, IBM Corporation.
0007  */
0008 
0009 #include <asm/asm-offsets.h>
0010 #include <asm/ppc_asm.h>
0011 #include <asm/ppc-opcode.h>
0012 #include <asm/ptrace.h>
0013 #include <asm/reg.h>
0014 #include <asm/bug.h>
0015 #include <asm/export.h>
0016 #include <asm/feature-fixups.h>
0017 
0018 #ifdef CONFIG_VSX
0019 /* See fpu.S, this is borrowed from there */
0020 /*
0021  * Save/restore all 32 floating point registers to/from the area at
0022  * (base).  On CPUs that advertise CPU_FTR_VSX the feature-fixup section
0023  * instead takes the branch to the VSR variant, which saves/restores the
0024  * full 64 VSX registers using (c) as a scratch register.  Numeric local
0025  * labels 2:/3: are used so the macro can be expanded more than once.
0026  */
0020 #define __SAVE_32FPRS_VSRS(n,c,base)        \
0021 BEGIN_FTR_SECTION               \
0022     b   2f;             \
0023 END_FTR_SECTION_IFSET(CPU_FTR_VSX);     \
0024     SAVE_32FPRS(n,base);            \
0025     b   3f;             \
0026 2:  SAVE_32VSRS(n,c,base);          \
0027 3:
0028 #define __REST_32FPRS_VSRS(n,c,base)        \
0029 BEGIN_FTR_SECTION               \
0030     b   2f;             \
0031 END_FTR_SECTION_IFSET(CPU_FTR_VSX);     \
0032     REST_32FPRS(n,base);            \
0033     b   3f;             \
0034 2:  REST_32VSRS(n,c,base);          \
0035 3:
0036 #else
0037 /* Without VSX support only the plain FPR save/restore is available. */
0037 #define __SAVE_32FPRS_VSRS(n,c,base)    SAVE_32FPRS(n, base)
0038 #define __REST_32FPRS_VSRS(n,c,base)    REST_32FPRS(n, base)
0039 #endif
0040 /* Outer wrappers map bare register names (R6, R7...) to __REG_* tokens. */
0040 #define SAVE_32FPRS_VSRS(n,c,base) \
0041     __SAVE_32FPRS_VSRS(n,__REG_##c,__REG_##base)
0042 #define REST_32FPRS_VSRS(n,c,base) \
0043     __REST_32FPRS_VSRS(n,__REG_##c,__REG_##base)
0044 
0045 /* Stack frame offsets for local variables. */
0046 /* L0 holds the caller's MSR across tm_reclaim; L1/L0 hold the live AMR. */
0046 #define TM_FRAME_L0 TM_FRAME_SIZE-16
0047 #define TM_FRAME_L1 TM_FRAME_SIZE-8
0048 
0049 
0050 /* In order to access the TM SPRs, TM must be enabled.  So, do so: */
0051 /*
0052  * void tm_enable(void)
0053  * Sets MSR[TM] if it is not already set.  Clobbers r0, r3 and r4.
0054  */
0051 _GLOBAL(tm_enable)
0052     mfmsr   r4              /* r4 = current MSR */
0053     li  r3, MSR_TM >> 32        /* MSR_TM lives in the upper word, */
0054     sldi    r3, r3, 32          /* so build the 64-bit mask in two steps */
0055     and.    r0, r4, r3          /* already enabled? */
0056     bne 1f              /* yes: skip the (slow) mtmsrd */
0057     or  r4, r4, r3
0058     mtmsrd  r4
0059 1:  blr
0060 EXPORT_SYMBOL_GPL(tm_enable);
0061 
0062 /*
0063  * void tm_disable(void)
0064  * Clears MSR[TM] unconditionally.  Clobbers r3 and r4.
0065  */
0062 _GLOBAL(tm_disable)
0063     mfmsr   r4              /* r4 = current MSR */
0064     li  r3, MSR_TM >> 32        /* build the 64-bit MSR_TM mask */
0065     sldi    r3, r3, 32
0066     andc    r4, r4, r3          /* clear the TM bit */
0067     mtmsrd  r4
0068     blr
0069 EXPORT_SYMBOL_GPL(tm_disable);
0070 
0071 /*
0072  * void tm_save_sprs(struct thread_struct *thread)   [thread in r3]
0073  * Saves the three TM SPRs (TFHAR, TEXASR, TFIAR) into the thread
0074  * struct.  TM must already be enabled in the MSR (see tm_enable).
0075  * Clobbers r0.
0076  */
0071 _GLOBAL(tm_save_sprs)
0072     mfspr   r0, SPRN_TFHAR
0073     std r0, THREAD_TM_TFHAR(r3)
0074     mfspr   r0, SPRN_TEXASR
0075     std r0, THREAD_TM_TEXASR(r3)
0076     mfspr   r0, SPRN_TFIAR
0077     std r0, THREAD_TM_TFIAR(r3)
0078     blr
0079 
0080 /*
0081  * void tm_restore_sprs(struct thread_struct *thread)   [thread in r3]
0082  * Inverse of tm_save_sprs: reloads TFHAR, TEXASR and TFIAR from the
0083  * thread struct.  TM must already be enabled in the MSR.  Clobbers r0.
0084  */
0080 _GLOBAL(tm_restore_sprs)
0081     ld  r0, THREAD_TM_TFHAR(r3)
0082     mtspr   SPRN_TFHAR, r0
0083     ld  r0, THREAD_TM_TEXASR(r3)
0084     mtspr   SPRN_TEXASR, r0
0085     ld  r0, THREAD_TM_TFIAR(r3)
0086     mtspr   SPRN_TFIAR, r0
0087     blr
0088 
0089     /* Passed an 8-bit failure cause as first argument. */
0090 /*
0091  * void tm_abort(uint8_t cause)   [cause in r3]
0092  * Aborts the current transaction, recording 'cause' as the failure
0093  * reason (TABORT encodes it into TEXASR).
0094  */
0090 _GLOBAL(tm_abort)
0091     TABORT(R3)
0092     blr
0093 EXPORT_SYMBOL_GPL(tm_abort);
0094 
0095 /*
0096  * void tm_reclaim(struct thread_struct *thread,
0097  *         uint8_t cause)
0098  *
0099  *  - Performs a full reclaim.  This destroys outstanding
0100  *    transactions and updates thread.ckpt_regs, thread.ckfp_state and
0101  *    thread.ckvr_state with the original checkpointed state.  Note that
0102  *    thread->regs is unchanged.
0103  *
0104  * Purpose is to both abort transactions of, and preserve the state of,
0105  * a transaction at a context switch. We preserve/restore both sets of process
0106  * state to restore them when the thread's scheduled again.  We continue in
0107  * userland as though nothing happened, but when the transaction is resumed
0108  * they will abort back to the checkpointed state we save out here.
0109  *
0110  * Call with IRQs off, stacks get all out of sync for some periods in here!
0111  */
0112 _GLOBAL(tm_reclaim)
0113     /*
0114      * Standard prologue: stash CR/LR in the caller's frame, save the TOC,
0115      * then open our own TM_FRAME_SIZE stack frame.
0116      */
0113     mfcr    r5
0114     mflr    r0
0115     stw r5, 8(r1)
0116     std r0, 16(r1)
0117     std r2, STK_GOT(r1)
0118     stdu    r1, -TM_FRAME_SIZE(r1)
0119 
0120     /* We've a struct pt_regs at [r1+STACK_FRAME_OVERHEAD]. */
0121 
0122     std r3, STK_PARAM(R3)(r1)
0123     SAVE_NVGPRS(r1)
0124 
0125     /*
0126      * Save kernel live AMR since it will be clobbered by treclaim
0127      * but can be used elsewhere later in kernel space.
0128      */
0129     mfspr   r3, SPRN_AMR
0130     std r3, TM_FRAME_L1(r1)
0131 
0132     /* We need to setup MSR for VSX register save instructions. */
0133     /*
0134      * r14 = the original MSR (kept in a non-volatile GPR so it survives
0135      * to the epilogue); r15 = that MSR with FP/VEC (and VSX if available)
0136      * enabled and EE forced off.
0137      */
0133     mfmsr   r14
0134     mr  r15, r14
0135     ori r15, r15, MSR_FP
0136     li  r16, 0
0137     ori r16, r16, MSR_EE /* IRQs hard off */
0138     andc    r15, r15, r16
0139     oris    r15, r15, MSR_VEC@h
0140 #ifdef CONFIG_VSX
0141     BEGIN_FTR_SECTION
0142     oris    r15,r15, MSR_VSX@h
0143     END_FTR_SECTION_IFSET(CPU_FTR_VSX)
0144 #endif
0145     mtmsrd  r15
0146     std r14, TM_FRAME_L0(r1)
0147 
0148     /* Do sanity check on MSR to make sure we are suspended */
0149     li  r7, (MSR_TS_S)@higher
0150     srdi    r6, r14, 32
0151     and r6, r6, r7
0152     /* Trap (-> BUG) if the suspended bit is clear, i.e. r6 == 0 */
0152 1:  tdeqi   r6, 0
0153     EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0
0154 
0155     /* Stash the stack pointer away for use after reclaim */
0156     std r1, PACAR1(r13)
0157 
0158     /* Clear MSR RI since we are about to use SCRATCH0, EE is already off */
0159     li  r5, 0
0160     mtmsrd  r5, 1
0161 
0162     /*
0163      * BE CAREFUL HERE:
0164      * At this point we can't take an SLB miss since we have MSR_RI
0165      * off. Load only to/from the stack/paca which are in SLB bolted regions
0166      * until we turn MSR RI back on.
0167      *
0168      * The moment we treclaim, ALL of our GPRs will switch
0169      * to user register state.  (FPRs, CCR etc. also!)
0170      * Use an sprg and a tm_scratch in the PACA to shuffle.
0171      */
0172     TRECLAIM(R4)                /* Cause in r4 */
0173 
0174     /*
0175      * ******************** GPRs ********************
0176      * Stash the checkpointed r13 in the scratch SPR and get the real paca.
0177      */
0178     SET_SCRATCH0(r13)
0179     GET_PACA(r13)
0180 
0181     /*
0182      * Stash the checkpointed r1 away in paca->tm_scratch and get the real
0183      * stack pointer back into r1.
0184      */
0185     std r1, PACATMSCRATCH(r13)
0186     ld  r1, PACAR1(r13)
0187 
0188     std r11, GPR11(r1)          /* Temporary stash */
0189 
0190     /*
0191      * Move the saved user r1 to the kernel stack in case PACATMSCRATCH is
0192      * clobbered by an exception once we turn on MSR_RI below.
0193      */
0194     ld  r11, PACATMSCRATCH(r13)
0195     std r11, GPR1(r1)
0196 
0197     /*
0198      * Store r13 away so we can free up the scratch SPR for the SLB fault
0199      * handler (needed once we start accessing the thread_struct).
0200      */
0201     GET_SCRATCH0(r11)
0202     std r11, GPR13(r1)
0203 
0204     /* Reset MSR RI so we can take SLB faults again */
0205     li  r11, MSR_RI
0206     mtmsrd  r11, 1
0207 
0208     /* Store the PPR in r11 and reset to decent value */
0209     mfspr   r11, SPRN_PPR
0210     HMT_MEDIUM
0211 
0212     /* Now get some more GPRS free */
0213     std r7, GPR7(r1)            /* Temporary stash */
0214     std r12, GPR12(r1)          /* ''   ''    ''   */
0215     ld  r12, STK_PARAM(R3)(r1)      /* Param 0, thread_struct * */
0216 
0217     std r11, THREAD_TM_PPR(r12)     /* Store PPR and free r11 */
0218 
0219     addi    r7, r12, PT_CKPT_REGS       /* Thread's ckpt_regs */
0220 
0221     /*
0222      * Make r7 look like an exception frame so that we can use the neat
0223      * GPRx(n) macros. r7 is NOT a pt_regs ptr!
0224      */
0225     subi    r7, r7, STACK_FRAME_OVERHEAD
0226 
0227     /* Sync the userland GPRs 2-12, 14-31 to thread->regs: */
0228     SAVE_GPR(0, r7)             /* user r0 */
0229     SAVE_GPRS(2, 6, r7)         /* user r2-r6 */
0230     SAVE_GPRS(8, 10, r7)            /* user r8-r10 */
0231     /* r1/r7/r11/r12/r13 were shuffled via the kernel stack above */
0231     ld  r3, GPR1(r1)            /* user r1 */
0232     ld  r4, GPR7(r1)            /* user r7 */
0233     ld  r5, GPR11(r1)           /* user r11 */
0234     ld  r6, GPR12(r1)           /* user r12 */
0235     ld  r8, GPR13(r1)           /* user r13 */
0236     std r3, GPR1(r7)
0237     std r4, GPR7(r7)
0238     std r5, GPR11(r7)
0239     std r6, GPR12(r7)
0240     std r8, GPR13(r7)
0241 
0242     SAVE_NVGPRS(r7)             /* user r14-r31 */
0243 
0244     /* ******************** NIP ******************** */
0245     mfspr   r3, SPRN_TFHAR
0246     std r3, _NIP(r7)            /* Returns to failhandler */
0247     /*
0248      * The checkpointed NIP is ignored when rescheduling/rechkpting,
0249      * but is used in signal return to 'wind back' to the abort handler.
0250      */
0251 
0252     /* ***************** CTR, LR, CR, XER ********** */
0253     mfctr   r3
0254     mflr    r4
0255     mfcr    r5
0256     mfxer   r6
0257 
0258     std r3, _CTR(r7)
0259     std r4, _LINK(r7)
0260     std r5, _CCR(r7)
0261     std r6, _XER(r7)
0262 
0263     /* ******************** TAR, DSCR ********** */
0264     mfspr   r3, SPRN_TAR
0265     mfspr   r4, SPRN_DSCR
0266 
0267     std r3, THREAD_TM_TAR(r12)
0268     std r4, THREAD_TM_DSCR(r12)
0269 
0270         /* ******************** AMR **************** */
0271         mfspr   r3, SPRN_AMR
0272         std r3, THREAD_TM_AMR(r12)
0273 
0274     /*
0275      * MSR and flags: We don't change CRs, and we don't need to alter MSR.
0276      */
0277 
0278 
0279     /*
0280      * ******************** FPR/VR/VSRs ************
0281      * After reclaiming, capture the checkpointed FPRs/VRs.
0282      *
0283      * We enabled VEC/FP/VSX in the msr above, so we can execute these
0284      * instructions!
0285      */
0286     mr  r3, r12             /* r3 = thread_struct * again */
0287 
0288     /* Altivec (VEC/VMX/VR)*/
0289     addi    r7, r3, THREAD_CKVRSTATE
0290     SAVE_32VRS(0, r6, r7)   /* r6 scratch, r7 ckvr_state */
0291     mfvscr  v0
0292     li  r6, VRSTATE_VSCR
0293     stvx    v0, r7, r6
0294 
0295     /* VRSAVE */
0296     mfspr   r0, SPRN_VRSAVE
0297     std r0, THREAD_CKVRSAVE(r3)
0298 
0299     /* Floating Point (FP) */
0300     addi    r7, r3, THREAD_CKFPSTATE
0301     SAVE_32FPRS_VSRS(0, R6, R7) /* r6 scratch, r7 ckfp_state */
0302     mffs    fr0
0303     stfd    fr0,FPSTATE_FPSCR(r7)
0304 
0305 
0306     /*
0307      * TM regs, incl TEXASR -- these live in thread_struct.  Note they've
0308      * been updated by the treclaim, to explain to userland the failure
0309      * cause (aborted).
0310      */
0311     mfspr   r0, SPRN_TEXASR
0312     mfspr   r3, SPRN_TFHAR
0313     mfspr   r4, SPRN_TFIAR
0314     std r0, THREAD_TM_TEXASR(r12)
0315     std r3, THREAD_TM_TFHAR(r12)
0316     std r4, THREAD_TM_TFIAR(r12)
0317 
0318     /* Restore kernel live AMR */
0319     ld  r8, TM_FRAME_L1(r1)
0320     mtspr   SPRN_AMR, r8
0321 
0322     /* Restore original MSR/IRQ state & clear TM mode */
0323     ld  r14, TM_FRAME_L0(r1)        /* Orig MSR */
0324 
0325     /* Insert zeroes into MSR[TS] so we leave transactional state */
0325     li  r15, 0
0326     rldimi  r14, r15, MSR_TS_LG, (63-MSR_TS_LG)-1
0327     mtmsrd  r14
0328 
0329     REST_NVGPRS(r1)
0330 
0331     /* Epilogue: tear down our frame, restore CR/LR/TOC */
0331     addi    r1, r1, TM_FRAME_SIZE
0332     lwz r4, 8(r1)
0333     ld  r0, 16(r1)
0334     mtcr    r4
0335     mtlr    r0
0336     ld  r2, STK_GOT(r1)
0337 
0338     /* Load CPU's default DSCR */
0339     ld  r0, PACA_DSCR_DEFAULT(r13)
0340     mtspr   SPRN_DSCR, r0
0341 
0342     blr
0343 
0344 
0345     /*
0346      * void __tm_recheckpoint(struct thread_struct *thread)
0347      *  - Restore the checkpointed register state saved by tm_reclaim
0348      *    when we switch_to a process.
0349      *
0350      *  Call with IRQs off, stacks get all out of sync for
0351      *  some periods in here!
0352      */
0353 _GLOBAL(__tm_recheckpoint)
0354     /* Standard prologue, mirroring tm_reclaim above */
0354     mfcr    r5
0355     mflr    r0
0356     stw r5, 8(r1)
0357     std r0, 16(r1)
0358     std r2, STK_GOT(r1)
0359     stdu    r1, -TM_FRAME_SIZE(r1)
0360 
0361     /*
0362      * We've a struct pt_regs at [r1+STACK_FRAME_OVERHEAD].
0363      * This is used for backing up the NVGPRs:
0364      */
0365     SAVE_NVGPRS(r1)
0366 
0367     /*
0368      * Save kernel live AMR since it will be clobbered for trechkpt
0369      * but can be used elsewhere later in kernel space.
0370      */
0371     mfspr   r8, SPRN_AMR
0372     std r8, TM_FRAME_L0(r1)
0373 
0374     /* Load complete register state from ts_ckpt* registers */
0375 
0376     addi    r7, r3, PT_CKPT_REGS        /* Thread's ckpt_regs */
0377 
0378     /*
0379      * Make r7 look like an exception frame so that we can use the neat
0380      * GPRx(n) macros. r7 is now NOT a pt_regs ptr!
0381      */
0382     subi    r7, r7, STACK_FRAME_OVERHEAD
0383 
0384     /* We need to setup MSR for FP/VMX/VSX register save instructions. */
0385     /* r6 keeps the original MSR so it can be restored afterwards */
0385     mfmsr   r6
0386     mr  r5, r6
0387     ori r5, r5, MSR_FP
0388 #ifdef CONFIG_ALTIVEC
0389     oris    r5, r5, MSR_VEC@h
0390 #endif
0391 #ifdef CONFIG_VSX
0392     BEGIN_FTR_SECTION
0393     oris    r5,r5, MSR_VSX@h
0394     END_FTR_SECTION_IFSET(CPU_FTR_VSX)
0395 #endif
0396     mtmsrd  r5
0397 
0398 #ifdef CONFIG_ALTIVEC
0399     /*
0400      * FP and VEC registers: These are recheckpointed from
0401      * thread.ckfp_state and thread.ckvr_state respectively. The
0402      * thread.fp_state[] version holds the 'live' (transactional)
0403      * and will be loaded subsequently by any FPUnavailable trap.
0404      */
0405     addi    r8, r3, THREAD_CKVRSTATE
0406     li  r5, VRSTATE_VSCR
0407     lvx v0, r8, r5
0408     mtvscr  v0
0409     REST_32VRS(0, r5, r8)           /* r5 scratch, r8 ptr */
0410     ld  r5, THREAD_CKVRSAVE(r3)
0411     mtspr   SPRN_VRSAVE, r5
0412 #endif
0413 
0414     addi    r8, r3, THREAD_CKFPSTATE
0415     lfd fr0, FPSTATE_FPSCR(r8)
0416     MTFSF_L(fr0)
0417     REST_32FPRS_VSRS(0, R4, R8)
0418 
0419     mtmsr   r6              /* FP/Vec off again! */
0420 
0421 /* On entry here: r3 = thread_struct *, r7 = ckpt_regs (fake pt_regs) */
0421 restore_gprs:
0422 
0423     /* ****************** CTR, LR, XER ************* */
0424     ld  r4, _CTR(r7)
0425     ld  r5, _LINK(r7)
0426     ld  r8, _XER(r7)
0427 
0428     mtctr   r4
0429     mtlr    r5
0430     mtxer   r8
0431 
0432     /* ******************** TAR ******************** */
0433     ld  r4, THREAD_TM_TAR(r3)
0434     mtspr   SPRN_TAR,   r4
0435 
0436     /* ******************** AMR ******************** */
0437     ld  r4, THREAD_TM_AMR(r3)
0438     mtspr   SPRN_AMR, r4
0439 
0440     /* Load up the PPR and DSCR in GPRs only at this stage */
0441     ld  r5, THREAD_TM_DSCR(r3)
0442     ld  r6, THREAD_TM_PPR(r3)
0443 
0444     REST_GPR(0, r7)             /* GPR0 */
0445     REST_GPRS(2, 4, r7)         /* GPR2-4 */
0446     REST_GPRS(8, 12, r7)            /* GPR8-12 */
0447     REST_GPRS(14, 31, r7)           /* GPR14-31 */
0448 
0449     /* Load up PPR and DSCR here so we don't run with user values for long */
0450     mtspr   SPRN_DSCR, r5
0451     mtspr   SPRN_PPR, r6
0452 
0453     /*
0454      * Do final sanity check on TEXASR to make sure FS is set. Do this
0455      * here before we load up the userspace r1 so any bugs we hit will get
0456      * a call chain.
0457      */
0458     mfspr   r5, SPRN_TEXASR
0459     srdi    r5, r5, 16
0460     li  r6, (TEXASR_FS)@h
0461     and r6, r6, r5
0462     /* Trap (-> BUG) if TEXASR[FS] is clear, i.e. r6 == 0 */
0462 1:  tdeqi   r6, 0
0463     EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0
0464 
0465     /*
0466      * Do final sanity check on MSR to make sure we are not transactional
0467      * or suspended.
0468      */
0469     mfmsr   r6
0470     li  r5, (MSR_TS_MASK)@higher
0471     srdi    r6, r6, 32
0472     and r6, r6, r5
0473     /* Trap (-> BUG) if any MSR[TS] bit is set, i.e. r6 != 0 */
0473 1:  tdnei   r6, 0
0474     EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0
0475 
0476     /* Restore CR */
0477     ld  r6, _CCR(r7)
0478     mtcr    r6
0479 
0480     REST_GPR(6, r7)
0481 
0482     /*
0483      * Store user r1 and r5 and r13 on the stack (in the unused save
0484      * areas / compiler reserved areas), so that we can access them after
0485      * we clear MSR RI.
0486      */
0487 
0488     REST_GPR(5, r7)
0489     std r5, -8(r1)
0490     ld  r5, GPR13(r7)
0491     std r5, -16(r1)
0492     ld  r5, GPR1(r7)
0493     std r5, -24(r1)
0494 
0495     REST_GPR(7, r7)
0496 
0497     /* Stash the stack pointer away for use after recheckpoint */
0498     std r1, PACAR1(r13)
0499 
0500     /* Clear MSR RI since we are about to clobber r13. EE is already off */
0501     li  r5, 0
0502     mtmsrd  r5, 1
0503 
0504     /*
0505      * BE CAREFUL HERE:
0506      * At this point we can't take an SLB miss since we have MSR_RI
0507      * off. Load only to/from the stack/paca which are in SLB bolted regions
0508      * until we turn MSR RI back on.
0509      */
0510 
0511     /* Pick up the user r5/r13/r1 values stashed on the stack above */
0511     ld  r5, -8(r1)
0512     ld  r13, -16(r1)
0513     ld  r1, -24(r1)
0514 
0515     /* Commit register state as checkpointed state: */
0516     TRECHKPT
0517 
0518     HMT_MEDIUM
0519 
0520     /*
0521      * Our transactional state has now changed.
0522      *
0523      * Now just get out of here.  Transactional (current) state will be
0524      * updated once restore is called on the return path in the _switch-ed
0525      * -to process.
0526      */
0527 
0528     GET_PACA(r13)
0529     ld  r1, PACAR1(r13)
0530 
0531     /* R13, R1 is restored, so we are recoverable again.  EE is still off */
0532     li  r4, MSR_RI
0533     mtmsrd  r4, 1
0534 
0535     /* Restore kernel live AMR */
0536     ld  r8, TM_FRAME_L0(r1)
0537     mtspr   SPRN_AMR, r8
0538 
0539     REST_NVGPRS(r1)
0540 
0541     /* Epilogue: tear down our frame, restore CR/LR/TOC */
0541     addi    r1, r1, TM_FRAME_SIZE
0542     lwz r4, 8(r1)
0543     ld  r0, 16(r1)
0544     mtcr    r4
0545     mtlr    r0
0546     ld  r2, STK_GOT(r1)
0547 
0548     /* Load CPU's default DSCR */
0549     ld  r0, PACA_DSCR_DEFAULT(r13)
0550     mtspr   SPRN_DSCR, r0
0551 
0552     blr
0553 
0554     /* ****************************************************************** */