/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright 2018, IBM Corporation.
 *
 * This file contains general idle entry/exit functions to save
 * and restore stack and NVGPRs which allows C code to call idle
 * states that lose GPRs, and it will return transparently with
 * SRR1 wakeup reason return value.
 *
 * The platform / CPU caller must ensure SPRs and any other non-GPR
 * state is saved and restored correctly, handle KVM, interrupts, etc.
 */

#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ppc-opcode.h>
#include <asm/cpuidle.h>
#include <asm/thread_info.h> /* TLF_NAPPING */

#ifdef CONFIG_PPC_P7_NAP
/*
 * Desired PSSCR in r3
 *
 * No state will be lost regardless of wakeup mechanism (interrupt or NIA).
 *
 * An EC=0 type wakeup will return with a value of 0. SRESET wakeup (which can
 * happen with xscom SRESET and possibly MCE) may clobber volatiles except LR,
 * and must blr, to return to caller with r3 set according to caller's expected
 * return code (for Book3S/64 that is SRR1).
 */
_GLOBAL(isa300_idle_stop_noloss)
	mtspr	SPRN_PSSCR,r3
	PPC_STOP
	li	r3,0
	blr

/*
 * Desired PSSCR in r3
 *
 * GPRs may be lost, so they are saved here. Wakeup is by interrupt only.
 * The SRESET wakeup returns to this function's caller by calling
 * idle_return_gpr_loss with r3 set to the desired return value.
 *
 * A wakeup without GPR loss may alternatively be handled as in
 * isa300_idle_stop_noloss and blr directly, as an optimisation.
 *
 * The caller is responsible for saving/restoring SPRs, MSR, timebase,
 * etc.
 */
_GLOBAL(isa300_idle_stop_mayloss)
	mtspr	SPRN_PSSCR,r3
	std	r1,PACAR1(r13)
	mflr	r4
	mfcr	r5
	/*
	 * Use the stack red zone rather than a new frame for saving regs since
	 * in the case of no GPR loss the wakeup code branches directly back to
	 * the caller without deallocating the stack frame first.
	 */
	std	r2,-8*1(r1)
	std	r14,-8*2(r1)
	std	r15,-8*3(r1)
	std	r16,-8*4(r1)
	std	r17,-8*5(r1)
	std	r18,-8*6(r1)
	std	r19,-8*7(r1)
	std	r20,-8*8(r1)
	std	r21,-8*9(r1)
	std	r22,-8*10(r1)
	std	r23,-8*11(r1)
	std	r24,-8*12(r1)
	std	r25,-8*13(r1)
	std	r26,-8*14(r1)
	std	r27,-8*15(r1)
	std	r28,-8*16(r1)
	std	r29,-8*17(r1)
	std	r30,-8*18(r1)
	std	r31,-8*19(r1)
	std	r4,-8*20(r1)
	std	r5,-8*21(r1)
	/* 168 bytes */
	PPC_STOP
	b	.	/* catch bugs */

/*
 * Desired return value in r3
 *
 * The idle wakeup SRESET interrupt can call this to return to the idle
 * sleep function's caller with r3 as the return code.
 *
 * This must not be used if idle was entered via a _noloss function (use
 * a simple blr instead).
 */
_GLOBAL(idle_return_gpr_loss)
	ld	r1,PACAR1(r13)
	ld	r4,-8*20(r1)
	ld	r5,-8*21(r1)
	mtlr	r4
	mtcr	r5
	/*
	 * KVM nap requires r2 to be saved, rather than just restoring it
	 * from PACATOC. This could be avoided for that less common case
	 * if KVM saved its r2.
	 */
	ld	r2,-8*1(r1)
	ld	r14,-8*2(r1)
	ld	r15,-8*3(r1)
	ld	r16,-8*4(r1)
	ld	r17,-8*5(r1)
	ld	r18,-8*6(r1)
	ld	r19,-8*7(r1)
	ld	r20,-8*8(r1)
	ld	r21,-8*9(r1)
	ld	r22,-8*10(r1)
	ld	r23,-8*11(r1)
	ld	r24,-8*12(r1)
	ld	r25,-8*13(r1)
	ld	r26,-8*14(r1)
	ld	r27,-8*15(r1)
	ld	r28,-8*16(r1)
	ld	r29,-8*17(r1)
	ld	r30,-8*18(r1)
	ld	r31,-8*19(r1)
	blr

/*
 * This is the sequence required to execute idle instructions, as
 * specified in ISA v2.07 (and earlier). MSR[IR] and MSR[DR] must be 0.
 * We have to store a GPR somewhere, ptesync, then reload it, and create
 * a false dependency on the result of the load. It doesn't matter which
 * GPR we store, or where we store it. We have already stored r2 to the
 * stack at -8(r1) in isa206_idle_insn_mayloss, so use that.
 */
#define IDLE_STATE_ENTER_SEQ_NORET(IDLE_INST)			\
	/* Magic NAP/SLEEP/WINKLE mode enter sequence */	\
	std	r2,-8(r1);					\
	ptesync;						\
	ld	r2,-8(r1);					\
236:	cmpd	cr0,r2,r2;					\
	bne	236b;						\
	IDLE_INST;						\
	b	.	/* catch bugs */

/*
 * Desired instruction type in r3
 *
 * GPRs may be lost, so they are saved here. Wakeup is by interrupt only.
 * The SRESET wakeup returns to this function's caller by calling
 * idle_return_gpr_loss with r3 set to the desired return value.
 *
 * A wakeup without GPR loss may alternatively be handled as in
 * isa300_idle_stop_noloss and blr directly, as an optimisation.
 *
 * The caller is responsible for saving/restoring SPRs, MSR, timebase,
 * etc.
 *
 * This must be called in real-mode (MSR_IDLE).
 */
_GLOBAL(isa206_idle_insn_mayloss)
	std	r1,PACAR1(r13)
	mflr	r4
	mfcr	r5
	/*
	 * Use the stack red zone rather than a new frame for saving regs since
	 * in the case of no GPR loss the wakeup code branches directly back to
	 * the caller without deallocating the stack frame first.
	 */
	std	r2,-8*1(r1)
	std	r14,-8*2(r1)
	std	r15,-8*3(r1)
	std	r16,-8*4(r1)
	std	r17,-8*5(r1)
	std	r18,-8*6(r1)
	std	r19,-8*7(r1)
	std	r20,-8*8(r1)
	std	r21,-8*9(r1)
	std	r22,-8*10(r1)
	std	r23,-8*11(r1)
	std	r24,-8*12(r1)
	std	r25,-8*13(r1)
	std	r26,-8*14(r1)
	std	r27,-8*15(r1)
	std	r28,-8*16(r1)
	std	r29,-8*17(r1)
	std	r30,-8*18(r1)
	std	r31,-8*19(r1)
	std	r4,-8*20(r1)
	std	r5,-8*21(r1)
	cmpwi	r3,PNV_THREAD_NAP
	bne	1f
	IDLE_STATE_ENTER_SEQ_NORET(PPC_NAP)
1:	cmpwi	r3,PNV_THREAD_SLEEP
	bne	2f
	IDLE_STATE_ENTER_SEQ_NORET(PPC_SLEEP)
2:	IDLE_STATE_ENTER_SEQ_NORET(PPC_WINKLE)
#endif

#ifdef CONFIG_PPC_970_NAP
_GLOBAL(power4_idle_nap)
	LOAD_REG_IMMEDIATE(r7, MSR_KERNEL|MSR_EE|MSR_POW)
	ld	r9,PACA_THREAD_INFO(r13)
	ld	r8,TI_LOCAL_FLAGS(r9)
	ori	r8,r8,_TLF_NAPPING
	std	r8,TI_LOCAL_FLAGS(r9)
	/*
	 * The NAPPING bit is set; from this point onward power4_fixup_nap
	 * will cause exceptions to return to power4_idle_nap_return.
	 */
1:	sync
	isync
	mtmsrd	r7
	isync
	b	1b

	.globl power4_idle_nap_return
power4_idle_nap_return:
	blr
#endif
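For reference, here is a minimal C-level sketch of how a platform idle driver might call the _mayloss entry point above. The prototypes mirror the register convention documented in this file (desired PSSCR in r3 on entry, SRR1 wakeup reason in r3 on return); the example_stop() wrapper and its surrounding save/restore steps are illustrative assumptions, not the kernel's actual platform idle code.

/*
 * Hypothetical usage sketch (not the kernel's real platform idle driver).
 * From C, a GPR-losing stop looks like an ordinary function call: the asm
 * entry saves NVGPRs/LR/CR in the red zone; if the wakeup lost GPRs, the
 * system-reset wakeup path restores them and returns here via
 * idle_return_gpr_loss with the SRR1 wakeup reason as the return value.
 */
extern unsigned long isa300_idle_stop_noloss(unsigned long psscr_val);
extern unsigned long isa300_idle_stop_mayloss(unsigned long psscr_val);

static unsigned long example_stop(unsigned long psscr_val, int state_loss)
{
	unsigned long srr1;

	if (!state_loss)
		return isa300_idle_stop_noloss(psscr_val); /* 0 on EC=0 wakeup */

	/* Caller must save SPRs, timebase, etc. before a state-losing stop. */
	srr1 = isa300_idle_stop_mayloss(psscr_val);
	/* ... restore SPRs and act on the SRR1 wakeup reason here ... */
	return srr1;
}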