#include <asm/asm-offsets.h>
#include <asm/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/exception-64s.h>
#else
#include <asm/exception-64e.h>
#endif
#include <asm/feature-fixups.h>
#include <asm/head-64.h>
#include <asm/hw_irq.h>
#include <asm/kup.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/ptrace.h>

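/*
 * TOC entries referencing the native system call table and, with
 * CONFIG_COMPAT, the compat system call table, so the tables can be
 * addressed TOC-relative via r2.
 */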
    .section    ".toc","aw"
SYS_CALL_TABLE:
    .tc sys_call_table[TC],sys_call_table

#ifdef CONFIG_COMPAT
COMPAT_SYS_CALL_TABLE:
    .tc compat_sys_call_table[TC],compat_sys_call_table
#endif
    .previous

    .align 7

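/*
 * DEBUG_SRR_VALID (only with CONFIG_PPC_RFI_SRR_DEBUG): compare SRR0/SRR1
 * (or HSRR0/HSRR1 when invoked with an argument other than "srr") against
 * the _NIP/_MSR values saved in the interrupt frame, and emit a one-shot
 * WARN if they no longer match.  clrrdi masks off the low two bits for the
 * NIP comparison.
 */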
.macro DEBUG_SRR_VALID srr
#ifdef CONFIG_PPC_RFI_SRR_DEBUG
    .ifc \srr,srr
    mfspr   r11,SPRN_SRR0
    ld  r12,_NIP(r1)
    clrrdi  r11,r11,2
    clrrdi  r12,r12,2
100:    tdne    r11,r12
    EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
    mfspr   r11,SPRN_SRR1
    ld  r12,_MSR(r1)
100:    tdne    r11,r12
    EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
    .else
    mfspr   r11,SPRN_HSRR0
    ld  r12,_NIP(r1)
    clrrdi  r11,r11,2
    clrrdi  r12,r12,2
100:    tdne    r11,r12
    EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
    mfspr   r11,SPRN_HSRR1
    ld  r12,_MSR(r1)
100:    tdne    r11,r12
    EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
    .endif
#endif
.endm

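/*
 * Entry for system calls made with the scv instruction (Book3S-64 only).
 * The macro below saves user register state into the pt_regs frame on the
 * kernel stack, calls system_call_exception() to dispatch the call, and
 * returns to userspace either through the fast rfscv path or, when the
 * full register set has to be restored, through SRR0/SRR1 and rfid.
 */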
#ifdef CONFIG_PPC_BOOK3S
.macro system_call_vectored name trapnr
    .globl system_call_vectored_\name
system_call_vectored_\name:
_ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
    SCV_INTERRUPT_TO_KERNEL
    mr  r10,r1
    ld  r1,PACAKSAVE(r13)
    std r10,0(r1)
    std r11,_NIP(r1)
    std r12,_MSR(r1)
    std r0,GPR0(r1)
    std r10,GPR1(r1)
    std r2,GPR2(r1)
    ld  r2,PACATOC(r13)
    mfcr    r12
    li  r11,0
    /* Can we avoid saving r3-r8 in common case? */
    std r3,GPR3(r1)
    std r4,GPR4(r1)
    std r5,GPR5(r1)
    std r6,GPR6(r1)
    std r7,GPR7(r1)
    std r8,GPR8(r1)
    /* Zero r9-r12, this should only be required when restoring all GPRs */
    std r11,GPR9(r1)
    std r11,GPR10(r1)
    std r11,GPR11(r1)
    std r11,GPR12(r1)
    std r9,GPR13(r1)
    SAVE_NVGPRS(r1)
    std r11,_XER(r1)
    std r11,_LINK(r1)
    std r11,_CTR(r1)

    li  r11,\trapnr
    std r11,_TRAP(r1)
    std r12,_CCR(r1)
    addi    r10,r1,STACK_FRAME_OVERHEAD
    ld  r11,exception_marker@toc(r2)
    std r11,-16(r10)        /* "regshere" marker */

BEGIN_FTR_SECTION
    HMT_MEDIUM
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

    /*
     * scv enters with MSR[EE]=1 and is immediately considered soft-masked.
     * The entry vector already sets PACAIRQSOFTMASK to IRQS_ALL_DISABLED,
     * and interrupts may be masked and pending already.
     * system_call_exception() will call trace_hardirqs_off() which means
     * interrupts could already have been blocked before trace_hardirqs_off,
     * but this is the best we can do.
     */

    /* Calling convention has r9 = orig r0, r10 = regs */
    mr  r9,r0
    bl  system_call_exception

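    /*
     * syscall_exit_prepare() returns a non-zero r3 when the full register
     * set (including the non-volatile GPRs) has to be restored; that case
     * takes the slower SRR/rfid return below instead of the fast rfscv
     * return.
     */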
.Lsyscall_vectored_\name\()_exit:
    addi    r4,r1,STACK_FRAME_OVERHEAD
    li  r5,1 /* scv */
    bl  syscall_exit_prepare
    std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
.Lsyscall_vectored_\name\()_rst_start:
    lbz r11,PACAIRQHAPPENED(r13)
    andi.   r11,r11,(~PACA_IRQ_HARD_DIS)@l
    bne-    syscall_vectored_\name\()_restart
    li  r11,IRQS_ENABLED
    stb r11,PACAIRQSOFTMASK(r13)
    li  r11,0
    stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS

    ld  r2,_CCR(r1)
    ld  r4,_NIP(r1)
    ld  r5,_MSR(r1)

BEGIN_FTR_SECTION
    stdcx.  r0,0,r1         /* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

BEGIN_FTR_SECTION
    HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

    cmpdi   r3,0
    bne .Lsyscall_vectored_\name\()_restore_regs

    /* rfscv returns with LR->NIA and CTR->MSR */
    mtlr    r4
    mtctr   r5

    /* Could zero these as per ABI, but we may consider a stricter ABI
     * which preserves these if libc implementations can benefit, so
     * restore them for now until further measurement is done. */
    ld  r0,GPR0(r1)
    ld  r4,GPR4(r1)
    ld  r5,GPR5(r1)
    ld  r6,GPR6(r1)
    ld  r7,GPR7(r1)
    ld  r8,GPR8(r1)
    /* Zero volatile regs that may contain sensitive kernel data */
    li  r9,0
    li  r10,0
    li  r11,0
    li  r12,0
    mtspr   SPRN_XER,r0

    /*
     * We don't need to restore AMR on the way back to userspace for KUAP.
     * The value of AMR only matters while we're in the kernel.
     */
    mtcr    r2
    REST_GPRS(2, 3, r1)
    REST_GPR(13, r1)
    REST_GPR(1, r1)
    RFSCV_TO_USER
    b   .   /* prevent speculative execution */

.Lsyscall_vectored_\name\()_restore_regs:
    mtspr   SPRN_SRR0,r4
    mtspr   SPRN_SRR1,r5

    ld  r3,_CTR(r1)
    ld  r4,_LINK(r1)
    ld  r5,_XER(r1)

    REST_NVGPRS(r1)
    ld  r0,GPR0(r1)
    mtcr    r2
    mtctr   r3
    mtlr    r4
    mtspr   SPRN_XER,r5
    REST_GPRS(2, 13, r1)
    REST_GPR(1, r1)
    RFI_TO_USER
.Lsyscall_vectored_\name\()_rst_end:

syscall_vectored_\name\()_restart:
_ASM_NOKPROBE_SYMBOL(syscall_vectored_\name\()_restart)
    GET_PACA(r13)
    ld  r1,PACA_EXIT_SAVE_R1(r13)
    ld  r2,PACATOC(r13)
    ld  r3,RESULT(r1)
    addi    r4,r1,STACK_FRAME_OVERHEAD
    li  r11,IRQS_ALL_DISABLED
    stb r11,PACAIRQSOFTMASK(r13)
    bl  syscall_exit_restart
    std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
    b   .Lsyscall_vectored_\name\()_rst_start
1:

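/*
 * The tables below describe the exit window between _rst_start and _rst_end:
 * the soft-mask table has that region treated as soft-masked even though the
 * soft-mask state has already been set to enabled, and the restart table
 * sends an interrupt taken inside the window back to the _restart code above
 * so the exit sequence is redone with any pending work replayed.
 */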
SOFT_MASK_TABLE(.Lsyscall_vectored_\name\()_rst_start, 1b)
RESTART_TABLE(.Lsyscall_vectored_\name\()_rst_start, .Lsyscall_vectored_\name\()_rst_end, syscall_vectored_\name\()_restart)

.endm

system_call_vectored common 0x3000

/*
 * We instantiate another entry copy for the SIGILL variant, with TRAP=0x7ff0
 * which is tested by system_call_exception when r0 is -1 (as set by vector
 * entry code).
 */
system_call_vectored sigill 0x7ff0

#endif /* CONFIG_PPC_BOOK3S */

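/*
 * Entry for system calls made with the sc instruction.  The _real variant
 * appears to be for entry while the MSR still holds the value from the
 * interrupt vector; it loads the kernel MSR from the PACA and switches to
 * it before falling through to system_call_common.
 */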
    .balign IFETCH_ALIGN_BYTES
    .globl system_call_common_real
system_call_common_real:
_ASM_NOKPROBE_SYMBOL(system_call_common_real)
    ld  r10,PACAKMSR(r13)   /* get MSR value for kernel */
    mtmsrd  r10

    .balign IFETCH_ALIGN_BYTES
    .globl system_call_common
system_call_common:
_ASM_NOKPROBE_SYMBOL(system_call_common)
    mr  r10,r1
    ld  r1,PACAKSAVE(r13)
    std r10,0(r1)
    std r11,_NIP(r1)
    std r12,_MSR(r1)
    std r0,GPR0(r1)
    std r10,GPR1(r1)
    std r2,GPR2(r1)
#ifdef CONFIG_PPC_FSL_BOOK3E
START_BTB_FLUSH_SECTION
    BTB_FLUSH(r10)
END_BTB_FLUSH_SECTION
#endif
    ld  r2,PACATOC(r13)
    mfcr    r12
    li  r11,0
    /* Can we avoid saving r3-r8 in common case? */
    std r3,GPR3(r1)
    std r4,GPR4(r1)
    std r5,GPR5(r1)
    std r6,GPR6(r1)
    std r7,GPR7(r1)
    std r8,GPR8(r1)
    /* Zero r9-r12, this should only be required when restoring all GPRs */
    std r11,GPR9(r1)
    std r11,GPR10(r1)
    std r11,GPR11(r1)
    std r11,GPR12(r1)
    std r9,GPR13(r1)
    SAVE_NVGPRS(r1)
    std r11,_XER(r1)
    std r11,_CTR(r1)
    mflr    r10

    /*
     * This clears CR0.SO (bit 28), which is the error indication on
     * return from this system call.
     */
    rldimi  r12,r11,28,(63-28)
    li  r11,0xc00
    std r10,_LINK(r1)
    std r11,_TRAP(r1)
    std r12,_CCR(r1)
    addi    r10,r1,STACK_FRAME_OVERHEAD
    ld  r11,exception_marker@toc(r2)
    std r11,-16(r10)        /* "regshere" marker */

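    /*
     * SRR0/SRR1 still hold the values that entry saved into _NIP/_MSR, so
     * mark them valid; the exit path can then avoid reloading them.
     */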
#ifdef CONFIG_PPC_BOOK3S
    li  r11,1
    stb r11,PACASRR_VALID(r13)
#endif

    /*
     * We always enter kernel from userspace with irq soft-mask enabled and
     * nothing pending. system_call_exception() will call
     * trace_hardirqs_off().
     */
    li  r11,IRQS_ALL_DISABLED
    stb r11,PACAIRQSOFTMASK(r13)
#ifdef CONFIG_PPC_BOOK3S
    li  r12,-1 /* Set MSR_EE and MSR_RI */
    mtmsrd  r12,1
#else
    wrteei  1
#endif

    /* Calling convention has r9 = orig r0, r10 = regs */
    mr  r9,r0
    bl  system_call_exception

.Lsyscall_exit:
    addi    r4,r1,STACK_FRAME_OVERHEAD
    li  r5,0 /* !scv */
    bl  syscall_exit_prepare
    std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
#ifdef CONFIG_PPC_BOOK3S
.Lsyscall_rst_start:
    lbz r11,PACAIRQHAPPENED(r13)
    andi.   r11,r11,(~PACA_IRQ_HARD_DIS)@l
    bne-    syscall_restart
#endif
    li  r11,IRQS_ENABLED
    stb r11,PACAIRQSOFTMASK(r13)
    li  r11,0
    stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS

    ld  r2,_CCR(r1)
    ld  r6,_LINK(r1)
    mtlr    r6

#ifdef CONFIG_PPC_BOOK3S
    lbz r4,PACASRR_VALID(r13)
    cmpdi   r4,0
    bne 1f
    li  r4,0
    stb r4,PACASRR_VALID(r13)
#endif
    ld  r4,_NIP(r1)
    ld  r5,_MSR(r1)
    mtspr   SPRN_SRR0,r4
    mtspr   SPRN_SRR1,r5
1:
    DEBUG_SRR_VALID srr

BEGIN_FTR_SECTION
    stdcx.  r0,0,r1         /* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

    cmpdi   r3,0
    bne .Lsyscall_restore_regs
    /* Zero volatile regs that may contain sensitive kernel data */
    li  r0,0
    li  r4,0
    li  r5,0
    li  r6,0
    li  r7,0
    li  r8,0
    li  r9,0
    li  r10,0
    li  r11,0
    li  r12,0
    mtctr   r0
    mtspr   SPRN_XER,r0
.Lsyscall_restore_regs_cont:

BEGIN_FTR_SECTION
    HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

    /*
     * We don't need to restore AMR on the way back to userspace for KUAP.
     * The value of AMR only matters while we're in the kernel.
     */
    mtcr    r2
    REST_GPRS(2, 3, r1)
    REST_GPR(13, r1)
    REST_GPR(1, r1)
    RFI_TO_USER
    b   .   /* prevent speculative execution */

.Lsyscall_restore_regs:
    ld  r3,_CTR(r1)
    ld  r4,_XER(r1)
    REST_NVGPRS(r1)
    mtctr   r3
    mtspr   SPRN_XER,r4
    ld  r0,GPR0(r1)
    REST_GPRS(4, 12, r1)
    b   .Lsyscall_restore_regs_cont
.Lsyscall_rst_end:

#ifdef CONFIG_PPC_BOOK3S
syscall_restart:
_ASM_NOKPROBE_SYMBOL(syscall_restart)
    GET_PACA(r13)
    ld  r1,PACA_EXIT_SAVE_R1(r13)
    ld  r2,PACATOC(r13)
    ld  r3,RESULT(r1)
    addi    r4,r1,STACK_FRAME_OVERHEAD
    li  r11,IRQS_ALL_DISABLED
    stb r11,PACAIRQSOFTMASK(r13)
    bl  syscall_exit_restart
    std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
    b   .Lsyscall_rst_start
1:

SOFT_MASK_TABLE(.Lsyscall_rst_start, 1b)
RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart)
#endif

    /*
     * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
     * touched, no exit work created, then this can be used.
     */
    .balign IFETCH_ALIGN_BYTES
    .globl fast_interrupt_return_srr
fast_interrupt_return_srr:
_ASM_NOKPROBE_SYMBOL(fast_interrupt_return_srr)
    kuap_check_amr r3, r4
    ld  r5,_MSR(r1)
    andi.   r0,r5,MSR_PR
#ifdef CONFIG_PPC_BOOK3S
    beq 1f
    kuap_user_restore r3, r4
    b   .Lfast_user_interrupt_return_srr
1:  kuap_kernel_restore r3, r4
    andi.   r0,r5,MSR_RI
    li  r3,0 /* 0 return value, no EMULATE_STACK_STORE */
    bne+    .Lfast_kernel_interrupt_return_srr
    addi    r3,r1,STACK_FRAME_OVERHEAD
    bl  unrecoverable_exception
    b   . /* should not get here */
#else
    bne .Lfast_user_interrupt_return_srr
    b   .Lfast_kernel_interrupt_return_srr
#endif

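/*
 * interrupt_return_macro builds the common interrupt exit path in an SRR and
 * (on Book3S) an HSRR flavour.  The user return goes through
 * interrupt_exit_user_prepare() and re-enables the irq soft-mask; the kernel
 * return goes through interrupt_exit_kernel_prepare(), restores the
 * soft-mask state saved in the frame, and may have to emulate a stack store
 * on the way out.  Both end in RFI/HRFI.
 */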
.macro interrupt_return_macro srr
    .balign IFETCH_ALIGN_BYTES
    .globl interrupt_return_\srr
interrupt_return_\srr\():
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\())
    ld  r4,_MSR(r1)
    andi.   r0,r4,MSR_PR
    beq interrupt_return_\srr\()_kernel
interrupt_return_\srr\()_user: /* make backtraces match the _kernel variant */
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user)
    addi    r3,r1,STACK_FRAME_OVERHEAD
    bl  interrupt_exit_user_prepare
    cmpdi   r3,0
    bne-    .Lrestore_nvgprs_\srr
.Lrestore_nvgprs_\srr\()_cont:
    std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
#ifdef CONFIG_PPC_BOOK3S
.Linterrupt_return_\srr\()_user_rst_start:
    lbz r11,PACAIRQHAPPENED(r13)
    andi.   r11,r11,(~PACA_IRQ_HARD_DIS)@l
    bne-    interrupt_return_\srr\()_user_restart
#endif
    li  r11,IRQS_ENABLED
    stb r11,PACAIRQSOFTMASK(r13)
    li  r11,0
    stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS

.Lfast_user_interrupt_return_\srr\():
#ifdef CONFIG_PPC_BOOK3S
    .ifc \srr,srr
    lbz r4,PACASRR_VALID(r13)
    .else
    lbz r4,PACAHSRR_VALID(r13)
    .endif
    cmpdi   r4,0
    li  r4,0
    bne 1f
#endif
    ld  r11,_NIP(r1)
    ld  r12,_MSR(r1)
    .ifc \srr,srr
    mtspr   SPRN_SRR0,r11
    mtspr   SPRN_SRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
    stb r4,PACASRR_VALID(r13)
#endif
    .else
    mtspr   SPRN_HSRR0,r11
    mtspr   SPRN_HSRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
    stb r4,PACAHSRR_VALID(r13)
#endif
    .endif
    DEBUG_SRR_VALID \srr

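    /*
     * Debug check: trap if the irq soft-mask state is anything other than
     * IRQS_ENABLED when returning to userspace.
     */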
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
    lbz r4,PACAIRQSOFTMASK(r13)
    tdnei   r4,IRQS_ENABLED
#endif

BEGIN_FTR_SECTION
    ld  r10,_PPR(r1)
    mtspr   SPRN_PPR,r10
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

BEGIN_FTR_SECTION
    stdcx.  r0,0,r1     /* to clear the reservation */
FTR_SECTION_ELSE
    ldarx   r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

    ld  r3,_CCR(r1)
    ld  r4,_LINK(r1)
    ld  r5,_CTR(r1)
    ld  r6,_XER(r1)
    li  r0,0

    REST_GPRS(7, 13, r1)

    mtcr    r3
    mtlr    r4
    mtctr   r5
    mtspr   SPRN_XER,r6

    REST_GPRS(2, 6, r1)
    REST_GPR(0, r1)
    REST_GPR(1, r1)
    .ifc \srr,srr
    RFI_TO_USER
    .else
    HRFI_TO_USER
    .endif
    b   .   /* prevent speculative execution */
.Linterrupt_return_\srr\()_user_rst_end:

.Lrestore_nvgprs_\srr\():
    REST_NVGPRS(r1)
    b   .Lrestore_nvgprs_\srr\()_cont

#ifdef CONFIG_PPC_BOOK3S
interrupt_return_\srr\()_user_restart:
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user_restart)
    GET_PACA(r13)
    ld  r1,PACA_EXIT_SAVE_R1(r13)
    ld  r2,PACATOC(r13)
    addi    r3,r1,STACK_FRAME_OVERHEAD
    li  r11,IRQS_ALL_DISABLED
    stb r11,PACAIRQSOFTMASK(r13)
    bl  interrupt_exit_user_restart
    std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
    b   .Linterrupt_return_\srr\()_user_rst_start
1:

SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_user_rst_start, 1b)
RESTART_TABLE(.Linterrupt_return_\srr\()_user_rst_start, .Linterrupt_return_\srr\()_user_rst_end, interrupt_return_\srr\()_user_restart)
#endif

    .balign IFETCH_ALIGN_BYTES
interrupt_return_\srr\()_kernel:
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel)
    addi    r3,r1,STACK_FRAME_OVERHEAD
    bl  interrupt_exit_kernel_prepare

    std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
.Linterrupt_return_\srr\()_kernel_rst_start:
    ld  r11,SOFTE(r1)
    cmpwi   r11,IRQS_ENABLED
    stb r11,PACAIRQSOFTMASK(r13)
    bne 1f
#ifdef CONFIG_PPC_BOOK3S
    lbz r11,PACAIRQHAPPENED(r13)
    andi.   r11,r11,(~PACA_IRQ_HARD_DIS)@l
    bne-    interrupt_return_\srr\()_kernel_restart
#endif
    li  r11,0
    stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
1:

.Lfast_kernel_interrupt_return_\srr\():
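    /*
     * r3 is non-zero when a frame store (stdu) must be emulated on exit
     * (see "emulate stack store" below); keep the test result in cr1, as r3
     * is overwritten by the register restore that follows.
     */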
    cmpdi   cr1,r3,0
#ifdef CONFIG_PPC_BOOK3S
    .ifc \srr,srr
    lbz r4,PACASRR_VALID(r13)
    .else
    lbz r4,PACAHSRR_VALID(r13)
    .endif
    cmpdi   r4,0
    li  r4,0
    bne 1f
#endif
    ld  r11,_NIP(r1)
    ld  r12,_MSR(r1)
    .ifc \srr,srr
    mtspr   SPRN_SRR0,r11
    mtspr   SPRN_SRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
    stb r4,PACASRR_VALID(r13)
#endif
    .else
    mtspr   SPRN_HSRR0,r11
    mtspr   SPRN_HSRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
    stb r4,PACAHSRR_VALID(r13)
#endif
    .endif
    DEBUG_SRR_VALID \srr

BEGIN_FTR_SECTION
    stdcx.  r0,0,r1     /* to clear the reservation */
FTR_SECTION_ELSE
    ldarx   r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

    ld  r3,_LINK(r1)
    ld  r4,_CTR(r1)
    ld  r5,_XER(r1)
    ld  r6,_CCR(r1)
    li  r0,0

    REST_GPRS(7, 12, r1)

    mtlr    r3
    mtctr   r4
    mtspr   SPRN_XER,r5

    /*
     * Leaving a stale exception_marker on the stack can confuse
     * the reliable stack unwinder later on. Clear it.
     */
    std r0,STACK_FRAME_OVERHEAD-16(r1)

    REST_GPRS(2, 5, r1)

    bne-    cr1,1f /* emulate stack store */
    mtcr    r6
    REST_GPR(6, r1)
    REST_GPR(0, r1)
    REST_GPR(1, r1)
    .ifc \srr,srr
    RFI_TO_KERNEL
    .else
    HRFI_TO_KERNEL
    .endif
    b   .   /* prevent speculative execution */

1:  /*
     * Emulate stack store with update. New r1 value was already calculated
     * and updated in our interrupt regs by emulate_loadstore, but we can't
     * store the previous value of r1 to the stack before re-loading our
     * registers from it, otherwise they could be clobbered.  Use
     * PACA_EXGEN as temporary storage to hold the store data, as
     * interrupts are disabled here so it won't be clobbered.
     */
    mtcr    r6
    std r9,PACA_EXGEN+0(r13)
    addi    r9,r1,INT_FRAME_SIZE /* get original r1 */
    REST_GPR(6, r1)
    REST_GPR(0, r1)
    REST_GPR(1, r1)
    std r9,0(r1) /* perform store component of stdu */
    ld  r9,PACA_EXGEN+0(r13)

    .ifc \srr,srr
    RFI_TO_KERNEL
    .else
    HRFI_TO_KERNEL
    .endif
    b   .   /* prevent speculative execution */
.Linterrupt_return_\srr\()_kernel_rst_end:

#ifdef CONFIG_PPC_BOOK3S
interrupt_return_\srr\()_kernel_restart:
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel_restart)
    GET_PACA(r13)
    ld  r1,PACA_EXIT_SAVE_R1(r13)
    ld  r2,PACATOC(r13)
    addi    r3,r1,STACK_FRAME_OVERHEAD
    li  r11,IRQS_ALL_DISABLED
    stb r11,PACAIRQSOFTMASK(r13)
    bl  interrupt_exit_kernel_restart
    std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
    b   .Linterrupt_return_\srr\()_kernel_rst_start
1:

SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, 1b)
RESTART_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, .Linterrupt_return_\srr\()_kernel_rst_end, interrupt_return_\srr\()_kernel_restart)
#endif

.endm

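/*
 * Instantiate the return paths: the SRR flavour always, and on Book3S also
 * the HSRR flavour used by interrupts that save state in HSRR0/HSRR1.
 */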
interrupt_return_macro srr
#ifdef CONFIG_PPC_BOOK3S
interrupt_return_macro hsrr

    .globl __end_soft_masked
__end_soft_masked:
DEFINE_FIXED_SYMBOL(__end_soft_masked, text)
#endif /* CONFIG_PPC_BOOK3S */

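/*
 * Return points for newly created tasks, reached from the context switch:
 * schedule_tail() finishes the switch and the child then exits to userspace
 * through the syscall exit path with a fork() return value of 0.  For
 * kernel threads, copy_thread() is expected to have put the thread function
 * in r14 and its argument in r15 (r12 is also set up for the ELFv2 global
 * entry point before the bctrl).
 */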
#ifdef CONFIG_PPC_BOOK3S
_GLOBAL(ret_from_fork_scv)
    bl  schedule_tail
    REST_NVGPRS(r1)
    li  r3,0    /* fork() return value */
    b   .Lsyscall_vectored_common_exit
#endif

_GLOBAL(ret_from_fork)
    bl  schedule_tail
    REST_NVGPRS(r1)
    li  r3,0    /* fork() return value */
    b   .Lsyscall_exit

_GLOBAL(ret_from_kernel_thread)
    bl  schedule_tail
    REST_NVGPRS(r1)
    mtctr   r14
    mr  r3,r15
#ifdef CONFIG_PPC64_ELF_ABI_V2
    mr  r12,r14
#endif
    bctrl
    li  r3,0
    b   .Lsyscall_exit