/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/export.h>
#include <asm/feature-fixups.h>
#include <asm/barrier.h>
#include <asm/kup.h>
#include <asm/bug.h>
#include <asm/interrupt.h>

#include "head_32.h"

/*
 * powerpc relies on return from interrupt/syscall being context synchronising
 * (which rfi is) to support ARCH_HAS_MEMBARRIER_SYNC_CORE without additional
 * synchronisation instructions.
 */
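/*
 * Concretely, the sync_core flavour of the membarrier() syscall requires
 * every CPU to execute a core-serialising instruction before it next runs
 * user code; rfi already is one, so the exit paths below need nothing extra.
 * A rough userspace illustration of the facility being supported:
 *
 *	syscall(__NR_membarrier, MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE, 0, 0);
 *	syscall(__NR_membarrier, MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE, 0, 0);
 */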

/*
 * Align to 4k to ensure that all functions modifying srr0/srr1 fit into
 * one page, so that no TLB miss can occur between the modification of
 * srr0/srr1 and the associated rfi.
 */
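/*
 * The .align 12 below requests 2^12 = 4096-byte alignment, i.e. a page
 * boundary, which is what provides the one-page guarantee described above.
 */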
    .align  12

#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
    .globl  prepare_transfer_to_handler
prepare_transfer_to_handler:
    /* if from kernel, check interrupted DOZE/NAP mode */
    lwz r12,TI_LOCAL_FLAGS(r2)
    mtcrf   0x01,r12
    bt- 31-TLF_NAPPING,4f
    bt- 31-TLF_SLEEPING,7f
    blr

4:  rlwinm  r12,r12,0,~_TLF_NAPPING
    stw r12,TI_LOCAL_FLAGS(r2)
    b   power_save_ppc32_restore

7:  rlwinm  r12,r12,0,~_TLF_SLEEPING
    stw r12,TI_LOCAL_FLAGS(r2)
    lwz r9,_MSR(r11)        /* if sleeping, clear MSR.EE */
    rlwinm  r9,r9,0,~MSR_EE
    lwz r12,_LINK(r11)      /* and return to address in LR */
    lwz r2, GPR2(r11)
    b   fast_exception_return
_ASM_NOKPROBE_SYMBOL(prepare_transfer_to_handler)
#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_E500 */

#if defined(CONFIG_PPC_KUEP) && defined(CONFIG_PPC_BOOK3S_32)
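/*
 * Kernel Userspace Execution Prevention on book3s/32: the helpers below
 * rewrite the user segment registers from the value cached in thread_struct
 * (THSR0).  __kuep_lock applies that value as-is on kernel entry, while
 * __kuep_unlock applies it with SR_NX cleared before returning to user mode,
 * so the kernel cannot execute from user addresses while it is running.
 */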
    .globl  __kuep_lock
__kuep_lock:
    lwz r9, THREAD+THSR0(r2)
    update_user_segments_by_4 r9, r10, r11, r12
    blr

__kuep_unlock:
    lwz r9, THREAD+THSR0(r2)
    rlwinm  r9,r9,0,~SR_NX
    update_user_segments_by_4 r9, r10, r11, r12
    blr

.macro  kuep_lock
    bl  __kuep_lock
.endm
.macro  kuep_unlock
    bl  __kuep_unlock
.endm
#else
.macro  kuep_lock
.endm
.macro  kuep_unlock
.endm
#endif

    .globl  transfer_to_syscall
transfer_to_syscall:
    stw r11, GPR1(r1)
    stw r11, 0(r1)
    mflr    r12
    stw r12, _LINK(r1)
#ifdef CONFIG_BOOKE_OR_40x
    rlwinm  r9,r9,0,14,12       /* clear MSR_WE (necessary?) */
#endif
    lis r12,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
    SAVE_GPR(2, r1)
    addi    r12,r12,STACK_FRAME_REGS_MARKER@l
    stw r9,_MSR(r1)
    li  r2, INTERRUPT_SYSCALL
    stw r12,8(r1)
    stw r2,_TRAP(r1)
    SAVE_GPR(0, r1)
    SAVE_GPRS(3, 8, r1)
    addi    r2,r10,-THREAD
    SAVE_NVGPRS(r1)
    kuep_lock

    /* Calling convention has r9 = orig r0, r10 = regs */
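    /*
     * With the 32-bit ELF ABI the argument registers r3-r10 map directly
     * onto the C handler, whose prototype is roughly:
     *
     *	long system_call_exception(long r3, long r4, long r5, long r6,
     *				   long r7, long r8, unsigned long r0,
     *				   struct pt_regs *regs);
     *
     * so r3-r8 still hold the user's syscall arguments, r9 gets the original
     * r0 (the syscall number) and r10 the pt_regs pointer built above.
     */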
    addi    r10,r1,STACK_FRAME_OVERHEAD
    mr  r9,r0
    bl  system_call_exception

ret_from_syscall:
    addi    r4,r1,STACK_FRAME_OVERHEAD
    li  r5,0
    bl  syscall_exit_prepare
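    /*
     * syscall_exit_prepare() is called here roughly as
     * syscall_exit_prepare(r3, regs, 0): r3 is the handler's return value,
     * r4 points at the pt_regs frame and r5 = 0 because 32-bit has no scv
     * syscall vector.  Its return value is tested below: non-zero means the
     * non-volatile GPRs must also be reloaded (the "3:" path), zero allows
     * the fast exit that restores only the volatile state.
     */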
#ifdef CONFIG_PPC_47x
    lis r4,icache_44x_need_flush@ha
    lwz r5,icache_44x_need_flush@l(r4)
    cmplwi  cr0,r5,0
    bne-    2f
#endif /* CONFIG_PPC_47x */
    kuep_unlock
    lwz r4,_LINK(r1)
    lwz r5,_CCR(r1)
    mtlr    r4
    lwz r7,_NIP(r1)
    lwz r8,_MSR(r1)
    cmpwi   r3,0
    lwz r3,GPR3(r1)
syscall_exit_finish:
    mtspr   SPRN_SRR0,r7
    mtspr   SPRN_SRR1,r8

    bne 3f
    mtcr    r5

1:  lwz r2,GPR2(r1)
    lwz r1,GPR1(r1)
    rfi
#ifdef CONFIG_40x
    b . /* Prevent prefetch past rfi */
#endif

3:  mtcr    r5
    lwz r4,_CTR(r1)
    lwz r5,_XER(r1)
    REST_NVGPRS(r1)
    mtctr   r4
    mtxer   r5
    lwz r0,GPR0(r1)
    lwz r3,GPR3(r1)
    REST_GPRS(4, 11, r1)
    lwz r12,GPR12(r1)
    b   1b

#ifdef CONFIG_44x
2:  li  r7,0
    iccci   r0,r0
    stw r7,icache_44x_need_flush@l(r4)
    b   1b
#endif  /* CONFIG_44x */

    .globl  ret_from_fork
ret_from_fork:
    REST_NVGPRS(r1)
    bl  schedule_tail
    li  r3,0
    b   ret_from_syscall

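/*
 * Kernel threads land here on their first schedule.  copy_thread() is
 * expected to have parked the thread function in r14 and its argument in
 * r15 (both non-volatile, so they survive the context switch); the bctrl
 * below invokes it, and if the function returns (e.g. after kernel_execve()
 * has turned the thread into a user process) we fall through to the normal
 * syscall return path with r3 = 0.
 */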
    .globl  ret_from_kernel_thread
ret_from_kernel_thread:
    REST_NVGPRS(r1)
    bl  schedule_tail
    mtctr   r14
    mr  r3,r15
    PPC440EP_ERR42
    bctrl
    li  r3,0
    b   ret_from_syscall

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
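/*
 * Seen from C (via switch_to()/__switch_to()), the prototype is along the
 * lines of
 *
 *	struct task_struct *_switch(struct thread_struct *prev,
 *				    struct thread_struct *next);
 *
 * with the previously running task ("last") as the return value, which the
 * code below produces by copying the old current (r2) into r3 before r2 is
 * repointed at the new task.
 */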
_GLOBAL(_switch)
    stwu    r1,-INT_FRAME_SIZE(r1)
    mflr    r0
    stw r0,INT_FRAME_SIZE+4(r1)
    /* r3-r12 are caller saved -- Cort */
    SAVE_NVGPRS(r1)
    stw r0,_NIP(r1) /* Return to switch caller */
    mfcr    r10
    stw r10,_CCR(r1)
    stw r1,KSP(r3)  /* Set old stack pointer */

#ifdef CONFIG_SMP
    /* We need a sync somewhere here to make sure that if the
     * previous task gets rescheduled on another CPU, it sees all
     * stores it has performed on this one.
     */
    sync
#endif /* CONFIG_SMP */

    tophys(r0,r4)
    mtspr   SPRN_SPRG_THREAD,r0 /* Update current THREAD phys addr */
    lwz r1,KSP(r4)  /* Load new stack pointer */

    /* save the old current 'last' for return value */
    mr  r3,r2
    addi    r2,r4,-THREAD   /* Update current */

    lwz r0,_CCR(r1)
    mtcrf   0xFF,r0
    /* r3-r12 are destroyed -- Cort */
    REST_NVGPRS(r1)

    lwz r4,_NIP(r1) /* Return to _switch caller in new task */
    mtlr    r4
    addi    r1,r1,INT_FRAME_SIZE
    blr

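/*
 * Fast return from an exception.  The conventions used on entry below are:
 * r11 points at the saved register frame (GPRn/_CCR/_LINK offsets are read
 * relative to it), r9 holds the MSR image to be restored via SRR1, and r12
 * the return address that goes into SRR0.
 */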
    .globl  fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
    andi.   r10,r9,MSR_RI       /* check for recoverable interrupt */
    beq 3f          /* if not, we've got problems */
#endif

2:  REST_GPRS(3, 6, r11)
    lwz r10,_CCR(r11)
    REST_GPRS(1, 2, r11)
    mtcr    r10
    lwz r10,_LINK(r11)
    mtlr    r10
    /* Clear the exception_marker on the stack to avoid confusing stacktrace */
    li  r10, 0
    stw r10, 8(r11)
    REST_GPR(10, r11)
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
    mtspr   SPRN_NRI, r0
#endif
    mtspr   SPRN_SRR1,r9
    mtspr   SPRN_SRR0,r12
    REST_GPR(9, r11)
    REST_GPR(12, r11)
    lwz r11,GPR11(r11)
    rfi
#ifdef CONFIG_40x
    b . /* Prevent prefetch past rfi */
#endif
_ASM_NOKPROBE_SYMBOL(fast_exception_return)

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
3:
    li  r10,-1
    stw r10,_TRAP(r11)
    prepare_transfer_to_handler
    bl  unrecoverable_exception
    trap    /* should not get here */

    .globl interrupt_return
interrupt_return:
    lwz r4,_MSR(r1)
    addi    r3,r1,STACK_FRAME_OVERHEAD
    andi.   r0,r4,MSR_PR
    beq .Lkernel_interrupt_return
    bl  interrupt_exit_user_prepare
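    /*
     * A non-zero return from interrupt_exit_user_prepare() requests a full
     * restore of the non-volatile GPRs from the frame (the .Lrestore_nvgprs
     * path below); zero takes the fast path that reloads only the volatile
     * state.
     */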
    cmpwi   r3,0
    kuep_unlock
    bne-    .Lrestore_nvgprs

.Lfast_user_interrupt_return:
    lwz r11,_NIP(r1)
    lwz r12,_MSR(r1)
    mtspr   SPRN_SRR0,r11
    mtspr   SPRN_SRR1,r12

BEGIN_FTR_SECTION
    stwcx.  r0,0,r1     /* to clear the reservation */
FTR_SECTION_ELSE
    lwarx   r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

    lwz r3,_CCR(r1)
    lwz r4,_LINK(r1)
    lwz r5,_CTR(r1)
    lwz r6,_XER(r1)
    li  r0,0

    /*
     * Leaving a stale exception_marker on the stack can confuse
     * the reliable stack unwinder later on. Clear it.
     */
    stw r0,8(r1)
    REST_GPRS(7, 12, r1)

    mtcr    r3
    mtlr    r4
    mtctr   r5
    mtspr   SPRN_XER,r6

    REST_GPRS(2, 6, r1)
    REST_GPR(0, r1)
    REST_GPR(1, r1)
    rfi
#ifdef CONFIG_40x
    b . /* Prevent prefetch past rfi */
#endif

.Lrestore_nvgprs:
    REST_NVGPRS(r1)
    b   .Lfast_user_interrupt_return

.Lkernel_interrupt_return:
    bl  interrupt_exit_kernel_prepare

.Lfast_kernel_interrupt_return:
    cmpwi   cr1,r3,0
    lwz r11,_NIP(r1)
    lwz r12,_MSR(r1)
    mtspr   SPRN_SRR0,r11
    mtspr   SPRN_SRR1,r12

BEGIN_FTR_SECTION
    stwcx.  r0,0,r1     /* to clear the reservation */
FTR_SECTION_ELSE
    lwarx   r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

    lwz r3,_LINK(r1)
    lwz r4,_CTR(r1)
    lwz r5,_XER(r1)
    lwz r6,_CCR(r1)
    li  r0,0

    REST_GPRS(7, 12, r1)

    mtlr    r3
    mtctr   r4
    mtspr   SPRN_XER,r5

    /*
     * Leaving a stale exception_marker on the stack can confuse
     * the reliable stack unwinder later on. Clear it.
     */
    stw r0,8(r1)

    REST_GPRS(2, 5, r1)

    bne-    cr1,1f /* emulate stack store */
    mtcr    r6
    REST_GPR(6, r1)
    REST_GPR(0, r1)
    REST_GPR(1, r1)
    rfi
#ifdef CONFIG_40x
    b . /* Prevent prefetch past rfi */
#endif

1:  /*
     * Emulate stack store with update. New r1 value was already calculated
     * and updated in our interrupt regs by emulate_loadstore, but we can't
     * store the previous value of r1 to the stack before re-loading our
     * registers from it, otherwise they could be clobbered.  Use
     * SPRG Scratch0 as temporary storage to hold the store
     * data, as interrupts are disabled here so it won't be clobbered.
     */
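    /*
     * The instruction being completed is effectively "stwu r1,-N(r1)":
     * it stores the old r1 at the new (decremented) r1 and updates r1.
     * The register update was already done during emulation, so only the
     * store of the old value, recovered below as the exception frame
     * address plus INT_FRAME_SIZE, still has to be performed.
     */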
    mtcr    r6
#ifdef CONFIG_BOOKE
    mtspr   SPRN_SPRG_WSCRATCH0, r9
#else
    mtspr   SPRN_SPRG_SCRATCH0, r9
#endif
    addi    r9,r1,INT_FRAME_SIZE /* get original r1 */
    REST_GPR(6, r1)
    REST_GPR(0, r1)
    REST_GPR(1, r1)
    stw r9,0(r1) /* perform store component of stwu */
#ifdef CONFIG_BOOKE
    mfspr   r9, SPRN_SPRG_RSCRATCH0
#else
    mfspr   r9, SPRN_SPRG_SCRATCH0
#endif
    rfi
#ifdef CONFIG_40x
    b . /* Prevent prefetch past rfi */
#endif
_ASM_NOKPROBE_SYMBOL(interrupt_return)

#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 */
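/*
 * Book-E and 40x define additional interrupt levels, each with its own pair
 * of save/restore registers and its own return instruction: critical uses
 * CSRR0/CSRR1 with rfci, debug uses DSRR0/DSRR1 with rfdi, and machine check
 * uses MCSRR0/MCSRR1 with rfmci.  RET_FROM_EXC_LEVEL below is parameterised
 * on that triple, while the RESTORE_xSRR invocations reload the lower-level
 * SRR pairs that the higher-level handler may have clobbered.
 */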
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR                         \
    /* avoid any possible TLB misses here by turning off MSR.DR, we     \
     * assume the instructions here are mapped by a pinned TLB entry */ \
    li  r10,MSR_IR;                         \
    mtmsr   r10;                                \
    isync;                                  \
    tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif
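/*
 * Writing a value containing only MSR_IR keeps instruction translation on
 * (the code here is expected to be covered by a pinned TLB entry) while
 * turning data translation off, so the frame accesses that follow the macro
 * inside RET_FROM_EXC_LEVEL are made with real addresses; tophys() converts
 * r1 to match.
 */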

#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi) \
    REST_NVGPRS(r1);                        \
    lwz r3,_MSR(r1);                        \
    andi.   r3,r3,MSR_PR;                       \
    bne interrupt_return;                   \
    lwz r0,GPR0(r1);                        \
    lwz r2,GPR2(r1);                        \
    REST_GPRS(3, 8, r1);                        \
    lwz r10,_XER(r1);                       \
    lwz r11,_CTR(r1);                       \
    mtspr   SPRN_XER,r10;                       \
    mtctr   r11;                            \
    stwcx.  r0,0,r1;        /* to clear the reservation */  \
    lwz r11,_LINK(r1);                      \
    mtlr    r11;                            \
    lwz r10,_CCR(r1);                       \
    mtcrf   0xff,r10;                       \
    PPC_40x_TURN_OFF_MSR_DR;                    \
    lwz r9,_DEAR(r1);                       \
    lwz r10,_ESR(r1);                       \
    mtspr   SPRN_DEAR,r9;                       \
    mtspr   SPRN_ESR,r10;                       \
    lwz r11,_NIP(r1);                       \
    lwz r12,_MSR(r1);                       \
    mtspr   exc_lvl_srr0,r11;                   \
    mtspr   exc_lvl_srr1,r12;                   \
    lwz r9,GPR9(r1);                        \
    lwz r12,GPR12(r1);                      \
    lwz r10,GPR10(r1);                      \
    lwz r11,GPR11(r1);                      \
    lwz r1,GPR1(r1);                        \
    exc_lvl_rfi;                            \
    b   .;      /* prevent prefetch past exc_lvl_rfi */

#define RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)            \
    lwz r9,_##exc_lvl_srr0(r1);                 \
    lwz r10,_##exc_lvl_srr1(r1);                \
    mtspr   SPRN_##exc_lvl_srr0,r9;                 \
    mtspr   SPRN_##exc_lvl_srr1,r10;

#if defined(CONFIG_PPC_BOOK3E_MMU)
#ifdef CONFIG_PHYS_64BIT
#define RESTORE_MAS7                            \
    lwz r11,MAS7(r1);                       \
    mtspr   SPRN_MAS7,r11;
#else
#define RESTORE_MAS7
#endif /* CONFIG_PHYS_64BIT */
#define RESTORE_MMU_REGS                        \
    lwz r9,MAS0(r1);                        \
    lwz r10,MAS1(r1);                       \
    lwz r11,MAS2(r1);                       \
    mtspr   SPRN_MAS0,r9;                       \
    lwz r9,MAS3(r1);                        \
    mtspr   SPRN_MAS1,r10;                      \
    lwz r10,MAS6(r1);                       \
    mtspr   SPRN_MAS2,r11;                      \
    mtspr   SPRN_MAS3,r9;                       \
    mtspr   SPRN_MAS6,r10;                      \
    RESTORE_MAS7;
#elif defined(CONFIG_44x)
#define RESTORE_MMU_REGS                        \
    lwz r9,MMUCR(r1);                       \
    mtspr   SPRN_MMUCR,r9;
#else
#define RESTORE_MMU_REGS
#endif

#ifdef CONFIG_40x
    .globl  ret_from_crit_exc
ret_from_crit_exc:
    lis r9,crit_srr0@ha;
    lwz r9,crit_srr0@l(r9);
    lis r10,crit_srr1@ha;
    lwz r10,crit_srr1@l(r10);
    mtspr   SPRN_SRR0,r9;
    mtspr   SPRN_SRR1,r10;
    RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)
#endif /* CONFIG_40x */

#ifdef CONFIG_BOOKE
    .globl  ret_from_crit_exc
ret_from_crit_exc:
    RESTORE_xSRR(SRR0,SRR1);
    RESTORE_MMU_REGS;
    RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)

    .globl  ret_from_debug_exc
ret_from_debug_exc:
    RESTORE_xSRR(SRR0,SRR1);
    RESTORE_xSRR(CSRR0,CSRR1);
    RESTORE_MMU_REGS;
    RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
_ASM_NOKPROBE_SYMBOL(ret_from_debug_exc)

    .globl  ret_from_mcheck_exc
ret_from_mcheck_exc:
    RESTORE_xSRR(SRR0,SRR1);
    RESTORE_xSRR(CSRR0,CSRR1);
    RESTORE_xSRR(DSRR0,DSRR1);
    RESTORE_MMU_REGS;
    RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
_ASM_NOKPROBE_SYMBOL(ret_from_mcheck_exc)
#endif /* CONFIG_BOOKE */
#endif /* CONFIG_4xx || CONFIG_BOOKE */