/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/thread_info.h>
#include <asm/uaccess-asm.h>
#include <asm/v7m.h>

@ Bad Abort numbers
@ -----------------
@
#define BAD_PREFETCH    0
#define BAD_DATA    1
#define BAD_ADDREXCPTN  2
#define BAD_IRQ     3
#define BAD_UNDEFINSTR  4

@
@ Most of the stack format comes from struct pt_regs, but with
@ the addition of 8 bytes for storing syscall args 5 and 6.
@ This _must_ remain a multiple of 8 for EABI.
@
#define S_OFF       8
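@
@ A rough worked example (assuming the usual 32-bit ARM pt_regs of 18
@ words: r0-r15, cpsr and old_r0): 18 * 4 = 72 bytes of pt_regs plus
@ the 8 bytes of S_OFF gives an 80-byte syscall frame, which keeps the
@ stack 8-byte aligned as the EABI requires.
@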

/*
 * The SWI code relies on the fact that R0 is at the bottom of the stack
 * (due to slow/fast restore user regs).
 */
#if S_R0 != 0
#error "Please fix"
#endif

    .macro  zero_fp
#ifdef CONFIG_FRAME_POINTER
    mov fp, #0
#endif
    .endm

#ifdef CONFIG_ALIGNMENT_TRAP
#define ATRAP(x...) x
#else
#define ATRAP(x...)
#endif

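@
@ alignment_trap compares the live CP15 c1 (SCTLR) value against the
@ word cached at \label and rewrites SCTLR only when the two differ,
@ so the relatively expensive mcr is skipped on the common path.
@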
    .macro  alignment_trap, rtmp1, rtmp2, label
#ifdef CONFIG_ALIGNMENT_TRAP
    mrc p15, 0, \rtmp2, c1, c0, 0
    ldr_va  \rtmp1, \label
    teq \rtmp1, \rtmp2
    mcrne   p15, 0, \rtmp1, c1, c0, 0
#endif
    .endm

#ifdef CONFIG_CPU_V7M
/*
 * ARMv7-M exception entry/exit macros.
 *
 * xPSR, ReturnAddress(), LR (R14), R12, R3, R2, R1, and R0 are
 * automatically saved on the current stack (8 words, 32 bytes) before
 * switching to the exception stack (SP_main).
 *
 * If the exception is taken while in user mode, SP_main is
 * empty. Otherwise, SP_main is automatically aligned to 64 bits
 * (CCR.STKALIGN set).
 *
 * Linux assumes that interrupts are disabled when entering an
 * exception handler and it may BUG if this is not the case. Interrupts
 * are disabled during entry and reenabled in the exit macro.
 *
 * v7m_exception_slow_exit is used when returning from SVC or PendSV.
 * When returning to kernel mode, we don't return from exception.
 */
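/*
 * As a sketch, the frame the core pushes automatically looks like this
 * (lowest address first, one 32-bit word per entry):
 *
 *   r0, r1, r2, r3, r12, lr (r14), return address, xPSR
 *
 * plus one optional padding word when the core had to realign the
 * stack to 8 bytes (recorded in bit 9 of the stacked xPSR).
 */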
    .macro  v7m_exception_entry
    @ determine the location of the registers saved by the core during
    @ exception entry. Depending on the mode the cpu was in when the
    @ exception happened, that is either on the main or the process stack.
    @ Bit 2 of EXC_RETURN stored in the lr register specifies which stack
    @ was used.
    tst lr, #EXC_RET_STACK_MASK
    mrsne   r12, psp
    moveq   r12, sp

    @ we cannot rely on r0-r3 and r12 matching the value saved in the
    @ exception frame because of tail-chaining. So these have to be
    @ reloaded.
    ldmia   r12!, {r0-r3}

    @ Linux expects to have irqs off. Do it here before taking stack space
    cpsid   i

    sub sp, #PT_REGS_SIZE-S_IP
    stmdb   sp!, {r0-r11}

    @ load saved r12, lr, return address and xPSR.
    @ r0-r7 are used for signals and never touched from now on. Clobbering
    @ r8-r12 is OK.
    mov r9, r12
    ldmia   r9!, {r8, r10-r12}

    @ calculate the original stack pointer value.
    @ r9 currently points to the memory location just above the auto saved
    @ xPSR.
    @ The cpu might automatically 8-byte align the stack. Bit 9
    @ of the saved xPSR specifies whether stack alignment took place. In
    @ that case another 32-bit value is included in the stack.

    tst r12, V7M_xPSR_FRAMEPTRALIGN
    addne   r9, r9, #4

    @ store saved r12 using str to have a register to hold the base for stm
    str r8, [sp, #S_IP]
    add r8, sp, #S_SP
    @ store r13-r15, xPSR
    stmia   r8!, {r9-r12}
    @ store old_r0
    str r0, [r8]
    .endm
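
    @ On exit from v7m_exception_entry, sp points at a pt_regs style
    @ frame: r0-r12, the original sp, lr, the return address in the pc
    @ slot, xPSR in the cpsr slot and finally old_r0.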

    /*
     * PENDSV and SVCALL are configured to have the same exception
     * priorities. As a kernel thread runs at SVCALL execution priority it
     * can never be preempted and so we will never have to return to a
     * kernel thread here.
     */
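    /*
     * exc_ret (defined elsewhere, in the v7-M entry code) caches the
     * EXC_RETURN value seen at exception entry; loading it back into lr
     * lets the final bx lr below perform a proper exception return.
     */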
    .macro  v7m_exception_slow_exit ret_r0
    cpsid   i
    ldr lr, =exc_ret
    ldr lr, [lr]

    @ read original r12, sp, lr, pc and xPSR
    add r12, sp, #S_IP
    ldmia   r12, {r1-r5}

    @ an exception frame is always 8-byte aligned. To tell the hardware
    @ whether the sp to be restored is aligned or not, set bit 9 of the
    @ saved xPSR accordingly.
    tst r2, #4
    subne   r2, r2, #4
    orrne   r5, V7M_xPSR_FRAMEPTRALIGN
    biceq   r5, V7M_xPSR_FRAMEPTRALIGN

    @ ensure bit 0 is cleared in the PC, otherwise behaviour is
    @ unpredictable
    bic r4, #1

    @ write basic exception frame
    stmdb   r2!, {r1, r3-r5}
    ldmia   sp, {r1, r3-r5}
    .if \ret_r0
    stmdb   r2!, {r0, r3-r5}
    .else
    stmdb   r2!, {r1, r3-r5}
    .endif

    @ restore process sp
    msr psp, r2

    @ restore original r4-r11
    ldmia   sp!, {r0-r11}

    @ restore main sp
    add sp, sp, #PT_REGS_SIZE-S_IP

    cpsie   i
    bx  lr
    .endm
#endif  /* CONFIG_CPU_V7M */

    @
    @ Store/load the USER SP and LR registers by switching to the SYS
    @ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not
    @ available. Must only be called from SVC mode.
    @
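    @
    @ (SYS mode shares the user-mode banked sp and lr, so after the mode
    @ switch plain str/ldr can reach sp_usr and lr_usr directly.)
    @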
    .macro  store_user_sp_lr, rd, rtemp, offset = 0
    mrs \rtemp, cpsr
    eor \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
    msr cpsr_c, \rtemp          @ switch to the SYS mode

    str sp, [\rd, #\offset]     @ save sp_usr
    str lr, [\rd, #\offset + 4]     @ save lr_usr

    eor \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
    msr cpsr_c, \rtemp          @ switch back to the SVC mode
    .endm

    .macro  load_user_sp_lr, rd, rtemp, offset = 0
    mrs \rtemp, cpsr
    eor \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
    msr cpsr_c, \rtemp          @ switch to the SYS mode

    ldr sp, [\rd, #\offset]     @ load sp_usr
    ldr lr, [\rd, #\offset + 4]     @ load lr_usr

    eor \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
    msr cpsr_c, \rtemp          @ switch back to the SVC mode
    .endm


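    @
    @ svc_exit - return from an exception taken in SVC (kernel) mode.
    @ \rpsr names the register already holding the saved PSR to restore;
    @ irq = 1 selects the interrupt return path, where IRQs are known to
    @ be disabled already.
    @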
    .macro  svc_exit, rpsr, irq = 0
    .if \irq != 0
    @ IRQs already off
#ifdef CONFIG_TRACE_IRQFLAGS
    @ The parent context IRQs must have been enabled to get here in
    @ the first place, so there's no point checking the PSR I bit.
    bl  trace_hardirqs_on
#endif
    .else
    @ IRQs off again before pulling preserved data off the stack
    disable_irq_notrace
#ifdef CONFIG_TRACE_IRQFLAGS
    tst \rpsr, #PSR_I_BIT
    bleq    trace_hardirqs_on
    tst \rpsr, #PSR_I_BIT
    blne    trace_hardirqs_off
#endif
    .endif
    uaccess_exit tsk, r0, r1

#ifndef CONFIG_THUMB2_KERNEL
    @ ARM mode SVC restore
    msr spsr_cxsf, \rpsr
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
    @ We must avoid clrex due to Cortex-A15 erratum #830321
    sub r0, sp, #4          @ uninhabited address
    strex   r1, r2, [r0]            @ clear the exclusive monitor
#endif
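    @ (The dummy STREX above, when built for v6/v6K, clears the local
    @ exclusive monitor just as CLREX would; its status result in r1 is
    @ simply discarded.)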
    ldmia   sp, {r0 - pc}^          @ load r0 - pc, cpsr
#else
    @ Thumb mode SVC restore
    ldr lr, [sp, #S_SP]         @ top of the stack
    ldrd    r0, r1, [sp, #S_LR]     @ calling lr and pc

    @ We must avoid clrex due to Cortex-A15 erratum #830321
    strex   r2, r1, [sp, #S_LR]     @ clear the exclusive monitor

    stmdb   lr!, {r0, r1, \rpsr}        @ calling lr and rfe context
    ldmia   sp, {r0 - r12}
    mov sp, lr
    ldr lr, [sp], #4
    rfeia   sp!
#endif
    .endm

    @
    @ svc_exit_via_fiq - like svc_exit but switches to FIQ mode before exit
    @
    @ This macro acts in a similar manner to svc_exit but switches to FIQ
    @ mode to restore the final part of the register state.
    @
    @ We cannot use the normal svc_exit procedure because that would
    @ clobber spsr_svc (FIQ could be delivered during the first few
    @ instructions of vector_swi meaning its contents have not been
    @ saved anywhere).
    @
    @ Note that, unlike svc_exit, this macro also does not allow a caller
    @ supplied rpsr. This is because the FIQ exceptions are not re-entrant
    @ and the handlers cannot call into the scheduler (meaning the value
    @ on the stack remains correct).
    @
    .macro  svc_exit_via_fiq
    uaccess_exit tsk, r0, r1
#ifndef CONFIG_THUMB2_KERNEL
    @ ARM mode restore
    mov r0, sp
    ldmib   r0, {r1 - r14}  @ abort is deadly from here onward (it will
                @ clobber state restored below)
    msr cpsr_c, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
    add r8, r0, #S_PC
    ldr r9, [r0, #S_PSR]
    msr spsr_cxsf, r9
    ldr r0, [r0, #S_R0]
    ldmia   r8, {pc}^
#else
    @ Thumb mode restore
    add r0, sp, #S_R2
    ldr lr, [sp, #S_LR]
    ldr sp, [sp, #S_SP] @ abort is deadly from here onward (it will
                    @ clobber state restored below)
    ldmia   r0, {r2 - r12}
    mov r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
    msr cpsr_c, r1
    sub r0, #S_R2
    add r8, r0, #S_PC
    ldmia   r0, {r0 - r1}
    rfeia   r8
#endif
    .endm


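    @
    @ restore_user_regs - return to user space from the pt_regs frame at
    @ sp + \offset. With fast = 1, r0 is deliberately not reloaded so a
    @ syscall return value already sitting in r0 is preserved.
    @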
    .macro  restore_user_regs, fast = 0, offset = 0
#if defined(CONFIG_CPU_32v6K) && \
    (!defined(CONFIG_CPU_V6) || defined(CONFIG_SMP))
#ifdef CONFIG_CPU_V6
ALT_SMP(nop)
ALT_UP_B(.L1_\@)
#endif
    @ The TLS register update is deferred until return to user space so we
    @ can use it for other things while running in the kernel
    mrc p15, 0, r1, c13, c0, 3      @ get current_thread_info pointer
    ldr r1, [r1, #TI_TP_VALUE]
    mcr p15, 0, r1, c13, c0, 3      @ set TLS register
.L1_\@:
#endif

    uaccess_enable r1, isb=0
#ifndef CONFIG_THUMB2_KERNEL
    @ ARM mode restore
    mov r2, sp
    ldr r1, [r2, #\offset + S_PSR]  @ get calling cpsr
    ldr lr, [r2, #\offset + S_PC]!  @ get pc
    tst r1, #PSR_I_BIT | 0x0f
    bne 1f
    msr spsr_cxsf, r1           @ save in spsr_svc
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
    @ We must avoid clrex due to Cortex-A15 erratum #830321
    strex   r1, r2, [r2]            @ clear the exclusive monitor
#endif
    .if \fast
    ldmdb   r2, {r1 - lr}^          @ get calling r1 - lr
    .else
    ldmdb   r2, {r0 - lr}^          @ get calling r0 - lr
    .endif
    mov r0, r0              @ ARMv5T and earlier require a nop
                        @ after ldm {}^
    add sp, sp, #\offset + PT_REGS_SIZE
    movs    pc, lr              @ return & move spsr_svc into cpsr
1:  bug "Returning to usermode but unexpected PSR bits set?", \@
#elif defined(CONFIG_CPU_V7M)
    @ V7M restore.
    @ Note that we don't need to do clrex here as clearing the local
    @ monitor is part of the exception entry and exit sequence.
    .if \offset
    add sp, #\offset
    .endif
    v7m_exception_slow_exit ret_r0 = \fast
#else
    @ Thumb mode restore
    mov r2, sp
    load_user_sp_lr r2, r3, \offset + S_SP  @ calling sp, lr
    ldr r1, [sp, #\offset + S_PSR]  @ get calling cpsr
    ldr lr, [sp, #\offset + S_PC]   @ get pc
    add sp, sp, #\offset + S_SP
    tst r1, #PSR_I_BIT | 0x0f
    bne 1f
    msr spsr_cxsf, r1           @ save in spsr_svc

    @ We must avoid clrex due to Cortex-A15 erratum #830321
    strex   r1, r2, [sp]            @ clear the exclusive monitor

    .if \fast
    ldmdb   sp, {r1 - r12}          @ get calling r1 - r12
    .else
    ldmdb   sp, {r0 - r12}          @ get calling r0 - r12
    .endif
    add sp, sp, #PT_REGS_SIZE - S_SP
    movs    pc, lr              @ return & move spsr_svc into cpsr
1:  bug "Returning to usermode but unexpected PSR bits set?", \@
#endif  /* !CONFIG_THUMB2_KERNEL */
    .endm

/*
 * Context tracking subsystem.  Used to instrument transitions
 * between user and kernel mode.
 */
    .macro ct_user_exit, save = 1
#ifdef CONFIG_CONTEXT_TRACKING_USER
    .if \save
    stmdb   sp!, {r0-r3, ip, lr}
    bl  user_exit_callable
    ldmia   sp!, {r0-r3, ip, lr}
    .else
    bl  user_exit_callable
    .endif
#endif
    .endm

    .macro ct_user_enter, save = 1
#ifdef CONFIG_CONTEXT_TRACKING_USER
    .if \save
    stmdb   sp!, {r0-r3, ip, lr}
    bl  user_enter_callable
    ldmia   sp!, {r0-r3, ip, lr}
    .else
    bl  user_enter_callable
    .endif
#endif
    .endm

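    @
    @ invoke_syscall - branch to entry \nr of \table. With
    @ CONFIG_CPU_SPECTRE an out-of-range syscall number is first clamped
    @ to 0 and csdb keeps speculation from using the unclamped value, so
    @ a mispredicted bounds check cannot index past the end of the
    @ syscall table (Spectre variant 1).
    @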
    .macro  invoke_syscall, table, nr, tmp, ret, reload=0
#ifdef CONFIG_CPU_SPECTRE
    mov \tmp, \nr
    cmp \tmp, #NR_syscalls      @ check upper syscall limit
    movcs   \tmp, #0
    csdb
    badr    lr, \ret            @ return address
    .if \reload
    add r1, sp, #S_R0 + S_OFF       @ pointer to regs
    ldmiacc r1, {r0 - r6}           @ reload r0-r6
    stmiacc sp, {r4, r5}            @ update stack arguments
    .endif
    ldrcc   pc, [\table, \tmp, lsl #2]  @ call sys_* routine
#else
    cmp \nr, #NR_syscalls       @ check upper syscall limit
    badr    lr, \ret            @ return address
    .if \reload
    add r1, sp, #S_R0 + S_OFF       @ pointer to regs
    ldmiacc r1, {r0 - r6}           @ reload r0-r6
    stmiacc sp, {r4, r5}            @ update stack arguments
    .endif
    ldrcc   pc, [\table, \nr, lsl #2]   @ call sys_* routine
#endif
    .endm

/*
 * These are the registers used in the syscall handler, and allow us,
 * in theory, to pass up to 7 arguments to a function - r0 to r6.
 *
 * r7 is reserved for the system call number in Thumb mode.
 *
 * Note that tbl == why is intentional.
 *
 * We must set at least "tsk" and "why" when calling ret_with_reschedule.
 */
scno    .req    r7      @ syscall number
tbl .req    r8      @ syscall table pointer
why .req    r8      @ Linux syscall (!= 0)
tsk .req    r9      @ current thread_info

    .macro  do_overflow_check, frame_size:req
#ifdef CONFIG_VMAP_STACK
    @
    @ Test whether the SP has overflowed. Task and IRQ stacks are aligned
    @ so that SP & BIT(THREAD_SIZE_ORDER + PAGE_SHIFT) should always be
    @ zero.
    @
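    @
    @ As a worked example (assuming THREAD_SIZE_ORDER = 1 and
    @ PAGE_SHIFT = 12, i.e. 8 KiB stacks), the tst below checks bit 13
    @ of SP: it is clear for any address inside a properly aligned stack
    @ and becomes set as soon as SP drops just below the stack's base.
    @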
ARM(    tst sp, #1 << (THREAD_SIZE_ORDER + PAGE_SHIFT)  )
THUMB(  tst r1, #1 << (THREAD_SIZE_ORDER + PAGE_SHIFT)  )
THUMB(  it  ne                      )
    bne .Lstack_overflow_check\@

    .pushsection    .text
.Lstack_overflow_check\@:
    @
    @ The stack pointer is not pointing to a valid vmap'ed stack, but it
    @ may be pointing into the linear map instead, which may happen if we
    @ are already running from the overflow stack. We cannot detect overflow
    @ in such cases so just carry on.
    @
    str ip, [r0, #12]           @ Stash IP on the mode stack
    ldr_va  ip, high_memory         @ Start of VMALLOC space
ARM(    cmp sp, ip          )   @ SP in vmalloc space?
THUMB(  cmp r1, ip          )
THUMB(  itt lo          )
    ldrlo   ip, [r0, #12]           @ Restore IP
    blo .Lout\@             @ Carry on

THUMB(  sub r1, sp, r1      )   @ Restore original R1
THUMB(  sub sp, r1          )   @ Restore original SP
    add sp, sp, #\frame_size        @ Undo svc_entry's SP change
    b   __bad_stack         @ Handle VMAP stack overflow
    .popsection
.Lout\@:
#endif
    .endm