#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/thread_info.h>
#include <asm/uaccess-asm.h>
#include <asm/v7m.h>

@ Bad Abort numbers
@ -----------------
@
#define BAD_PREFETCH		0
#define BAD_DATA		1
#define BAD_ADDREXCPTN		2
#define BAD_IRQ			3
#define BAD_UNDEFINSTR		4

@
@ Most of the stack format comes from struct pt_regs, but with
@ the addition of 8 bytes for storing syscall args 5 and 6.
@ This _must_ remain a multiple of 8 for EABI.
@
#define S_OFF			8
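@ The syscall return paths rely on the saved r0 sitting at the very
@ bottom of the pt_regs frame (restore_user_regs reloads it with an
@ ldm relative to sp), so S_R0 must be zero.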
#if S_R0 != 0
#error "Please fix"
#endif

        .macro  zero_fp
#ifdef CONFIG_FRAME_POINTER
        mov     fp, #0
#endif
        .endm

#ifdef CONFIG_ALIGNMENT_TRAP
#define ATRAP(x...) x
#else
#define ATRAP(x...)
#endif

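@ Keep the CP15 control register (SCTLR: c1, c0, 0) in sync with the
@ kernel's cached copy at \label, rewriting the register only when the
@ two differ. Assembled away unless CONFIG_ALIGNMENT_TRAP is enabled.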
        .macro  alignment_trap, rtmp1, rtmp2, label
#ifdef CONFIG_ALIGNMENT_TRAP
        mrc     p15, 0, \rtmp2, c1, c0, 0
        ldr_va  \rtmp1, \label
        teq     \rtmp1, \rtmp2
        mcrne   p15, 0, \rtmp1, c1, c0, 0
#endif
        .endm

#ifdef CONFIG_CPU_V7M

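@ On ARMv7-M the core automatically stacks r0-r3, r12, lr, the return
@ address and xPSR on exception entry, and leaves an EXC_RETURN magic
@ value in lr that, among other things, records whether the main or
@ the process stack was in use. The macro below rebuilds a struct
@ pt_regs style frame on the main stack from that hardware frame.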
        .macro  v7m_exception_entry
        @ determine the location of the registers saved by the core during
        @ exception entry. Depending on the mode the cpu was in when the
        @ exception happened that is either on the main or the process stack.
        @ Bit 2 of EXC_RETURN stored in the lr register specifies which stack
        @ was used.
        tst     lr, #EXC_RET_STACK_MASK
        mrsne   r12, psp
        moveq   r12, sp

        @ we cannot rely on r0-r3 and r12 matching the value saved in the
        @ exception frame because of tail-chaining. So these have to be
        @ reloaded.
        ldmia   r12!, {r0-r3}

        @ Linux expects to have irqs off. Do it here before taking stack space
        cpsid   i

        sub     sp, #PT_REGS_SIZE-S_IP
        stmdb   sp!, {r0-r11}

        @ load saved r12, lr, return address and xPSR.
        @ r0-r7 are used for signals and never touched from now on. Clobbering
        @ r8-r12 is OK.
        mov     r9, r12
        ldmia   r9!, {r8, r10-r12}

        @ calculate the original stack pointer value.
        @ r9 currently points to the memory location just above the auto saved
        @ xPSR.
        @ The cpu might automatically 8-byte align the stack. Bit 9
        @ of the saved xPSR specifies if stack aligning took place. In this case
        @ another 32-bit value is included in the stack.

        tst     r12, V7M_xPSR_FRAMEPTRALIGN
        addne   r9, r9, #4

        @ store saved r12 using str to have a register to hold the base for stm
        str     r8, [sp, #S_IP]
        add     r8, sp, #S_SP
        @ store r13-r15, xPSR
        stmia   r8!, {r9-r12}
        @ store old_r0
        str     r0, [r8]
        .endm
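@ The counterpart to v7m_exception_entry: rebuild the hardware
@ exception frame on the process stack from the saved pt_regs and
@ return through the EXC_RETURN value cached in exc_ret, letting the
@ core unstack the frame on exception return. With ret_r0 set, the
@ live r0 (e.g. a syscall return value) is written to the frame in
@ place of the stacked one.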
        .macro  v7m_exception_slow_exit ret_r0
        cpsid   i
        ldr     lr, =exc_ret
        ldr     lr, [lr]

        @ read original r12, sp, lr, pc and xPSR
        add     r12, sp, #S_IP
        ldmia   r12, {r1-r5}

        @ an exception frame is always 8-byte aligned. To tell the hardware
        @ whether the sp to be restored is aligned or not, set bit 9 of the
        @ saved xPSR accordingly.
        tst     r2, #4
        subne   r2, r2, #4
        orrne   r5, V7M_xPSR_FRAMEPTRALIGN
        biceq   r5, V7M_xPSR_FRAMEPTRALIGN

        @ ensure bit 0 is cleared in the PC, otherwise behaviour is
        @ unpredictable
        bic     r4, #1

        @ write basic exception frame
        stmdb   r2!, {r1, r3-r5}
        ldmia   sp, {r1, r3-r5}
        .if     \ret_r0
        stmdb   r2!, {r0, r3-r5}
        .else
        stmdb   r2!, {r1, r3-r5}
        .endif

        @ restore process sp
        msr     psp, r2

        @ restore original r4-r11
        ldmia   sp!, {r0-r11}

        @ restore main sp
        add     sp, sp, #PT_REGS_SIZE-S_IP

        cpsie   i
        bx      lr
        .endm
#endif

@
@ Store/load the USER SP and LR registers by switching to the SYS
@ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not
@ available. Should only be called from SVC mode
@
        .macro  store_user_sp_lr, rd, rtemp, offset = 0
        mrs     \rtemp, cpsr
        eor     \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
        msr     cpsr_c, \rtemp                  @ switch to the SYS mode

        str     sp, [\rd, #\offset]             @ save sp_usr
        str     lr, [\rd, #\offset + 4]         @ save lr_usr

        eor     \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
        msr     cpsr_c, \rtemp                  @ switch back to the SVC mode
        .endm

        .macro  load_user_sp_lr, rd, rtemp, offset = 0
        mrs     \rtemp, cpsr
        eor     \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
        msr     cpsr_c, \rtemp                  @ switch to the SYS mode

        ldr     sp, [\rd, #\offset]             @ load sp_usr
        ldr     lr, [\rd, #\offset + 4]         @ load lr_usr

        eor     \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
        msr     cpsr_c, \rtemp                  @ switch back to the SVC mode
        .endm
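@
@ svc_exit - restore the saved pt_regs frame at sp and return to the
@ interrupted SVC-mode context, tracing the IRQ state transition when
@ CONFIG_TRACE_IRQFLAGS is enabled.
@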
        .macro  svc_exit, rpsr, irq = 0
        .if     \irq != 0
        @ IRQs already off
#ifdef CONFIG_TRACE_IRQFLAGS
        @ The parent context IRQs must have been enabled to get here in
        @ the first place, so there's no point checking the PSR I bit.
        bl      trace_hardirqs_on
#endif
        .else
        @ IRQs off again before pulling preserved data off the stack
        disable_irq_notrace
#ifdef CONFIG_TRACE_IRQFLAGS
        tst     \rpsr, #PSR_I_BIT
        bleq    trace_hardirqs_on
        tst     \rpsr, #PSR_I_BIT
        blne    trace_hardirqs_off
#endif
        .endif
        uaccess_exit tsk, r0, r1

#ifndef CONFIG_THUMB2_KERNEL
        @ ARM mode SVC restore
        msr     spsr_cxsf, \rpsr
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
        @ We must avoid clrex due to Cortex-A15 erratum #830321
        sub     r0, sp, #4                      @ uninhabited address
        strex   r1, r2, [r0]                    @ clear the exclusive monitor
#endif
        ldmia   sp, {r0 - pc}^                  @ load r0 - pc, cpsr
#else
        @ Thumb mode SVC restore
        ldr     lr, [sp, #S_SP]                 @ top of the stack
        ldrd    r0, r1, [sp, #S_LR]             @ calling lr and pc

        @ We must avoid clrex due to Cortex-A15 erratum #830321
        strex   r2, r1, [sp, #S_LR]             @ clear the exclusive monitor

        stmdb   lr!, {r0, r1, \rpsr}            @ calling lr and rfe context
        ldmia   sp, {r0 - r12}
        mov     sp, lr
        ldr     lr, [sp], #4
        rfeia   sp!
#endif
        .endm

@
@ svc_exit_via_fiq - like svc_exit but switches to FIQ mode before exit
@
@ This macro acts in a similar manner to svc_exit but switches to FIQ
@ mode to restore the final part of the register state.
@
@ We cannot use the normal svc_exit procedure because that would
@ clobber spsr_svc (FIQ could be delivered during the first few
@ instructions of vector_swi meaning its contents have not been
@ saved anywhere).
@
@ Note that, unlike svc_exit, this macro also does not allow a caller
@ supplied rpsr. This is because the FIQ exceptions are not re-entrant
@ and the handlers cannot call into the scheduler (meaning the value
@ on the stack remains correct).
@
        .macro  svc_exit_via_fiq
        uaccess_exit tsk, r0, r1
#ifndef CONFIG_THUMB2_KERNEL
        @ ARM mode restore
        mov     r0, sp
        ldmib   r0, {r1 - r14}  @ abort is deadly from here onward (it will
                                @ clobber state restored below)
        msr     cpsr_c, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
        add     r8, r0, #S_PC
        ldr     r9, [r0, #S_PSR]
        msr     spsr_cxsf, r9
        ldr     r0, [r0, #S_R0]
        ldmia   r8, {pc}^
#else
        @ Thumb mode restore
        add     r0, sp, #S_R2
        ldr     lr, [sp, #S_LR]
        ldr     sp, [sp, #S_SP] @ abort is deadly from here onward (it will
                                @ clobber state restored below)
        ldmia   r0, {r2 - r12}
        mov     r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
        msr     cpsr_c, r1
        sub     r0, #S_R2
        add     r8, r0, #S_PC
        ldmia   r0, {r0 - r1}
        rfeia   r8
#endif
        .endm
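@
@ restore_user_regs - return to user space from the pt_regs frame at
@ sp + offset. With fast = 1 the saved r0 is skipped so a syscall
@ return value already in r0 is preserved. The TLS register is also
@ refreshed from thread_info on v6K and later before returning.
@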
        .macro  restore_user_regs, fast = 0, offset = 0
#if defined(CONFIG_CPU_32v6K) && \
    (!defined(CONFIG_CPU_V6) || defined(CONFIG_SMP))
#ifdef CONFIG_CPU_V6
ALT_SMP(nop)
ALT_UP_B(.L1_\@)
#endif
        @ The TLS register update is deferred until return to user space so we
        @ can use it for other things while running in the kernel
        mrc     p15, 0, r1, c13, c0, 3          @ get current_thread_info pointer
        ldr     r1, [r1, #TI_TP_VALUE]
        mcr     p15, 0, r1, c13, c0, 3          @ set TLS register
.L1_\@:
#endif

        uaccess_enable r1, isb=0
#ifndef CONFIG_THUMB2_KERNEL
        @ ARM mode restore
        mov     r2, sp
        ldr     r1, [r2, #\offset + S_PSR]      @ get calling cpsr
        ldr     lr, [r2, #\offset + S_PC]!      @ get pc
        tst     r1, #PSR_I_BIT | 0x0f
        bne     1f
        msr     spsr_cxsf, r1                   @ save in spsr_svc
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
        @ We must avoid clrex due to Cortex-A15 erratum #830321
        strex   r1, r2, [r2]                    @ clear the exclusive monitor
#endif
        .if     \fast
        ldmdb   r2, {r1 - lr}^                  @ get calling r1 - lr
        .else
        ldmdb   r2, {r0 - lr}^                  @ get calling r0 - lr
        .endif
        mov     r0, r0                          @ ARMv5T and earlier require a nop
                                                @ after ldm {}^
        add     sp, sp, #\offset + PT_REGS_SIZE
        movs    pc, lr                          @ return & move spsr_svc into cpsr
1:      bug     "Returning to usermode but unexpected PSR bits set?", \@
#elif defined(CONFIG_CPU_V7M)
        @ V7M restore.
        @ Note that we don't need to do clrex here as clearing the local
        @ monitor is part of the exception entry and exit sequence.
        .if     \offset
        add     sp, #\offset
        .endif
        v7m_exception_slow_exit ret_r0 = \fast
#else
        @ Thumb mode restore
        mov     r2, sp
        load_user_sp_lr r2, r3, \offset + S_SP  @ calling sp, lr
        ldr     r1, [sp, #\offset + S_PSR]      @ get calling cpsr
        ldr     lr, [sp, #\offset + S_PC]       @ get pc
        add     sp, sp, #\offset + S_SP
        tst     r1, #PSR_I_BIT | 0x0f
        bne     1f
        msr     spsr_cxsf, r1                   @ save in spsr_svc

        @ We must avoid clrex due to Cortex-A15 erratum #830321
        strex   r1, r2, [sp]                    @ clear the exclusive monitor

        .if     \fast
        ldmdb   sp, {r1 - r12}                  @ get calling r1 - r12
        .else
        ldmdb   sp, {r0 - r12}                  @ get calling r0 - r12
        .endif
        add     sp, sp, #PT_REGS_SIZE - S_SP
        movs    pc, lr                          @ return & move spsr_svc into cpsr
1:      bug     "Returning to usermode but unexpected PSR bits set?", \@
#endif
        .endm
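@
@ Context tracking hooks: tell the context tracking subsystem when we
@ cross the user/kernel boundary, saving and restoring the caller-
@ clobbered registers around the C call when save = 1. Compiled out
@ unless CONFIG_CONTEXT_TRACKING_USER is enabled.
@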
        .macro  ct_user_exit, save = 1
#ifdef CONFIG_CONTEXT_TRACKING_USER
        .if     \save
        stmdb   sp!, {r0-r3, ip, lr}
        bl      user_exit_callable
        ldmia   sp!, {r0-r3, ip, lr}
        .else
        bl      user_exit_callable
        .endif
#endif
        .endm

        .macro  ct_user_enter, save = 1
#ifdef CONFIG_CONTEXT_TRACKING_USER
        .if     \save
        stmdb   sp!, {r0-r3, ip, lr}
        bl      user_enter_callable
        ldmia   sp!, {r0-r3, ip, lr}
        .else
        bl      user_enter_callable
        .endif
#endif
        .endm
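@
@ invoke_syscall - dispatch a syscall through \table. Under
@ CONFIG_CPU_SPECTRE the syscall number is clamped to zero for
@ out-of-range values and a csdb speculation barrier is issued, so an
@ out-of-bounds index cannot be used speculatively to load a pointer
@ from the table (Spectre variant 1).
@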
        .macro  invoke_syscall, table, nr, tmp, ret, reload=0
#ifdef CONFIG_CPU_SPECTRE
        mov     \tmp, \nr
        cmp     \tmp, #NR_syscalls              @ check upper syscall limit
        movcs   \tmp, #0
        csdb
        badr    lr, \ret                        @ return address
        .if     \reload
        add     r1, sp, #S_R0 + S_OFF           @ pointer to regs
        ldmiacc r1, {r0 - r6}                   @ reload r0-r6
        stmiacc sp, {r4, r5}                    @ update stack arguments
        .endif
        ldrcc   pc, [\table, \tmp, lsl #2]      @ call sys_* routine
#else
        cmp     \nr, #NR_syscalls               @ check upper syscall limit
        badr    lr, \ret                        @ return address
        .if     \reload
        add     r1, sp, #S_R0 + S_OFF           @ pointer to regs
        ldmiacc r1, {r0 - r6}                   @ reload r0-r6
        stmiacc sp, {r4, r5}                    @ update stack arguments
        .endif
        ldrcc   pc, [\table, \nr, lsl #2]       @ call sys_* routine
#endif
        .endm
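@
@ Register aliases used by the syscall handling code. scno (r7) also
@ carries the syscall number for Thumb-mode user space; tbl and why
@ deliberately share r8, since the table pointer is no longer needed
@ once "why" is consulted.
@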
scno    .req    r7              @ syscall number
tbl     .req    r8              @ syscall table pointer
why     .req    r8              @ Linux syscall (!= 0)
tsk     .req    r9              @ current thread_info

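@ Check for a vmap'ed stack overflow on kernel entry. \frame_size must
@ match the amount the caller (svc_entry) has already subtracted from
@ sp, so the overflow path can undo that adjustment before branching
@ to __bad_stack.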
        .macro  do_overflow_check, frame_size:req
#ifdef CONFIG_VMAP_STACK
        @
        @ Test whether the SP has overflowed. Task and IRQ stacks are aligned
        @ so that SP & BIT(THREAD_SIZE_ORDER + PAGE_SHIFT) should always be
        @ zero.
        @
ARM(    tst     sp, #1 << (THREAD_SIZE_ORDER + PAGE_SHIFT)      )
THUMB(  tst     r1, #1 << (THREAD_SIZE_ORDER + PAGE_SHIFT)      )
THUMB(  it      ne                                              )
        bne     .Lstack_overflow_check\@

        .pushsection    .text
.Lstack_overflow_check\@:
        @
        @ The stack pointer is not pointing to a valid vmap'ed stack, but it
        @ may be pointing into the linear map instead, which may happen if we
        @ are already running from the overflow stack. We cannot detect overflow
        @ in such cases so just carry on.
        @
        str     ip, [r0, #12]                   @ Stash IP on the mode stack
        ldr_va  ip, high_memory                 @ Start of VMALLOC space
ARM(    cmp     sp, ip                  )       @ SP in vmalloc space?
THUMB(  cmp     r1, ip                  )
THUMB(  itt     lo                      )
        ldrlo   ip, [r0, #12]                   @ Restore IP
        blo     .Lout\@                         @ Carry on

THUMB(  sub     r1, sp, r1      )               @ Restore original R1
THUMB(  sub     sp, r1          )               @ Restore original SP
        add     sp, sp, #\frame_size            @ Undo svc_entry's SP change
        b       __bad_stack                     @ Handle VMAP stack overflow
        .popsection
.Lout\@:
#endif
        .endm