/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Low-level vector interface routines
 *
 *  Note:  there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 *  that causes it to save wrong values...  Be aware!
 */

#include <linux/init.h>

#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
#ifndef CONFIG_MULTI_IRQ_HANDLER
#include <mach/entry-macro.S>
#endif
#include <asm/thread_notify.h>
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/tls.h>
#include <asm/system_info.h>

#include "entry-header.S"
#include <asm/entry-macro-multi.S>
#include <asm/probes.h>

/*
 * Interrupt handling.
 */
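/*
 * The irq_handler macro dispatches the interrupt either through the
 * handle_arch_irq function pointer (CONFIG_MULTI_IRQ_HANDLER kernels)
 * or through the platform's arch_irq_handler_default macro provided by
 * <mach/entry-macro.S>.
 */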
    .macro  irq_handler
#ifdef CONFIG_MULTI_IRQ_HANDLER
    ldr r1, =handle_arch_irq
    mov r0, sp
    badr    lr, 9997f
    ldr pc, [r1]
#else
    arch_irq_handler_default
#endif
9997:
    .endm

    .macro  pabt_helper
    @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
#ifdef MULTI_PABORT
    ldr ip, .LCprocfns
    mov lr, pc
    ldr pc, [ip, #PROCESSOR_PABT_FUNC]
#else
    bl  CPU_PABORT_HANDLER
#endif
    .endm

    .macro  dabt_helper

    @
    @ Call the processor-specific abort handler:
    @
    @  r2 - pt_regs
    @  r4 - aborted context pc
    @  r5 - aborted context psr
    @
    @ The abort handler must return the aborted address in r0, and
    @ the fault status register in r1.  r9 must be preserved.
    @
#ifdef MULTI_DABORT
    ldr ip, .LCprocfns
    mov lr, pc
    ldr pc, [ip, #PROCESSOR_DABT_FUNC]
#else
    bl  CPU_DABORT_HANDLER
#endif
    .endm

#ifdef CONFIG_KPROBES
    .section    .kprobes.text,"ax",%progbits
#else
    .text
#endif

/*
 * Invalid mode handlers
 */
    .macro  inv_entry, reason
    sub sp, sp, #PT_REGS_SIZE
 ARM(   stmib   sp, {r1 - lr}       )
 THUMB( stmia   sp, {r0 - r12}      )
 THUMB( str sp, [sp, #S_SP]     )
 THUMB( str lr, [sp, #S_LR]     )
    mov r1, #\reason
    .endm

__pabt_invalid:
    inv_entry BAD_PREFETCH
    b   common_invalid
ENDPROC(__pabt_invalid)

__dabt_invalid:
    inv_entry BAD_DATA
    b   common_invalid
ENDPROC(__dabt_invalid)

__irq_invalid:
    inv_entry BAD_IRQ
    b   common_invalid
ENDPROC(__irq_invalid)

__und_invalid:
    inv_entry BAD_UNDEFINSTR

    @
    @ XXX fall through to common_invalid
    @

@
@ common_invalid - generic code for failed exception (re-entrant version of handlers)
@
common_invalid:
    zero_fp

    ldmia   r0, {r4 - r6}
    add r0, sp, #S_PC       @ here for interlock avoidance
    mov r7, #-1         @  ""   ""    ""        ""
    str r4, [sp]        @ save preserved r0
    stmia   r0, {r5 - r7}       @ lr_<exception>,
                    @ cpsr_<exception>, "old_r0"

    mov r0, sp
    b   bad_mode
ENDPROC(__und_invalid)

/*
 * SVC mode handlers
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif

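/*
 * SPFIX() emits its argument only on EABI kernels (ARMv5+), where the
 * SVC stack must be re-aligned to 8 bytes before the pt_regs frame is
 * saved; on OABI kernels it expands to nothing.
 */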
    .macro  svc_entry, stack_hole=0, trace=1, uaccess=1
 UNWIND(.fnstart        )
 UNWIND(.save {r0 - pc}     )
    sub sp, sp, #(SVC_REGS_SIZE + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL
 SPFIX( str r0, [sp]    )   @ temporarily saved
 SPFIX( mov r0, sp      )
 SPFIX( tst r0, #4      )   @ test original stack alignment
 SPFIX( ldr r0, [sp]    )   @ restored
#else
 SPFIX( tst sp, #4      )
#endif
 SPFIX( subeq   sp, sp, #4  )
    stmia   sp, {r1 - r12}

    ldmia   r0, {r3 - r5}
    add r7, sp, #S_SP - 4   @ here for interlock avoidance
    mov r6, #-1         @  ""  ""      ""       ""
    add r2, sp, #(SVC_REGS_SIZE + \stack_hole - 4)
 SPFIX( addeq   r2, r2, #4  )
    str r3, [sp, #-4]!      @ save the "real" r0 copied
                    @ from the exception stack

    mov r3, lr

    @
    @ We are now ready to fill in the remaining blanks on the stack:
    @
    @  r2 - sp_svc
    @  r3 - lr_svc
    @  r4 - lr_<exception>, already fixed up for correct return/restart
    @  r5 - spsr_<exception>
    @  r6 - orig_r0 (see pt_regs definition in ptrace.h)
    @
    stmia   r7, {r2 - r6}

    get_thread_info tsk
    ldr r0, [tsk, #TI_ADDR_LIMIT]
    mov r1, #TASK_SIZE
    str r1, [tsk, #TI_ADDR_LIMIT]
    str r0, [sp, #SVC_ADDR_LIMIT]

    uaccess_save r0
    .if \uaccess
    uaccess_disable r0
    .endif

    .if \trace
#ifdef CONFIG_TRACE_IRQFLAGS
    bl  trace_hardirqs_off
#endif
    .endif
    .endm

    .align  5
__dabt_svc:
    svc_entry uaccess=0
    mov r2, sp
    dabt_helper
 THUMB( ldr r5, [sp, #S_PSR]    )   @ potentially updated CPSR
    svc_exit r5             @ return from exception
 UNWIND(.fnend      )
ENDPROC(__dabt_svc)

    .align  5
__irq_svc:
    svc_entry
    irq_handler

#ifdef CONFIG_PREEMPT
    ldr r8, [tsk, #TI_PREEMPT]      @ get preempt count
    ldr r0, [tsk, #TI_FLAGS]        @ get flags
    teq r8, #0              @ if preempt count != 0
    movne   r0, #0              @ force flags to 0
    tst r0, #_TIF_NEED_RESCHED
    blne    svc_preempt
#endif

    svc_exit r5, irq = 1            @ return from exception
 UNWIND(.fnend      )
ENDPROC(__irq_svc)

    .ltorg

#ifdef CONFIG_PREEMPT
svc_preempt:
    mov r8, lr
1:  bl  preempt_schedule_irq        @ irq en/disable is done inside
    ldr r0, [tsk, #TI_FLAGS]        @ get new task's TI_FLAGS
    tst r0, #_TIF_NEED_RESCHED
    reteq   r8              @ go again
    b   1b
#endif

__und_fault:
    @ Correct the PC such that it is pointing at the instruction
    @ which caused the fault.  If the faulting instruction was ARM,
    @ the PC will be pointing at the next instruction, so we have to
    @ subtract 4.  Otherwise, it is Thumb, and the PC will be
    @ pointing at the second half of the Thumb instruction, so we
    @ have to subtract 2.
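    @ On entry: r0 = struct pt_regs *, r1 = PC correction to apply
    @ (4 or 2, set by the callers below).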
    ldr r2, [r0, #S_PC]
    sub r2, r2, r1
    str r2, [r0, #S_PC]
    b   do_undefinstr
ENDPROC(__und_fault)

    .align  5
__und_svc:
#ifdef CONFIG_KPROBES
    @ If a kprobe is about to simulate a "stmdb sp..." instruction,
    @ it obviously needs free stack space which then will belong to
    @ the saved context.
    svc_entry MAX_STACK_SIZE
#else
    svc_entry
#endif
    @
    @ call emulation code, which returns using r9 if it has emulated
    @ the instruction, or the more conventional lr if we are to treat
    @ this as a real undefined instruction
    @
    @  r0 - instruction
    @
#ifndef CONFIG_THUMB2_KERNEL
    ldr r0, [r4, #-4]
#else
    mov r1, #2
    ldrh    r0, [r4, #-2]           @ Thumb instruction at LR - 2
    cmp r0, #0xe800         @ first half of a 32-bit insn if >= 0xe800
    blo __und_svc_fault
    ldrh    r9, [r4]            @ bottom 16 bits
    add r4, r4, #2
    str r4, [sp, #S_PC]
    orr r0, r9, r0, lsl #16
#endif
    badr    r9, __und_svc_finish
    mov r2, r4
    bl  call_fpe

    mov r1, #4              @ PC correction to apply
__und_svc_fault:
    mov r0, sp              @ struct pt_regs *regs
    bl  __und_fault

__und_svc_finish:
    get_thread_info tsk
    ldr r5, [sp, #S_PSR]        @ Get SVC cpsr
    svc_exit r5             @ return from exception
 UNWIND(.fnend      )
ENDPROC(__und_svc)

    .align  5
__pabt_svc:
    svc_entry
    mov r2, sp              @ regs
    pabt_helper
    svc_exit r5             @ return from exception
 UNWIND(.fnend      )
ENDPROC(__pabt_svc)

    .align  5
__fiq_svc:
    svc_entry trace=0
    mov r0, sp              @ struct pt_regs *regs
    bl  handle_fiq_as_nmi
    svc_exit_via_fiq
 UNWIND(.fnend      )
ENDPROC(__fiq_svc)

    .align  5
.LCcralign:
    .word   cr_alignment
#ifdef MULTI_DABORT
.LCprocfns:
    .word   processor
#endif
.LCfp:
    .word   fp_enter

/*
 * Abort mode handlers
 */

@
@ Taking a FIQ in abort mode is similar to taking a FIQ in SVC mode
@ and reuses the same macros. However in abort mode we must also
@ save/restore lr_abt and spsr_abt to make nested aborts safe.
@
    .align 5
__fiq_abt:
    svc_entry trace=0

 ARM(   msr cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr cpsr_c, r0 )
    mov r1, lr      @ Save lr_abt
    mrs r2, spsr    @ Save spsr_abt, abort is now safe
 ARM(   msr cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr cpsr_c, r0 )
    stmfd   sp!, {r1 - r2}

    add r0, sp, #8          @ struct pt_regs *regs
    bl  handle_fiq_as_nmi

    ldmfd   sp!, {r1 - r2}
 ARM(   msr cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr cpsr_c, r0 )
    mov lr, r1      @ Restore lr_abt, abort is unsafe
    msr spsr_cxsf, r2   @ Restore spsr_abt
 ARM(   msr cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr cpsr_c, r0 )

    svc_exit_via_fiq
 UNWIND(.fnend      )
ENDPROC(__fiq_abt)

/*
 * User mode handlers
 *
 *  EABI note: sp_svc is always 64-bit aligned here, so PT_REGS_SIZE must be a multiple of 8 as well
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (PT_REGS_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif

    .macro  usr_entry, trace=1, uaccess=1
 UNWIND(.fnstart    )
 UNWIND(.cantunwind )   @ don't unwind the user space
    sub sp, sp, #PT_REGS_SIZE
 ARM(   stmib   sp, {r1 - r12}  )
 THUMB( stmia   sp, {r0 - r12}  )

 ATRAP( mrc p15, 0, r7, c1, c0, 0)
 ATRAP( ldr r8, .LCcralign)

    ldmia   r0, {r3 - r5}
    add r0, sp, #S_PC       @ here for interlock avoidance
    mov r6, #-1         @  ""  ""     ""        ""

    str r3, [sp]        @ save the "real" r0 copied
                    @ from the exception stack

 ATRAP( ldr r8, [r8, #0])

    @
    @ We are now ready to fill in the remaining blanks on the stack:
    @
    @  r4 - lr_<exception>, already fixed up for correct return/restart
    @  r5 - spsr_<exception>
    @  r6 - orig_r0 (see pt_regs definition in ptrace.h)
    @
    @ Also, separately save sp_usr and lr_usr
    @
    stmia   r0, {r4 - r6}
 ARM(   stmdb   r0, {sp, lr}^           )
 THUMB( store_user_sp_lr r0, r1, S_SP - S_PC    )

    .if \uaccess
    uaccess_disable ip
    .endif

    @ Enable the alignment trap while in kernel mode
 ATRAP( teq r8, r7)
 ATRAP( mcrne   p15, 0, r8, c1, c0, 0)

    @
    @ Clear FP to mark the first stack frame
    @
    zero_fp

    .if \trace
#ifdef CONFIG_TRACE_IRQFLAGS
    bl  trace_hardirqs_off
#endif
    ct_user_exit save = 0
    .endif
    .endm

    .macro  kuser_cmpxchg_check
#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
    @ Make sure our user space atomic helper is restarted
    @ if it was interrupted in a critical region.  Here we
    @ perform a quick test inline since it should be false
    @ 99.9999% of the time.  The rest is done out of line.
    cmp r4, #TASK_SIZE
    blhs    kuser_cmpxchg64_fixup
#endif
#endif
    .endm

    .align  5
__dabt_usr:
    usr_entry uaccess=0
    kuser_cmpxchg_check
    mov r2, sp
    dabt_helper
    b   ret_from_exception
 UNWIND(.fnend      )
ENDPROC(__dabt_usr)

    .align  5
__irq_usr:
    usr_entry
    kuser_cmpxchg_check
    irq_handler
    get_thread_info tsk
    mov why, #0
    b   ret_to_user_from_irq
 UNWIND(.fnend      )
ENDPROC(__irq_usr)

    .ltorg

    .align  5
__und_usr:
    usr_entry uaccess=0

    mov r2, r4
    mov r3, r5

    @ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
    @      faulting instruction depending on Thumb mode.
    @ r3 = regs->ARM_cpsr
    @
    @ The emulation code returns using r9 if it has emulated the
    @ instruction, or the more conventional lr if we are to treat
    @ this as a real undefined instruction
    @
    badr    r9, ret_from_exception

    @ IRQs must be enabled before attempting to read the instruction from
    @ user space since that could cause a page/translation fault if the
    @ page table was modified by another CPU.
    enable_irq

    tst r3, #PSR_T_BIT          @ Thumb mode?
    bne __und_usr_thumb
    sub r4, r2, #4          @ ARM instr at LR - 4
1:  ldrt    r0, [r4]
 ARM_BE8(rev    r0, r0)             @ little endian instruction

    uaccess_disable ip

    @ r0 = 32-bit ARM instruction which caused the exception
    @ r2 = PC value for the following instruction (:= regs->ARM_pc)
    @ r4 = PC value for the faulting instruction
    @ lr = 32-bit undefined instruction function
    badr    lr, __und_usr_fault_32
    b   call_fpe

__und_usr_thumb:
    @ Thumb instruction
    sub r4, r2, #2          @ First half of thumb instr at LR - 2
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
/*
 * Thumb-2 instruction handling.  Note that because pre-v6 and >= v6 platforms
 * can never be supported in a single kernel, this code is not applicable at
 * all when __LINUX_ARM_ARCH__ < 6.  This allows simplifying assumptions to be
 * made about .arch directives.
 */
#if __LINUX_ARM_ARCH__ < 7
/* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */
#define NEED_CPU_ARCHITECTURE
    ldr r5, .LCcpu_architecture
    ldr r5, [r5]
    cmp r5, #CPU_ARCH_ARMv7
    blo __und_usr_fault_16      @ 16bit undefined instruction
/*
 * The following code won't get run unless the running CPU really is v7, so
 * coding round the lack of ldrht on older arches is pointless.  Temporarily
 * override the assembler target arch with the minimum required instead:
 */
    .arch   armv6t2
#endif
2:  ldrht   r5, [r4]
ARM_BE8(rev16   r5, r5)             @ little endian instruction
    cmp r5, #0xe800         @ first half of a 32-bit insn if >= 0xe800
    blo __und_usr_fault_16_pan      @ 16bit undefined instruction
3:  ldrht   r0, [r2]
ARM_BE8(rev16   r0, r0)             @ little endian instruction
    uaccess_disable ip
    add r2, r2, #2          @ r2 is PC + 2, make it PC + 4
    str r2, [sp, #S_PC]         @ it's a 2x16bit instr, update
    orr r0, r0, r5, lsl #16
    badr    lr, __und_usr_fault_32
    @ r0 = the two 16-bit Thumb instructions which caused the exception
    @ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
    @ r4 = PC value for the first 16-bit Thumb instruction
    @ lr = 32bit undefined instruction function

#if __LINUX_ARM_ARCH__ < 7
/* If the target arch was overridden, change it back: */
#ifdef CONFIG_CPU_32v6K
    .arch   armv6k
#else
    .arch   armv6
#endif
#endif /* __LINUX_ARM_ARCH__ < 7 */
#else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
    b   __und_usr_fault_16
#endif
 UNWIND(.fnend)
ENDPROC(__und_usr)

/*
 * The out of line fixup for the ldrt instructions above.
 */
    .pushsection .text.fixup, "ax"
    .align  2
4:  str     r4, [sp, #S_PC]         @ retry current instruction
    ret r9
    .popsection
    .pushsection __ex_table,"a"
    .long   1b, 4b
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
    .long   2b, 4b
    .long   3b, 4b
#endif
    .popsection

/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * NEON is a special case that has to be handled here. Not all
 * NEON instructions are co-processor instructions, so we have
 * to make a special case of checking for them. Plus, there's
 * five groups of them, so we have a table of mask/opcode pairs
 * to check against, and if any match then we branch off into the
 * NEON handler code.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
 *  r2  = PC value to resume execution after successful emulation
 *  r9  = normal "successful" return address
 *  r10 = this thread's thread_info structure
 *  lr  = unrecognised instruction return address
 * IRQs enabled, FIQs enabled.
 */
    @
    @ Fall-through from Thumb-2 __und_usr
    @
#ifdef CONFIG_NEON
    get_thread_info r10         @ get current thread
    adr r6, .LCneon_thumb_opcodes
    b   2f
#endif
call_fpe:
    get_thread_info r10         @ get current thread
#ifdef CONFIG_NEON
    adr r6, .LCneon_arm_opcodes
2:  ldr r5, [r6], #4            @ mask value
    ldr r7, [r6], #4            @ opcode bits matching in mask
    cmp r5, #0              @ end mask?
    beq 1f
    and r8, r0, r5
    cmp r8, r7              @ NEON instruction?
    bne 2b
    mov r7, #1
    strb    r7, [r10, #TI_USED_CP + 10] @ mark CP#10 as used
    strb    r7, [r10, #TI_USED_CP + 11] @ mark CP#11 as used
    b   do_vfp              @ let VFP handler handle this
1:
#endif
    tst r0, #0x08000000         @ only CDP/CPRT/LDC/STC have bit 27
    tstne   r0, #0x04000000         @ bit 26 set on both ARM and Thumb-2
    reteq   lr
    and r8, r0, #0x00000f00     @ mask out CP number
 THUMB( lsr r8, r8, #8      )
    mov r7, #1
    add r6, r10, #TI_USED_CP
 ARM(   strb    r7, [r6, r8, lsr #8]    )   @ set appropriate used_cp[]
 THUMB( strb    r7, [r6, r8]        )   @ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
    @ Test if we need to give access to iWMMXt coprocessors
    ldr r5, [r10, #TI_FLAGS]
    rsbs    r7, r8, #(1 << 8)       @ CP 0 or 1 only
    movcss  r7, r5, lsr #(TIF_USING_IWMMXT + 1)
    bcs iwmmxt_task_enable
#endif
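    @ Computed branch into the table below (one word per coprocessor).
    @ In ARM state pc reads as this instruction + 8, i.e. the CP#0 entry,
    @ and r8 holds the CP number in bits 8-11, so "r8, lsr #6" adds CP# * 4.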
 ARM(   add pc, pc, r8, lsr #6  )
 THUMB( lsl r8, r8, #2      )
 THUMB( add pc, r8          )
    nop

    ret.w   lr              @ CP#0
    W(b)    do_fpe              @ CP#1 (FPE)
    W(b)    do_fpe              @ CP#2 (FPE)
    ret.w   lr              @ CP#3
#ifdef CONFIG_CRUNCH
    b   crunch_task_enable      @ CP#4 (MaverickCrunch)
    b   crunch_task_enable      @ CP#5 (MaverickCrunch)
    b   crunch_task_enable      @ CP#6 (MaverickCrunch)
#else
    ret.w   lr              @ CP#4
    ret.w   lr              @ CP#5
    ret.w   lr              @ CP#6
#endif
    ret.w   lr              @ CP#7
    ret.w   lr              @ CP#8
    ret.w   lr              @ CP#9
#ifdef CONFIG_VFP
    W(b)    do_vfp              @ CP#10 (VFP)
    W(b)    do_vfp              @ CP#11 (VFP)
#else
    ret.w   lr              @ CP#10 (VFP)
    ret.w   lr              @ CP#11 (VFP)
#endif
    ret.w   lr              @ CP#12
    ret.w   lr              @ CP#13
    ret.w   lr              @ CP#14 (Debug)
    ret.w   lr              @ CP#15 (Control)

#ifdef NEED_CPU_ARCHITECTURE
    .align  2
.LCcpu_architecture:
    .word   __cpu_architecture
#endif

#ifdef CONFIG_NEON
    .align  6

.LCneon_arm_opcodes:
    .word   0xfe000000          @ mask
    .word   0xf2000000          @ opcode

    .word   0xff100000          @ mask
    .word   0xf4000000          @ opcode

    .word   0x00000000          @ mask
    .word   0x00000000          @ opcode

.LCneon_thumb_opcodes:
    .word   0xef000000          @ mask
    .word   0xef000000          @ opcode

    .word   0xff100000          @ mask
    .word   0xf9000000          @ opcode

    .word   0x00000000          @ mask
    .word   0x00000000          @ opcode
#endif

do_fpe:
    ldr r4, .LCfp
    add r10, r10, #TI_FPSTATE       @ r10 = workspace
    ldr pc, [r4]            @ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */

    .pushsection .data
ENTRY(fp_enter)
    .word   no_fp
    .popsection

ENTRY(no_fp)
    ret lr
ENDPROC(no_fp)

__und_usr_fault_32:
    mov r1, #4
    b   1f
__und_usr_fault_16_pan:
    uaccess_disable ip
__und_usr_fault_16:
    mov r1, #2
1:  mov r0, sp
    badr    lr, ret_from_exception
    b   __und_fault
ENDPROC(__und_usr_fault_32)
ENDPROC(__und_usr_fault_16)

    .align  5
__pabt_usr:
    usr_entry
    mov r2, sp              @ regs
    pabt_helper
 UNWIND(.fnend      )
    /* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
 UNWIND(.fnstart    )
 UNWIND(.cantunwind )
    get_thread_info tsk
    mov why, #0
    b   ret_to_user
 UNWIND(.fnend      )
ENDPROC(__pabt_usr)
ENDPROC(ret_from_exception)

    .align  5
__fiq_usr:
    usr_entry trace=0
    kuser_cmpxchg_check
    mov r0, sp              @ struct pt_regs *regs
    bl  handle_fiq_as_nmi
    get_thread_info tsk
    restore_user_regs fast = 0, offset = 0
 UNWIND(.fnend      )
ENDPROC(__fiq_usr)

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
 UNWIND(.fnstart    )
 UNWIND(.cantunwind )
    add ip, r1, #TI_CPU_SAVE
 ARM(   stmia   ip!, {r4 - sl, fp, sp, lr} )    @ Store most regs on stack
 THUMB( stmia   ip!, {r4 - sl, fp}     )    @ Store most regs on stack
 THUMB( str sp, [ip], #4           )
 THUMB( str lr, [ip], #4           )
    ldr r4, [r2, #TI_TP_VALUE]
    ldr r5, [r2, #TI_TP_VALUE + 4]
#ifdef CONFIG_CPU_USE_DOMAINS
    mrc p15, 0, r6, c3, c0, 0       @ Get domain register
    str r6, [r1, #TI_CPU_DOMAIN]    @ Save old domain register
    ldr r6, [r2, #TI_CPU_DOMAIN]
#endif
    switch_tls r1, r4, r5, r3, r7
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
    ldr r7, [r2, #TI_TASK]
    ldr r8, =__stack_chk_guard
    ldr r7, [r7, #TSK_STACK_CANARY]
#endif
#ifdef CONFIG_CPU_USE_DOMAINS
    mcr p15, 0, r6, c3, c0, 0       @ Set domain register
#endif
    mov r5, r0
    add r4, r2, #TI_CPU_SAVE
    ldr r0, =thread_notify_head
    mov r1, #THREAD_NOTIFY_SWITCH
    bl  atomic_notifier_call_chain
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
    str r7, [r8]
#endif
 THUMB( mov ip, r4             )
    mov r0, r5
 ARM(   ldmia   r4, {r4 - sl, fp, sp, pc}  )    @ Load all regs saved previously
 THUMB( ldmia   ip!, {r4 - sl, fp}     )    @ Load all regs saved previously
 THUMB( ldr sp, [ip], #4           )
 THUMB( ldr pc, [ip]           )
 UNWIND(.fnend      )
ENDPROC(__switch_to)

    __INIT

/*
 * User helpers.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
 */
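/*
 * Informal userspace usage sketch (the documentation referenced above is
 * the authoritative definition); the helper addresses match the labels
 * annotated below:
 *
 *	typedef int (__kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
 *	#define __kuser_cmpxchg (*(__kuser_cmpxchg_t *)0xffff0fc0)
 *
 *	int atomic_add(volatile int *ptr, int val)
 *	{
 *		int old, new;
 *
 *		do {
 *			old = *ptr;
 *			new = old + val;
 *		} while (__kuser_cmpxchg(old, new, ptr));
 *		return new;
 *	}
 *
 * The helper returns 0 (and sets the C flag) when *ptr was updated and
 * non-zero otherwise, so the caller simply retries until it succeeds.
 */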
 THUMB( .arm    )

    .macro  usr_ret, reg
#ifdef CONFIG_ARM_THUMB
    bx  \reg
#else
    ret \reg
#endif
    .endm

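/*
 * Pad a kuser helper out to \size bytes: align to a word boundary first,
 * then fill the remainder with 0xe7fddef1, an undefined instruction
 * encoding, so that a stray branch into the padding faults rather than
 * falling through into the next helper.
 */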
    .macro  kuser_pad, sym, size
    .if (. - \sym) & 3
    .rept   4 - (. - \sym) & 3
    .byte   0
    .endr
    .endif
    .rept   (\size - (. - \sym)) / 4
    .word   0xe7fddef1
    .endr
    .endm

#ifdef CONFIG_KUSER_HELPERS
    .align  5
    .globl  __kuser_helper_start
__kuser_helper_start:

/*
 * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
 * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
 */

__kuser_cmpxchg64:              @ 0xffff0f60

#if defined(CONFIG_CPU_32v6K)

    stmfd   sp!, {r4, r5, r6, r7}
    ldrd    r4, r5, [r0]            @ load old val
    ldrd    r6, r7, [r1]            @ load new val
    smp_dmb arm
1:  ldrexd  r0, r1, [r2]            @ load current val
    eors    r3, r0, r4          @ compare with oldval (1)
    eoreqs  r3, r1, r5          @ compare with oldval (2)
    strexdeq r3, r6, r7, [r2]       @ store newval if eq
    teqeq   r3, #1              @ success?
    beq 1b              @ if no then retry
    smp_dmb arm
    rsbs    r0, r3, #0          @ set returned val and C flag
    ldmfd   sp!, {r4, r5, r6, r7}
    usr_ret lr

#elif !defined(CONFIG_SMP)

#ifdef CONFIG_MMU

    /*
     * The only thing that can break atomicity in this cmpxchg64
     * implementation is either an IRQ or a data abort exception
     * causing another process/thread to be scheduled in the middle of
     * the critical sequence.  The same strategy as for cmpxchg is used.
     */
    stmfd   sp!, {r4, r5, r6, lr}
    ldmia   r0, {r4, r5}            @ load old val
    ldmia   r1, {r6, lr}            @ load new val
1:  ldmia   r2, {r0, r1}            @ load current val
    eors    r3, r0, r4          @ compare with oldval (1)
    eoreqs  r3, r1, r5          @ compare with oldval (2)
2:  stmeqia r2, {r6, lr}            @ store newval if eq
    rsbs    r0, r3, #0          @ set return val and C flag
    ldmfd   sp!, {r4, r5, r6, pc}

    .text
kuser_cmpxchg64_fixup:
    @ Called from kuser_cmpxchg_fixup.
    @ r4 = address of interrupted insn (must be preserved).
    @ sp = saved regs. r7 and r8 are clobbered.
    @ 1b = first critical insn, 2b = last critical insn.
    @ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
    mov r7, #0xffff0fff
    sub r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
    subs    r8, r4, r7
    rsbcss  r8, r8, #(2b - 1b)
    strcs   r7, [sp, #S_PC]
#if __LINUX_ARM_ARCH__ < 6
    bcc kuser_cmpxchg32_fixup
#endif
    ret lr
    .previous

#else
#warning "NPTL on non MMU needs fixing"
    mov r0, #-1
    adds    r0, r0, #0
    usr_ret lr
#endif

#else
#error "incoherent kernel configuration"
#endif

    kuser_pad __kuser_cmpxchg64, 64

__kuser_memory_barrier:             @ 0xffff0fa0
    smp_dmb arm
    usr_ret lr

    kuser_pad __kuser_memory_barrier, 32

__kuser_cmpxchg:                @ 0xffff0fc0

#if __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_MMU

    /*
     * The only thing that can break atomicity in this cmpxchg
     * implementation is either an IRQ or a data abort exception
     * causing another process/thread to be scheduled in the middle
     * of the critical sequence.  To prevent this, code is added to
     * the IRQ and data abort exception handlers to set the pc back
     * to the beginning of the critical section if it is found to be
     * within that critical section (see kuser_cmpxchg_fixup).
     */
1:  ldr r3, [r2]            @ load current val
    subs    r3, r3, r0          @ compare with oldval
2:  streq   r1, [r2]            @ store newval if eq
    rsbs    r0, r3, #0          @ set return val and C flag
    usr_ret lr

    .text
kuser_cmpxchg32_fixup:
    @ Called from kuser_cmpxchg_check macro.
    @ r4 = address of interrupted insn (must be preserved).
    @ sp = saved regs. r7 and r8 are clobbered.
    @ 1b = first critical insn, 2b = last critical insn.
    @ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
    mov r7, #0xffff0fff
    sub r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
    subs    r8, r4, r7
    rsbcss  r8, r8, #(2b - 1b)
    strcs   r7, [sp, #S_PC]
    ret lr
    .previous

#else
#warning "NPTL on non MMU needs fixing"
    mov r0, #-1
    adds    r0, r0, #0
    usr_ret lr
#endif

#else

    smp_dmb arm
1:  ldrex   r3, [r2]
    subs    r3, r3, r0
    strexeq r3, r1, [r2]
    teqeq   r3, #1
    beq 1b
    rsbs    r0, r3, #0
    /* beware -- each __kuser slot must be 8 instructions max */
    ALT_SMP(b   __kuser_memory_barrier)
    ALT_UP(usr_ret  lr)

#endif

    kuser_pad __kuser_cmpxchg, 32

__kuser_get_tls:                @ 0xffff0fe0
    ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init
    usr_ret lr
    mrc p15, 0, r0, c13, c0, 3  @ 0xffff0fe8 hardware TLS code
    kuser_pad __kuser_get_tls, 16
    .rep    3
    .word   0           @ 0xffff0ff0 software TLS value, then
    .endr               @ pad up to __kuser_helper_version

__kuser_helper_version:             @ 0xffff0ffc
    .word   ((__kuser_helper_end - __kuser_helper_start) >> 5)

    .globl  __kuser_helper_end
__kuser_helper_end:

#endif

 THUMB( .thumb  )

/*
 * Vector stubs.
 *
 * This code is copied to 0xffff1000 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not exceed
 * a page size.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
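/*
 * Each stub saves r0, the parent PC and the parent CPSR, builds an
 * SVC-mode CPSR by flipping the mode bits (plus the Thumb state bit on
 * Thumb-2 kernels) in the current CPSR, and then indexes the branch
 * table that follows it by the mode field of the parent CPSR.
 */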
    .macro  vector_stub, name, mode, correction=0
    .align  5

vector_\name:
    .if \correction
    sub lr, lr, #\correction
    .endif

    @
    @ Save r0, lr_<exception> (parent PC) and spsr_<exception>
    @ (parent CPSR)
    @
    stmia   sp, {r0, lr}        @ save r0, lr
    mrs lr, spsr
    str lr, [sp, #8]        @ save spsr

    @
    @ Prepare for SVC32 mode.  IRQs remain disabled.
    @
    mrs r0, cpsr
    eor r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
    msr spsr_cxsf, r0

    @
    @ the branch table must immediately follow this code
    @
    and lr, lr, #0x0f
 THUMB( adr r0, 1f          )
 THUMB( ldr lr, [r0, lr, lsl #2]    )
    mov r0, sp
 ARM(   ldr lr, [pc, lr, lsl #2]    )
    movs    pc, lr          @ branch to handler in SVC mode
ENDPROC(vector_\name)

    .align  2
    @ handler addresses follow this label
1:
    .endm

    .section .stubs, "ax", %progbits
    @ This must be the first word
    .word   vector_swi

vector_rst:
 ARM(   swi SYS_ERROR0  )
 THUMB( svc #0      )
 THUMB( nop         )
    b   vector_und

/*
 * Interrupt dispatcher
 */
    vector_stub irq, IRQ_MODE, 4

    .long   __irq_usr           @  0  (USR_26 / USR_32)
    .long   __irq_invalid           @  1  (FIQ_26 / FIQ_32)
    .long   __irq_invalid           @  2  (IRQ_26 / IRQ_32)
    .long   __irq_svc           @  3  (SVC_26 / SVC_32)
    .long   __irq_invalid           @  4
    .long   __irq_invalid           @  5
    .long   __irq_invalid           @  6
    .long   __irq_invalid           @  7
    .long   __irq_invalid           @  8
    .long   __irq_invalid           @  9
    .long   __irq_invalid           @  a
    .long   __irq_invalid           @  b
    .long   __irq_invalid           @  c
    .long   __irq_invalid           @  d
    .long   __irq_invalid           @  e
    .long   __irq_invalid           @  f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
    vector_stub dabt, ABT_MODE, 8

    .long   __dabt_usr          @  0  (USR_26 / USR_32)
    .long   __dabt_invalid          @  1  (FIQ_26 / FIQ_32)
    .long   __dabt_invalid          @  2  (IRQ_26 / IRQ_32)
    .long   __dabt_svc          @  3  (SVC_26 / SVC_32)
    .long   __dabt_invalid          @  4
    .long   __dabt_invalid          @  5
    .long   __dabt_invalid          @  6
    .long   __dabt_invalid          @  7
    .long   __dabt_invalid          @  8
    .long   __dabt_invalid          @  9
    .long   __dabt_invalid          @  a
    .long   __dabt_invalid          @  b
    .long   __dabt_invalid          @  c
    .long   __dabt_invalid          @  d
    .long   __dabt_invalid          @  e
    .long   __dabt_invalid          @  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
    vector_stub pabt, ABT_MODE, 4

    .long   __pabt_usr          @  0 (USR_26 / USR_32)
    .long   __pabt_invalid          @  1 (FIQ_26 / FIQ_32)
    .long   __pabt_invalid          @  2 (IRQ_26 / IRQ_32)
    .long   __pabt_svc          @  3 (SVC_26 / SVC_32)
    .long   __pabt_invalid          @  4
    .long   __pabt_invalid          @  5
    .long   __pabt_invalid          @  6
    .long   __pabt_invalid          @  7
    .long   __pabt_invalid          @  8
    .long   __pabt_invalid          @  9
    .long   __pabt_invalid          @  a
    .long   __pabt_invalid          @  b
    .long   __pabt_invalid          @  c
    .long   __pabt_invalid          @  d
    .long   __pabt_invalid          @  e
    .long   __pabt_invalid          @  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
    vector_stub und, UND_MODE

    .long   __und_usr           @  0 (USR_26 / USR_32)
    .long   __und_invalid           @  1 (FIQ_26 / FIQ_32)
    .long   __und_invalid           @  2 (IRQ_26 / IRQ_32)
    .long   __und_svc           @  3 (SVC_26 / SVC_32)
    .long   __und_invalid           @  4
    .long   __und_invalid           @  5
    .long   __und_invalid           @  6
    .long   __und_invalid           @  7
    .long   __und_invalid           @  8
    .long   __und_invalid           @  9
    .long   __und_invalid           @  a
    .long   __und_invalid           @  b
    .long   __und_invalid           @  c
    .long   __und_invalid           @  d
    .long   __und_invalid           @  e
    .long   __und_invalid           @  f

    .align  5

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */

vector_addrexcptn:
    b   vector_addrexcptn

/*=============================================================================
 * FIQ "NMI" handler
 *-----------------------------------------------------------------------------
 * Handle a FIQ using the SVC stack, allowing a FIQ to act like an NMI does
 * on x86 systems.
 */
    vector_stub fiq, FIQ_MODE, 4

    .long   __fiq_usr           @  0  (USR_26 / USR_32)
    .long   __fiq_svc           @  1  (FIQ_26 / FIQ_32)
    .long   __fiq_svc           @  2  (IRQ_26 / IRQ_32)
    .long   __fiq_svc           @  3  (SVC_26 / SVC_32)
    .long   __fiq_svc           @  4
    .long   __fiq_svc           @  5
    .long   __fiq_svc           @  6
    .long   __fiq_abt           @  7
    .long   __fiq_svc           @  8
    .long   __fiq_svc           @  9
    .long   __fiq_svc           @  a
    .long   __fiq_svc           @  b
    .long   __fiq_svc           @  c
    .long   __fiq_svc           @  d
    .long   __fiq_svc           @  e
    .long   __fiq_svc           @  f

    .globl  vector_fiq

    .section .vectors, "ax", %progbits
.L__vectors_start:
    W(b)    vector_rst
    W(b)    vector_und
    W(ldr)  pc, .L__vectors_start + 0x1000
    W(b)    vector_pabt
    W(b)    vector_dabt
    W(b)    vector_addrexcptn
    W(b)    vector_irq
    W(b)    vector_fiq

    .data

    .globl  cr_alignment
cr_alignment:
    .space  4

#ifdef CONFIG_MULTI_IRQ_HANDLER
    .globl  handle_arch_irq
handle_arch_irq:
    .space  4
#endif