/*
 * linux/arch/unicore32/kernel/entry.S
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Low-level vector interface routines
 */
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/unistd.h>
#include <generated/asm-offsets.h>
#include "debug-macro.S"

@
@ Most of the stack format comes from struct pt_regs, but with
@ the addition of 8 bytes for storing syscall args 5 and 6.
@
#define S_OFF       8
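@
@ (Args 5 and 6 are pushed immediately below the saved pt_regs on the
@ SWI path; see vector_swi below.  Code that runs with them on the
@ stack therefore adds S_OFF to each pt_regs offset to reach the
@ saved registers.)
@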

/*
 * The SWI code relies on the fact that R0 is at the bottom of the stack
 * (due to slow/fast restore user regs).
 */
#if S_R0 != 0
#error "Please fix"
#endif

    .macro  zero_fp
#ifdef CONFIG_FRAME_POINTER
    mov fp, #0
#endif
    .endm

    .macro  alignment_trap, rtemp
#ifdef CONFIG_ALIGNMENT_TRAP
    ldw \rtemp, .LCcralign
    ldw \rtemp, [\rtemp]
    movc    p0.c1, \rtemp, #0
#endif
    .endm

    .macro  load_user_sp_lr, rd, rtemp, offset = 0
    mov \rtemp, asr
    xor \rtemp, \rtemp, #(PRIV_MODE ^ SUSR_MODE)
    mov.a   asr, \rtemp         @ switch to the SUSR mode

    ldw sp, [\rd+], #\offset        @ load sp_user
    ldw lr, [\rd+], #\offset + 4    @ load lr_user

    xor \rtemp, \rtemp, #(PRIV_MODE ^ SUSR_MODE)
    mov.a   asr, \rtemp         @ switch back to the PRIV mode
    .endm
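
@ load_user_sp_lr needs the mode dance above because sp and lr are
@ banked per processor mode: with asr switched to SUSR the two loads
@ write the user copies of sp/lr, after which we drop back into PRIV.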

    .macro  priv_exit, rpsr
    mov.a   bsr, \rpsr
    ldm.w   (r0 - r15), [sp]+
    ldm.b   (r16 - pc), [sp]+       @ load r0 - pc, asr
    .endm

    .macro  restore_user_regs, fast = 0, offset = 0
    ldw r1, [sp+], #\offset + S_PSR @ get calling asr
    ldw lr, [sp+], #\offset + S_PC  @ get pc
    mov.a   bsr, r1             @ save in bsr_priv
    .if \fast
    add sp, sp, #\offset + S_R1     @ r0 is syscall return value
    ldm.w   (r1 - r15), [sp]+       @ get calling r1 - r15
    ldur    (r16 - lr), [sp]+       @ get calling r16 - lr
    .else
    ldm.w   (r0 - r15), [sp]+       @ get calling r0 - r15
    ldur    (r16 - lr), [sp]+       @ get calling r16 - lr
    .endif
    nop
    add sp, sp, #S_FRAME_SIZE - S_R16
    mov.a   pc, lr              @ return
                        @ and move bsr_priv into asr
    .endm

    .macro  get_thread_info, rd
    mov \rd, sp >> #13
    mov \rd, \rd << #13
    .endm
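
@ get_thread_info relies on the kernel stack being THREAD_SIZE aligned
@ (8KB here, hence the 13-bit shifts) with struct thread_info at its
@ base; rounding sp down to that boundary yields the thread_info pointer.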

    .macro  get_irqnr_and_base, irqnr, irqstat, base, tmp
    ldw \base, =(PKUNITY_INTC_BASE)
    ldw \irqstat, [\base+], #0xC    @ INTC_ICIP
    ldw \tmp,     [\base+], #0x4    @ INTC_ICMR
    and.a   \irqstat, \irqstat, \tmp
    beq 1001f
    cntlz   \irqnr, \irqstat
    rsub    \irqnr, \irqnr, #31
1001:   /* EQ will be set if no irqs pending */
    .endm
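
@ ICIP is the raw pending-interrupt register and ICMR the mask, so the
@ and.a leaves only enabled, pending sources; cntlz plus "rsub #31" then
@ converts the leading-zero count into the bit index of the highest
@ pending source, which is returned as the IRQ number.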

#ifdef CONFIG_DEBUG_LL
    .macro  printreg, reg, temp
        adr \temp, 901f
        stm (r0-r3), [\temp]+
        stw lr, [\temp+], #0x10
        mov r0, \reg
        b.l printhex8
        mov r0, #':'
        b.l printch
        mov r0, pc
        b.l printhex8
        adr r0, 902f
        b.l printascii
        adr \temp, 901f
        ldm (r0-r3), [\temp]+
        ldw lr, [\temp+], #0x10
        b   903f
901:    .word   0, 0, 0, 0, 0   @ r0-r3, lr
902:    .asciz  ": epip4d\n"
    .align
903:
    .endm
#endif

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - r0 to r6.
 *
 * Note that tbl == why is intentional.
 *
 * We must set at least "tsk" and "why" when calling ret_with_reschedule.
 */
scno    .req    r21     @ syscall number
tbl .req    r22     @ syscall table pointer
why .req    r22     @ Linux syscall (!= 0)
tsk .req    r23     @ current thread_info

/*
 * Interrupt handling.  Preserves r17, r18, r19
 */
    .macro  intr_handler
1:  get_irqnr_and_base r0, r6, r5, lr
    beq 2f
    mov r1, sp
    @
    @ routine called with r0 = irq number, r1 = struct pt_regs *
    @
    adr lr, 1b
    b   asm_do_IRQ
2:
    .endm

/*
 * PRIV mode handlers
 */
    .macro  priv_entry
    sub sp, sp, #(S_FRAME_SIZE - 4)
    stm (r1 - r15), [sp]+
    add r5, sp, #S_R15
    stm (r16 - r28), [r5]+

    ldm (r1 - r3), [r0]+
    add r5, sp, #S_SP - 4   @ here for interlock avoidance
    mov r4, #-1         @  ""  ""      ""       ""
    add r0, sp, #(S_FRAME_SIZE - 4)
    stw.w   r1, [sp+], #-4      @ save the "real" r0 copied
                    @ from the exception stack

    mov r1, lr

    @
    @ We are now ready to fill in the remaining blanks on the stack:
    @
    @  r0 - sp_priv
    @  r1 - lr_priv
    @  r2 - lr_<exception>, already fixed up for correct return/restart
    @  r3 - bsr_<exception>
    @  r4 - orig_r0 (see pt_regs definition in ptrace.h)
    @
    stm (r0 - r4), [r5]+
    .endm

/*
 * User mode handlers
 *
 */
    .macro  user_entry
    sub sp, sp, #S_FRAME_SIZE
    stm (r1 - r15), [sp+]
    add r4, sp, #S_R16
    stm (r16 - r28), [r4]+

    ldm (r1 - r3), [r0]+
    add r0, sp, #S_PC       @ here for interlock avoidance
    mov r4, #-1         @  ""  ""     ""        ""

    stw r1, [sp]        @ save the "real" r0 copied
                    @ from the exception stack

    @
    @ We are now ready to fill in the remaining blanks on the stack:
    @
    @  r2 - lr_<exception>, already fixed up for correct return/restart
    @  r3 - bsr_<exception>
    @  r4 - orig_r0 (see pt_regs definition in ptrace.h)
    @
    @ Also, separately save sp_user and lr_user
    @
    stm (r2 - r4), [r0]+
    stur    (sp, lr), [r0-]

    @
    @ Enable the alignment trap while in kernel mode
    @
    alignment_trap r0

    @
    @ Clear FP to mark the first stack frame
    @
    zero_fp
    .endm

    .text

@
@ __invalid - generic code for failed exception
@           (re-entrant version of handlers)
@
__invalid:
    sub sp, sp, #S_FRAME_SIZE
    stm (r1 - r15), [sp+]
    add r1, sp, #S_R16
    stm (r16 - r28, sp, lr), [r1]+

    zero_fp

    ldm (r4 - r6), [r0]+
    add r0, sp, #S_PC       @ here for interlock avoidance
    mov r7, #-1         @  ""   ""    ""        ""
    stw r4, [sp]        @ save preserved r0
    stm (r5 - r7), [r0]+    @ lr_<exception>,
                    @ asr_<exception>, "old_r0"

    mov r0, sp
    mov r1, asr
    b   bad_mode
ENDPROC(__invalid)

    .align  5
__dabt_priv:
    priv_entry

    @
    @ get ready to re-enable interrupts if appropriate
    @
    mov r17, asr
    cand.a  r3, #PSR_I_BIT
    bne 1f
    andn    r17, r17, #PSR_I_BIT
1:

    @
    @ Call the processor-specific abort handler:
    @
    @  r2 - aborted context pc
    @  r3 - aborted context asr
    @
    @ The abort handler must return the aborted address in r0, and
    @ the fault status register in r1.
    @
    movc    r1, p0.c3, #0       @ get FSR
    movc    r0, p0.c4, #0       @ get FAR

    @
    @ set desired INTR state, then call main handler
    @
    mov.a   asr, r17
    mov r2, sp
    b.l do_DataAbort

    @
    @ INTRs off again before pulling preserved data off the stack
    @
    disable_irq r0

    @
    @ restore BSR and restart the instruction
    @
    ldw r2, [sp+], #S_PSR
    priv_exit r2                @ return from exception
ENDPROC(__dabt_priv)

    .align  5
__intr_priv:
    priv_entry

    intr_handler

    mov r0, #0              @ epip4d
    movc    p0.c5, r0, #14
    nop; nop; nop; nop; nop; nop; nop; nop

    ldw r4, [sp+], #S_PSR       @ irqs are already disabled

    priv_exit r4                @ return from exception
ENDPROC(__intr_priv)

    .ltorg

    .align  5
__extn_priv:
    priv_entry

    mov r0, sp              @ struct pt_regs *regs
    mov r1, asr
    b   bad_mode            @ not supported
ENDPROC(__extn_priv)

    .align  5
__pabt_priv:
    priv_entry

    @
    @ re-enable interrupts if appropriate
    @
    mov r17, asr
    cand.a  r3, #PSR_I_BIT
    bne 1f
    andn    r17, r17, #PSR_I_BIT
1:

    @
    @ set args, then call main handler
    @
    @  r0 - address of faulting instruction
    @  r1 - pointer to registers on stack
    @
    mov r0, r2          @ pass address of aborted instruction
    mov r1, #5
    mov.a   asr, r17
    mov r2, sp          @ regs
    b.l do_PrefetchAbort    @ call abort handler

    @
    @ INTRs off again before pulling preserved data off the stack
    @
    disable_irq r0

    @
    @ restore BSR and restart the instruction
    @
    ldw r2, [sp+], #S_PSR
    priv_exit r2            @ return from exception
ENDPROC(__pabt_priv)

    .align  5
.LCcralign:
    .word   cr_alignment

    .align  5
__dabt_user:
    user_entry

#ifdef CONFIG_UNICORE_FPU_F64
    cff ip, s31
    cand.a  ip, #0x08000000     @ FPU exception traps?
    beq 209f

    ldw ip, [sp+], #S_PC
    add ip, ip, #4
    stw ip, [sp+], #S_PC
    @
    @ fall through to the emulation code, which returns using r19 if
    @ it has emulated the instruction, or the more conventional lr
    @ if we are to treat this as a real extended instruction
    @
    @  r0 - instruction
    @
1:  ldw.u   r0, [r2]
    adr r19, ret_from_exception
    adr lr, 209f
    @
    @ fallthrough to call do_uc_f64
    @
/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode.
 *  r2  = PC
 *  r19 = normal "successful" return address
 *  r20 = this thread's thread_info structure.
 *  lr  = unrecognised instruction return address
 */
    get_thread_info r20         @ get current thread
    and r8, r0, #0x00003c00     @ mask out CP number
    mov r7, #1
    stb r7, [r20+], #TI_USED_CP + 2 @ set appropriate used_cp[]

    @ F64 hardware support entry point.
    @  r0  = faulted instruction
    @  r19 = return address
    @  r20 = fp_state
    enable_irq r4
    add r20, r20, #TI_FPSTATE   @ r20 = workspace
    cff r1, s31         @ get fpu FPSCR
    andn    r2, r1, #0x08000000
    ctf     r2, s31         @ clear bit 27
    mov r2, sp          @ nothing stacked - regdump is at TOS
    mov lr, r19         @ setup for a return to the user code

    @ Now call the C code to package up the bounce to the support code
    @   r0 holds the trigger instruction
    @   r1 holds the FPSCR value
    @   r2 pointer to register dump
    b   ucf64_exchandler
209:
#endif
    @
    @ Call the processor-specific abort handler:
    @
    @  r2 - aborted context pc
    @  r3 - aborted context asr
    @
    @ The abort handler must return the aborted address in r0, and
    @ the fault status register in r1.
    @
    movc    r1, p0.c3, #0       @ get FSR
    movc    r0, p0.c4, #0       @ get FAR

    @
    @ INTRs on, then call the main handler
    @
    enable_irq r2
    mov r2, sp
    adr lr, ret_from_exception
    b   do_DataAbort
ENDPROC(__dabt_user)

    .align  5
__intr_user:
    user_entry

    get_thread_info tsk

    intr_handler

    mov why, #0
    b   ret_to_user
ENDPROC(__intr_user)

    .ltorg

    .align  5
__extn_user:
    user_entry

    mov r0, sp
    mov r1, asr
    b   bad_mode
ENDPROC(__extn_user)

    .align  5
__pabt_user:
    user_entry

    mov r0, r2          @ pass address of aborted instruction.
    mov r1, #5
    enable_irq r1           @ Enable interrupts
    mov r2, sp          @ regs
    b.l do_PrefetchAbort    @ call abort handler
    /* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
    get_thread_info tsk
    mov why, #0
    b   ret_to_user
ENDPROC(__pabt_user)
ENDPROC(ret_from_exception)

/*
 * Register switch for UniCore V2 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
    add ip, r1, #TI_CPU_SAVE
    stm.w   (r4 - r15), [ip]+
    stm.w   (r16 - r27, sp, lr), [ip]+

#ifdef  CONFIG_UNICORE_FPU_F64
    add ip, r1, #TI_FPSTATE
    sfm.w   (f0  - f7 ), [ip]+
    sfm.w   (f8  - f15), [ip]+
    sfm.w   (f16 - f23), [ip]+
    sfm.w   (f24 - f31), [ip]+
    cff r4, s31
    stw r4, [ip]

    add ip, r2, #TI_FPSTATE
    lfm.w   (f0  - f7 ), [ip]+
    lfm.w   (f8  - f15), [ip]+
    lfm.w   (f16 - f23), [ip]+
    lfm.w   (f24 - f31), [ip]+
    ldw r4, [ip]
    ctf r4, s31
#endif
    add ip, r2, #TI_CPU_SAVE
    ldm.w   (r4 - r15), [ip]+
    ldm (r16 - r27, sp, pc), [ip]+  @ Load all regs saved previously
ENDPROC(__switch_to)

    .align  5
/*
 * This is the fast syscall return path.  We do as little as
 * possible here, and this includes saving r0 back into the PRIV
 * stack.
 */
ret_fast_syscall:
    disable_irq r1              @ disable interrupts
    ldw r1, [tsk+], #TI_FLAGS
    cand.a  r1, #_TIF_WORK_MASK
    bne fast_work_pending

    @ fast_restore_user_regs
    restore_user_regs fast = 1, offset = S_OFF

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
    stw.w   r0, [sp+], #S_R0+S_OFF      @ returned r0
work_pending:
    cand.a  r1, #_TIF_NEED_RESCHED
    bne work_resched
    mov r0, sp              @ 'regs'
    mov r2, why             @ 'syscall'
    cand.a  r1, #_TIF_SIGPENDING        @ delivering a signal?
    cmovne  why, #0             @ prevent further restarts
    b.l do_notify_resume
    b   ret_slow_syscall        @ Check work again

work_resched:
    b.l schedule
/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
    disable_irq r1              @ disable interrupts
    get_thread_info tsk         @ epip4d, one path error?!
    ldw r1, [tsk+], #TI_FLAGS
    cand.a  r1, #_TIF_WORK_MASK
    bne work_pending
no_work_pending:
    @ slow_restore_user_regs
    restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
    b.l schedule_tail
    b   ret_slow_syscall
ENDPROC(ret_from_fork)

ENTRY(ret_from_kernel_thread)
    b.l schedule_tail
    mov r0, r5
    adr lr, ret_slow_syscall
    mov pc, r4
ENDPROC(ret_from_kernel_thread)

/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */
    .align  5
ENTRY(vector_swi)
    sub sp, sp, #S_FRAME_SIZE
    stm (r0 - r15), [sp]+       @ Calling r0 - r15
    add r8, sp, #S_R16
    stm (r16 - r28), [r8]+      @ Calling r16 - r28
    add r8, sp, #S_PC
    stur    (sp, lr), [r8-]         @ Calling sp, lr
    mov r8, bsr             @ called from non-REAL mode
    stw lr, [sp+], #S_PC        @ Save calling PC
    stw r8, [sp+], #S_PSR       @ Save ASR
    stw r0, [sp+], #S_OLD_R0        @ Save OLD_R0
    zero_fp

    /*
     * Get the system call number.
     */
    sub ip, lr, #4
    ldw.u   scno, [ip]          @ get SWI instruction

#ifdef CONFIG_ALIGNMENT_TRAP
    ldw ip, __cr_alignment
    ldw ip, [ip]
    movc    p0.c1, ip, #0                   @ update control register
#endif
    enable_irq ip

    get_thread_info tsk
    ldw tbl, =sys_call_table        @ load syscall table pointer

    andn    scno, scno, #0xff000000     @ mask off SWI op-code
    andn    scno, scno, #0x00ff0000     @ mask off SWI op-code

    stm.w   (r4, r5), [sp-]         @ push fifth and sixth args
    ldw ip, [tsk+], #TI_FLAGS       @ check for syscall tracing
    cand.a  ip, #_TIF_SYSCALL_TRACE     @ are we tracing syscalls?
    bne __sys_trace

    csub.a  scno, #__NR_syscalls        @ check upper syscall limit
    adr lr, ret_fast_syscall        @ return address
    bea 1f
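    @ In range: the sys_call_table entry for scno is loaded straight
    @ into pc, transferring control to the sys_* routine, which
    @ returns through lr (set above to ret_fast_syscall).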
    ldw pc, [tbl+], scno << #2      @ call sys_* routine
1:
    add r1, sp, #S_OFF
2:  mov why, #0             @ no longer a real syscall
    b   sys_ni_syscall          @ not private func

    /*
     * This is the really slow path.  We're going to be doing
     * context switches, and waiting for our parent to respond.
     */
__sys_trace:
    mov r2, scno
    add r1, sp, #S_OFF
    mov r0, #0              @ trace entry [IP = 0]
    b.l syscall_trace

    adr lr, __sys_trace_return      @ return address
    mov scno, r0            @ syscall number (possibly new)
    add r1, sp, #S_R0 + S_OFF       @ pointer to regs
    csub.a  scno, #__NR_syscalls        @ check upper syscall limit
    bea 2b
    ldm (r0 - r3), [r1]+        @ have to reload r0 - r3
    ldw pc, [tbl+], scno << #2      @ call sys_* routine

__sys_trace_return:
    stw.w   r0, [sp+], #S_R0 + S_OFF    @ save returned r0
    mov r2, scno
    mov r1, sp
    mov r0, #1              @ trace exit [IP = 1]
    b.l syscall_trace
    b   ret_slow_syscall

    .align  5
#ifdef CONFIG_ALIGNMENT_TRAP
    .type   __cr_alignment, #object
__cr_alignment:
    .word   cr_alignment
#endif
    .ltorg

ENTRY(sys_rt_sigreturn)
        add r0, sp, #S_OFF
        mov why, #0     @ prevent syscall restart handling
        b   __sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn)

    __INIT

/*
 * Vector stubs.
 *
 * This code is copied to 0xffff0200 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not
 * exceed 0x300 bytes.
 *
 * Common stub entry macro:
 *   Enter in INTR mode, bsr = PRIV/USER ASR, lr = PRIV/USER PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
    .macro  vector_stub, name, mode
    .align  5

vector_\name:
    @
    @ Save r0, lr_<exception> (parent PC) and bsr_<exception>
    @ (parent ASR)
    @
    stw r0, [sp]
    stw lr, [sp+], #4       @ save r0, lr
    mov lr, bsr
    stw lr, [sp+], #8       @ save bsr

    @
    @ Prepare for PRIV mode.  INTRs remain disabled.
    @
    mov r0, asr
    xor r0, r0, #(\mode ^ PRIV_MODE)
    mov.a   bsr, r0

    @
    @ the branch table must immediately follow this code
    @
    and lr, lr, #0x03
    add lr, lr, #1
    mov r0, sp
    ldw lr, [pc+], lr << #2
    mov.a   pc, lr          @ branch to handler in PRIV mode
ENDPROC(vector_\name)
    .align  2
    @ handler addresses follow this label
    .endm
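
@ The dispatch above uses the low two mode bits of bsr_<exception>
@ (still held in lr) to index the four .long handler addresses that
@ each vector_stub invocation places right after the macro body:
@ entry 0 is the USER-mode handler and entry 3 the PRIV-mode one.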

    .globl  __stubs_start
__stubs_start:
/*
 * Interrupt dispatcher
 */
    vector_stub intr, INTR_MODE

    .long   __intr_user         @  0  (USER)
    .long   __invalid           @  1
    .long   __invalid           @  2
    .long   __intr_priv         @  3  (PRIV)

/*
 * Data abort dispatcher
 * Enter in ABT mode, bsr = USER ASR, lr = USER PC
 */
    vector_stub dabt, ABRT_MODE

    .long   __dabt_user         @  0  (USER)
    .long   __invalid           @  1
    .long   __invalid           @  2  (INTR)
    .long   __dabt_priv         @  3  (PRIV)

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, bsr = USER ASR, lr = USER PC
 */
    vector_stub pabt, ABRT_MODE

    .long   __pabt_user         @  0 (USER)
    .long   __invalid           @  1
    .long   __invalid           @  2 (INTR)
    .long   __pabt_priv         @  3 (PRIV)

/*
 * Undef instr entry dispatcher
 * Enter in EXTN mode, bsr = PRIV/USER ASR, lr = PRIV/USER PC
 */
    vector_stub extn, EXTN_MODE

    .long   __extn_user         @  0 (USER)
    .long   __invalid           @  1
    .long   __invalid           @  2 (INTR)
    .long   __extn_priv         @  3 (PRIV)

/*
 * We group all the following data together to optimise
 * for CPUs with separate I & D caches.
 */
    .align  5

.LCvswi:
    .word   vector_swi

    .globl  __stubs_end
__stubs_end:

    .equ    stubs_offset, __vectors_start + 0x200 - __stubs_start
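
@ The stubs are linked at __stubs_start but execute from a copy placed
@ at __vectors_start + 0x200 (0xffff0200), so the vector entries below
@ add stubs_offset to reach the copied code rather than the link-time
@ addresses.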

    .globl  __vectors_start
__vectors_start:
    jepriv  SYS_ERROR0
    b   vector_extn + stubs_offset
    ldw pc, .LCvswi + stubs_offset
    b   vector_pabt + stubs_offset
    b   vector_dabt + stubs_offset
    jepriv  SYS_ERROR0
    b   vector_intr + stubs_offset
    jepriv  SYS_ERROR0

    .globl  __vectors_end
__vectors_end:

    .data

    .globl  cr_alignment
    .globl  cr_no_alignment
cr_alignment:
    .space  4
cr_no_alignment:
    .space  4