Back to home page

LXR

 
 

    


0001 /*
0002  * arch/sh/kernel/cpu/sh5/entry.S
0003  *
0004  * Copyright (C) 2000, 2001  Paolo Alberelli
0005  * Copyright (C) 2004 - 2008  Paul Mundt
0006  * Copyright (C) 2003, 2004  Richard Curnow
0007  *
0008  * This file is subject to the terms and conditions of the GNU General Public
0009  * License.  See the file "COPYING" in the main directory of this archive
0010  * for more details.
0011  */
0012 #include <linux/errno.h>
0013 #include <linux/init.h>
0014 #include <linux/sys.h>
0015 #include <cpu/registers.h>
0016 #include <asm/processor.h>
0017 #include <asm/unistd.h>
0018 #include <asm/thread_info.h>
0019 #include <asm/asm-offsets.h>
0020 
0021 /*
0022  * SR fields.
0023  */
0024 #define SR_ASID_MASK    0x00ff0000
0025 #define SR_FD_MASK  0x00008000
0026 #define SR_SS       0x08000000
0027 #define SR_BL       0x10000000
0028 #define SR_MD       0x40000000
0029 
0030 /*
0031  * Event code.
0032  */
0033 #define EVENT_INTERRUPT     0
0034 #define EVENT_FAULT_TLB     1
0035 #define EVENT_FAULT_NOT_TLB 2
0036 #define EVENT_DEBUG     3
0037 
0038 /* EXPEVT values */
0039 #define RESET_CAUSE     0x20
0040 #define DEBUGSS_CAUSE       0x980
0041 
0042 /*
0043  * Frame layout. Quad index.
0044  */
0045 #define FRAME_T(x)  FRAME_TBASE+(x*8)
0046 #define FRAME_R(x)  FRAME_RBASE+(x*8)
0047 #define FRAME_S(x)  FRAME_SBASE+(x*8)
0048 #define FSPC        0
0049 #define FSSR        1
0050 #define FSYSCALL_ID 2
0051 
0052 /* Arrange the save frame to be a multiple of 32 bytes long */
0053 #define FRAME_SBASE 0
0054 #define FRAME_RBASE (FRAME_SBASE+(3*8)) /* SYSCALL_ID - SSR - SPC */
0055 #define FRAME_TBASE (FRAME_RBASE+(63*8))    /* r0 - r62 */
0056 #define FRAME_PBASE (FRAME_TBASE+(8*8)) /* tr0 -tr7 */
0057 #define FRAME_SIZE  (FRAME_PBASE+(2*8)) /* pad0-pad1 */
0058 
0059 #define FP_FRAME_SIZE   FP_FRAME_BASE+(33*8)    /* dr0 - dr31 + fpscr */
0060 #define FP_FRAME_BASE   0
0061 
0062 #define SAVED_R2    0*8
0063 #define SAVED_R3    1*8
0064 #define SAVED_R4    2*8
0065 #define SAVED_R5    3*8
0066 #define SAVED_R18   4*8
0067 #define SAVED_R6    5*8
0068 #define SAVED_TR0   6*8
0069 
0070 /* These are the registers saved in the TLB path that aren't saved in the first
0071    level of the normal one. */
0072 #define TLB_SAVED_R25   7*8
0073 #define TLB_SAVED_TR1   8*8
0074 #define TLB_SAVED_TR2   9*8
0075 #define TLB_SAVED_TR3   10*8
0076 #define TLB_SAVED_TR4   11*8
0077 /* Save R0/R1 : PT-migrating compiler currently dishounours -ffixed-r0 and -ffixed-r1 causing
0078    breakage otherwise. */
0079 #define TLB_SAVED_R0    12*8
0080 #define TLB_SAVED_R1    13*8
0081 
0082 #define CLI()               \
0083     getcon  SR, r6;         \
0084     ori r6, 0xf0, r6;       \
0085     putcon  r6, SR;
0086 
0087 #define STI()               \
0088     getcon  SR, r6;         \
0089     andi    r6, ~0xf0, r6;      \
0090     putcon  r6, SR;
0091 
0092 #ifdef CONFIG_PREEMPT
0093 #  define preempt_stop()    CLI()
0094 #else
0095 #  define preempt_stop()
0096 #  define resume_kernel     restore_all
0097 #endif
0098 
0099     .section    .data, "aw"
0100 
0101 #define FAST_TLBMISS_STACK_CACHELINES 4
0102 #define FAST_TLBMISS_STACK_QUADWORDS (4*FAST_TLBMISS_STACK_CACHELINES)
0103 
0104 /* Register back-up area for all exceptions */
0105     .balign 32
0106     /* Allow for 16 quadwords to be pushed by fast tlbmiss handling
0107      * register saves etc. */
0108     .fill FAST_TLBMISS_STACK_QUADWORDS, 8, 0x0
0109 /* This is 32 byte aligned by construction */
0110 /* Register back-up area for all exceptions */
0111 reg_save_area:
0112     /* 14 quadwords: slots SAVED_R2..SAVED_TR0 (0*8..6*8) plus
0113      * TLB_SAVED_R25..TLB_SAVED_R1 (7*8..13*8). */
0114     .quad   0
0115     .quad   0
0116     .quad   0
0117     .quad   0
0118 
0119     .quad   0
0120     .quad   0
0121     .quad   0
0122     .quad   0
0123 
0124     .quad   0
0125     .quad   0
0126     .quad   0
0127     .quad   0
0128 
0129     .quad   0
0130     .quad   0
0131 
0132 /* Save area for RESVEC exceptions. We cannot use reg_save_area because of
0133  * reentrancy. Note this area may be accessed via physical address.
0134  * Align so this fits a whole single cache line, for ease of purging.
0135  */
0136     .balign 32,0,32
0137 resvec_save_area:
0138     .quad   0
0139     .quad   0
0140     .quad   0
0141     .quad   0
0142     .quad   0
0143     .balign 32,0,32
0144 
0143 /* Jump table of 3rd level handlers  */
0144 /* One 32-bit entry per 0x20 of EXPEVT/INTEVT; handle_exception() indexes
0145  * this table with (event_code >> 3) as a byte offset (see ldx.l there). */
0146 trap_jtable:
0147     .long   do_exception_error      /* 0x000 */
0148     .long   do_exception_error      /* 0x020 */
0149 #ifdef CONFIG_MMU
0150     .long   tlb_miss_load               /* 0x040 */
0151     .long   tlb_miss_store              /* 0x060 */
0152 #else
0153     .long   do_exception_error
0154     .long   do_exception_error
0155 #endif
0156     ! ARTIFICIAL pseudo-EXPEVT setting
0157     .long   do_debug_interrupt      /* 0x080 */
0158 #ifdef CONFIG_MMU
0159     .long   tlb_miss_load               /* 0x0A0 */
0160     .long   tlb_miss_store              /* 0x0C0 */
0161 #else
0162     .long   do_exception_error
0163     .long   do_exception_error
0164 #endif
0165     .long   do_address_error_load   /* 0x0E0 */
0166     .long   do_address_error_store  /* 0x100 */
0167 #ifdef CONFIG_SH_FPU
0168     .long   do_fpu_error        /* 0x120 */
0169 #else
0170     .long   do_exception_error      /* 0x120 */
0171 #endif
0172     .long   do_exception_error      /* 0x140 */
0173     .long   system_call             /* 0x160 */
0174     .long   do_reserved_inst        /* 0x180 */
0175     .long   do_illegal_slot_inst    /* 0x1A0 */
0176     .long   do_exception_error      /* 0x1C0 - NMI */
0177     .long   do_exception_error      /* 0x1E0 */
0178     .rept 15
0179         .long do_IRQ        /* 0x200 - 0x3C0 */
0180     .endr
0181     .long   do_exception_error      /* 0x3E0 */
0182     .rept 32
0183         .long do_IRQ        /* 0x400 - 0x7E0 */
0184     .endr
0185     .long   fpu_error_or_IRQA           /* 0x800 */
0186     .long   fpu_error_or_IRQB           /* 0x820 */
0187     .long   do_IRQ          /* 0x840 */
0188     .long   do_IRQ          /* 0x860 */
0189     .rept 6
0190         .long do_exception_error    /* 0x880 - 0x920 */
0191     .endr
0192     .long   breakpoint_trap_handler /* 0x940 */
0193     .long   do_exception_error      /* 0x960 */
0194     .long   do_single_step      /* 0x980 */
0195 
0196     .rept 3
0197         .long do_exception_error    /* 0x9A0 - 0x9E0 */
0198     .endr
0199     .long   do_IRQ          /* 0xA00 */
0200     .long   do_IRQ          /* 0xA20 */
0201 #ifdef CONFIG_MMU
0202     .long   itlb_miss_or_IRQ            /* 0xA40 */
0203 #else
0204     .long   do_IRQ
0205 #endif
0206     .long   do_IRQ          /* 0xA60 */
0207     .long   do_IRQ          /* 0xA80 */
0208 #ifdef CONFIG_MMU
0209     .long   itlb_miss_or_IRQ            /* 0xAA0 */
0210 #else
0211     .long   do_IRQ
0212 #endif
0213     .long   do_exception_error      /* 0xAC0 */
0214     .long   do_address_error_exec   /* 0xAE0 */
0215     .rept 8
0216         .long do_exception_error    /* 0xB00 - 0xBE0 */
0217     .endr
0218     .rept 18
0219         .long do_IRQ        /* 0xC00 - 0xE20 */
0220     .endr
0219 
0220     .section    .text64, "ax"
0221 
0222 /*
0223  * --- Exception/Interrupt/Event Handling Section
0224  */
0225 
0226 /*
0227  * VBR and RESVEC blocks.
0228  *
0229  * First level handler for VBR-based exceptions.
0230  *
0231  * To avoid waste of space, align to the maximum text block size.
0232  * This is assumed to be at most 128 bytes or 32 instructions.
0233  * DO NOT EXCEED 32 instructions on the first level handlers !
0234  *
0235  * Also note that RESVEC is contained within the VBR block
0236  * where the room left (1KB - TEXT_SIZE) allows placing
0237  * the RESVEC block (at most 512B + TEXT_SIZE).
0238  *
0239  * So first (and only) level handler for RESVEC-based exceptions.
0240  *
0241  * Where the fault/interrupt is handled (not_a_tlb_miss, tlb_miss
0242  * and interrupt) we are a lot tight with register space until
0243  * saving onto the stack frame, which is done in handle_exception().
0244  *
0245  */
0246 
0247 /* Per-handler text slot size in bytes (32 SHmedia instructions). */
0248 #define TEXT_SIZE   128
0249 #define BLOCK_SIZE  1664        /* Dynamic check, 13*128 */
0250 
0250     .balign TEXT_SIZE
0251 LVBR_block:
0252     .space  256, 0          /* Power-on class handler, */
0253                     /* not required here       */
0254 /* First-level handler for general (non-TLB, non-debug) exceptions.
0255  * Sits at VBR+0x100; saves minimal state and tail-calls handle_exception. */
0256 not_a_tlb_miss:
0257     synco   /* TAKum03020 (but probably a good idea anyway.) */
0258     /* Save original stack pointer into KCR1 */
0259     putcon  SP, KCR1
0260 
0261     /* Save other original registers into reg_save_area */
0262         movi  reg_save_area, SP
0263     st.q    SP, SAVED_R2, r2
0264     st.q    SP, SAVED_R3, r3
0265     st.q    SP, SAVED_R4, r4
0266     st.q    SP, SAVED_R5, r5
0267     st.q    SP, SAVED_R6, r6
0268     st.q    SP, SAVED_R18, r18
0269     gettr   tr0, r3
0270     st.q    SP, SAVED_TR0, r3
0271 
0272     /* Set args for Non-debug, Not a TLB miss class handler */
0273     getcon  EXPEVT, r2
0274     movi    ret_from_exception, r3
0275     ori r3, 1, r3       /* bit0 set: SHmedia-mode return address */
0276     movi    EVENT_FAULT_NOT_TLB, r4
0277     or  SP, ZERO, r5    /* r5 = reg_save_area pointer */
0278     getcon  KCR1, SP    /* restore original SP before the jump */
0279     pta handle_exception, tr0
0280     blink   tr0, ZERO
0281 
0282     .balign 256
0283     ! VBR+0x200
0284     nop
0285     .balign 256
0286     ! VBR+0x300
0287     nop
0288     .balign 256
0289     /*
0290      * Instead of the natural .balign 1024 place RESVEC here
0291      * respecting the final 1KB alignment.
0292      */
0293     .balign TEXT_SIZE
0294     /*
0295      * Instead of '.space 1024-TEXT_SIZE' place the RESVEC
0296      * block making sure the final alignment is correct.
0297      */
0296 #ifdef CONFIG_MMU
0297 /* First-level data TLB miss handler.  Tries the fast refill path
0298  * (do_fast_page_fault); falls back to the generic second level on failure. */
0299 tlb_miss:
0300     synco   /* TAKum03020 (but probably a good idea anyway.) */
0301     putcon  SP, KCR1
0302     movi    reg_save_area, SP
0303     /* SP is guaranteed 32-byte aligned. */
0304     st.q    SP, TLB_SAVED_R0 , r0
0305     st.q    SP, TLB_SAVED_R1 , r1
0306     st.q    SP, SAVED_R2 , r2
0307     st.q    SP, SAVED_R3 , r3
0308     st.q    SP, SAVED_R4 , r4
0309     st.q    SP, SAVED_R5 , r5
0310     st.q    SP, SAVED_R6 , r6
0311     st.q    SP, SAVED_R18, r18
0312 
0313     /* Save R25 for safety; as/ld may want to use it to achieve the call to
0314      * the code in mm/tlbmiss.c */
0315     st.q    SP, TLB_SAVED_R25, r25
0316     gettr   tr0, r2
0317     gettr   tr1, r3
0318     gettr   tr2, r4
0319     gettr   tr3, r5
0320     gettr   tr4, r18
0321     st.q    SP, SAVED_TR0 , r2
0322     st.q    SP, TLB_SAVED_TR1 , r3
0323     st.q    SP, TLB_SAVED_TR2 , r4
0324     st.q    SP, TLB_SAVED_TR3 , r5
0325     st.q    SP, TLB_SAVED_TR4 , r18
0326 
0327     /* Call do_fast_page_fault(SSR.MD, EXPEVT, TEA) */
0328     pt  do_fast_page_fault, tr0
0329     getcon  SSR, r2
0330     getcon  EXPEVT, r3
0331     getcon  TEA, r4
0332     shlri   r2, 30, r2
0333     andi    r2, 1, r2   /* r2 = SSR.MD */
0334     blink   tr0, LINK
0335 
0336     pt  fixup_to_invoke_general_handler, tr1
0337 
0338     /* If the fast path handler fixed the fault, just drop through quickly
0339        to the restore code right away to return to the excepting context.
0340        */
0341     bnei/u  r2, 0, tr1  /* nonzero result: fast path could not fix it */
0342 
0343 fast_tlb_miss_restore:
0344     ld.q    SP, SAVED_TR0, r2
0345     ld.q    SP, TLB_SAVED_TR1, r3
0346     ld.q    SP, TLB_SAVED_TR2, r4
0347 
0348     ld.q    SP, TLB_SAVED_TR3, r5
0349     ld.q    SP, TLB_SAVED_TR4, r18
0350 
0351     ptabs   r2, tr0
0352     ptabs   r3, tr1
0353     ptabs   r4, tr2
0354     ptabs   r5, tr3
0355     ptabs   r18, tr4
0356 
0357     ld.q    SP, TLB_SAVED_R0, r0
0358     ld.q    SP, TLB_SAVED_R1, r1
0359     ld.q    SP, SAVED_R2, r2
0360     ld.q    SP, SAVED_R3, r3
0361     ld.q    SP, SAVED_R4, r4
0362     ld.q    SP, SAVED_R5, r5
0363     ld.q    SP, SAVED_R6, r6
0364     ld.q    SP, SAVED_R18, r18
0365     ld.q    SP, TLB_SAVED_R25, r25
0366 
0367     getcon  KCR1, SP
0368     rte
0369     nop /* for safety, in case the code is run on sh5-101 cut1.x */
0370 
0371 fixup_to_invoke_general_handler:
0372 
0373     /* OK, new method.  Restore stuff that's not expected to get saved into
0374        the 'first-level' reg save area, then just fall through to setting
0375        up the registers and calling the second-level handler. */
0376 
0377     /* 2nd level expects r2,3,4,5,6,18,tr0 to be saved.  So we must restore
0378        r25,tr1-4 and save r6 to get into the right state.  */
0379 
0380     ld.q    SP, TLB_SAVED_TR1, r3
0381     ld.q    SP, TLB_SAVED_TR2, r4
0382     ld.q    SP, TLB_SAVED_TR3, r5
0383     ld.q    SP, TLB_SAVED_TR4, r18
0384     ld.q    SP, TLB_SAVED_R25, r25
0385 
0386     ld.q    SP, TLB_SAVED_R0, r0
0387     ld.q    SP, TLB_SAVED_R1, r1
0388 
0389     ptabs/u r3, tr1
0390     ptabs/u r4, tr2
0391     ptabs/u r5, tr3
0392     ptabs/u r18, tr4
0393 
0394     /* Set args for Non-debug, TLB miss class handler */
0395     getcon  EXPEVT, r2
0396     movi    ret_from_exception, r3
0397     ori r3, 1, r3
0398     movi    EVENT_FAULT_TLB, r4
0399     or  SP, ZERO, r5
0400     getcon  KCR1, SP
0401     pta handle_exception, tr0
0402     blink   tr0, ZERO
0403 #else /* CONFIG_MMU */
0404     .balign 256
0405 #endif
0403 
0404 /* NB TAKE GREAT CARE HERE TO ENSURE THAT THE INTERRUPT CODE
0405    DOES END UP AT VBR+0x600 */
0406     nop
0407     nop
0408     nop
0409     nop
0410     nop
0411     nop
0412 
0413     .balign 256
0414     /* VBR + 0x600 */
0415 
0416 /* First-level external interrupt handler: same minimal-save pattern as
0417  * not_a_tlb_miss, but classifies via INTEVT and returns via ret_from_irq. */
0418 interrupt:
0419     synco   /* TAKum03020 (but probably a good idea anyway.) */
0420     /* Save original stack pointer into KCR1 */
0421     putcon  SP, KCR1
0422 
0423     /* Save other original registers into reg_save_area */
0424         movi  reg_save_area, SP
0425     st.q    SP, SAVED_R2, r2
0426     st.q    SP, SAVED_R3, r3
0427     st.q    SP, SAVED_R4, r4
0428     st.q    SP, SAVED_R5, r5
0429     st.q    SP, SAVED_R6, r6
0430     st.q    SP, SAVED_R18, r18
0431     gettr   tr0, r3
0432     st.q    SP, SAVED_TR0, r3
0433 
0434     /* Set args for interrupt class handler */
0435     getcon  INTEVT, r2
0436     movi    ret_from_irq, r3
0437     ori r3, 1, r3
0438     movi    EVENT_INTERRUPT, r4
0439     or  SP, ZERO, r5
0440     getcon  KCR1, SP
0441     pta handle_exception, tr0
0442     blink   tr0, ZERO
0443     .balign TEXT_SIZE       /* let's waste the bare minimum */
0444 
0445 LVBR_block_end:             /* Marker. Used for total checking */
0444 
0445     .balign 256
0446 LRESVEC_block:
0447     /* Panic handler. Called with MMU off. Possible causes/actions:
0448      * - Reset:     Jump to program start.
0449      * - Single Step:   Turn off Single Step & return.
0450      * - Others:        Call panic handler, passing PC as arg.
0451      *          (this may need to be extended...)
0452      */
0453 reset_or_panic:
0454     synco   /* TAKum03020 (but probably a good idea anyway.) */
0455     putcon  SP, DCR
0456     /* First save r0-1 and tr0, as we need to use these */
0457     /* MMU is off, so use the physical address of the save area. */
0458     movi    resvec_save_area-CONFIG_PAGE_OFFSET, SP
0459     st.q    SP, 0, r0
0460     st.q    SP, 8, r1
0461     gettr   tr0, r0
0462     st.q    SP, 32, r0
0463 
0464     /* Check cause */
0465     getcon  EXPEVT, r0
0466     movi    RESET_CAUSE, r1
0467     sub r1, r0, r1      /* r1=0 if reset */
0468     movi    _stext-CONFIG_PAGE_OFFSET, r0
0469     ori r0, 1, r0
0470     ptabs   r0, tr0
0471     beqi    r1, 0, tr0      /* Jump to start address if reset */
0472 
0473     getcon  EXPEVT, r0
0474     movi    DEBUGSS_CAUSE, r1
0475     sub r1, r0, r1      /* r1=0 if single step */
0476     pta single_step_panic, tr0
0477     beqi    r1, 0, tr0      /* jump if single step */
0478 
0479     /* Now jump to where we save the registers. */
0480     movi    panic_stash_regs-CONFIG_PAGE_OFFSET, r1
0481     ptabs   r1, tr0
0482     blink   tr0, r63
0483 
0484 single_step_panic:
0485     /* We are in a handler with Single Step set. We need to resume the
0486      * handler, by turning on MMU & turning off Single Step. */
0487     getcon  SSR, r0
0488     movi    SR_MMU, r1
0489     or  r0, r1, r0
0490     movi    ~SR_SS, r1
0491     and r0, r1, r0
0492     putcon  r0, SSR
0493     /* Restore EXPEVT, as the rte won't do this */
0494     getcon  PEXPEVT, r0
0495     putcon  r0, EXPEVT
0496     /* Restore regs */
0497     ld.q    SP, 32, r0
0498     ptabs   r0, tr0
0499     ld.q    SP, 0, r0
0500     ld.q    SP, 8, r1
0501     getcon  DCR, SP
0502     synco
0503     rte
0504 
0504 
0505     .balign 256
0506 debug_exception:
0507     synco   /* TAKum03020 (but probably a good idea anyway.) */
0508     /*
0509      * Single step/software_break_point first level handler.
0510      * Called with MMU off, so the first thing we do is enable it
0511      * by doing an rte with appropriate SSR.
0512      */
0513     putcon  SP, DCR
0514     /* Save SSR & SPC, together with R0 & R1, as we need to use 2 regs. */
0515     movi    resvec_save_area-CONFIG_PAGE_OFFSET, SP
0516 
0517     /* With the MMU off, we are bypassing the cache, so purge any
0518          * data that will be made stale by the following stores.
0519          */
0520     ocbp    SP, 0
0521     synco
0522 
0523     st.q    SP, 0, r0
0524     st.q    SP, 8, r1
0525     getcon  SPC, r0
0526     st.q    SP, 16, r0
0527     getcon  SSR, r0
0528     st.q    SP, 24, r0
0529 
0530     /* Enable MMU, block exceptions, set priv mode, disable single step */
0531     movi    SR_MMU | SR_BL | SR_MD, r1
0532     or  r0, r1, r0
0533     movi    ~SR_SS, r1
0534     and r0, r1, r0
0535     putcon  r0, SSR
0536     /* Force control to debug_exeception_2 when rte is executed */
0537     /* NOTE: 'debug_exeception_2' carries a historical typo ('exeception');
0538      * it is used consistently, so it is left as-is here. */
0539     movi    debug_exeception_2, r0
0540     ori r0, 1, r0      /* force SHmedia, just in case */
0541     putcon  r0, SPC
0542     getcon  DCR, SP
0543     synco
0544     rte
0545 /* Second half of the debug handler, entered via rte with the MMU now on. */
0546 debug_exeception_2:
0547     /* Restore saved regs */
0548     putcon  SP, KCR1
0549     movi    resvec_save_area, SP
0550     ld.q    SP, 24, r0
0551     putcon  r0, SSR
0552     ld.q    SP, 16, r0
0553     putcon  r0, SPC
0554     ld.q    SP, 0, r0
0555     ld.q    SP, 8, r1
0556 
0557     /* Save other original registers into reg_save_area */
0558         movi  reg_save_area, SP
0559     st.q    SP, SAVED_R2, r2
0560     st.q    SP, SAVED_R3, r3
0561     st.q    SP, SAVED_R4, r4
0562     st.q    SP, SAVED_R5, r5
0563     st.q    SP, SAVED_R6, r6
0564     st.q    SP, SAVED_R18, r18
0565     gettr   tr0, r3
0566     st.q    SP, SAVED_TR0, r3
0567 
0568     /* Set args for debug class handler */
0569     getcon  EXPEVT, r2
0570     movi    ret_from_exception, r3
0571     ori r3, 1, r3
0572     movi    EVENT_DEBUG, r4
0573     or  SP, ZERO, r5
0574     getcon  KCR1, SP
0575     pta handle_exception, tr0
0576     blink   tr0, ZERO
0577 
0574 
0575     .balign 256
0576 debug_interrupt:
0577     /* !!! WE COME HERE IN REAL MODE !!! */
0578     /* Hook-up debug interrupt to allow various debugging options to be
0579      * hooked into its handler. */
0580     /* Save original stack pointer into KCR1 */
0581     synco
0582     putcon  SP, KCR1
0583     movi    resvec_save_area-CONFIG_PAGE_OFFSET, SP
0584     ocbp    SP, 0
0585     ocbp    SP, 32
0586     synco
0587 
0588     /* Save other original registers into reg_save_area thru real addresses */
0589     st.q    SP, SAVED_R2, r2
0590     st.q    SP, SAVED_R3, r3
0591     st.q    SP, SAVED_R4, r4
0592     st.q    SP, SAVED_R5, r5
0593     st.q    SP, SAVED_R6, r6
0594     st.q    SP, SAVED_R18, r18
0595     gettr   tr0, r3
0596     st.q    SP, SAVED_TR0, r3
0597 
0598     /* move (spc,ssr)->(pspc,pssr).  The rte will shift
0599        them back again, so that they look like the originals
0600        as far as the real handler code is concerned. */
0601     getcon  spc, r6
0602     putcon  r6, pspc
0603     getcon  ssr, r6
0604     putcon  r6, pssr
0605 
0606     ! construct useful SR for handle_exception
0607     movi    3, r6
0608     shlli   r6, 30, r6      ! r6 = top two SR bits (MD | MMU)
0609     getcon  sr, r18
0610     or  r18, r6, r6
0611     putcon  r6, ssr
0612 
0613     ! SSR is now the current SR with the MD and MMU bits set
0614     ! i.e. the rte will switch back to priv mode and put
0615     ! the mmu back on
0616 
0617     ! construct spc
0618     movi    handle_exception, r18
0619     ori r18, 1, r18     ! for safety (do we need this?)
0620     putcon  r18, spc
0621 
0622     /* Set args for Non-debug, Not a TLB miss class handler */
0623 
0624     ! EXPEVT==0x80 is unused, so 'steal' this value to put the
0625     ! debug interrupt handler in the vectoring table
0626     movi    0x80, r2
0627     movi    ret_from_exception, r3
0628     ori r3, 1, r3
0629     movi    EVENT_FAULT_NOT_TLB, r4
0630 
0631     or  SP, ZERO, r5
0632     movi    CONFIG_PAGE_OFFSET, r6
0633     add r6, r5, r5      ! convert save-area pointer back to a virtual address
0634     getcon  KCR1, SP
0635 
0636     synco   ! for safety
0637     rte ! -> handle_exception, switch back to priv mode again
0638 
0639 LRESVEC_block_end:          /* Marker. Unused. */
0640 
0640 
0641     .balign TEXT_SIZE
0642 
0643 /*
0644  * Second level handler for VBR-based exceptions. Pre-handler.
0645  * In common to all stack-frame sensitive handlers.
0646  *
0647  * Inputs:
0648  * (KCR0) Current [current task union]
0649  * (KCR1) Original SP
0650  * (r2)   INTEVT/EXPEVT
0651  * (r3)   appropriate return address
0652  * (r4)   Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault, 3=debug)
0653  * (r5)   Pointer to reg_save_area
0654  * (SP)   Original SP
0655  *
0656  * Available registers:
0657  * (r6)
0658  * (r18)
0659  * (tr0)
0660  *
0661  */
0662 handle_exception:
0663     /* Common 2nd level handler. */
0664 
0665     /* First thing we need an appropriate stack pointer */
0666     getcon  SSR, r6
0667     shlri   r6, 30, r6
0668     andi    r6, 1, r6       /* r6 = SSR.MD (1 = was in privileged mode) */
0669     pta stack_ok, tr0
0670     bne r6, ZERO, tr0       /* Original stack pointer is fine */
0671 
0672     /* Set stack pointer for user fault */
0673     getcon  KCR0, SP
0674     movi    THREAD_SIZE, r6     /* Point to the end */
0675     add SP, r6, SP
0676 
0677 stack_ok:
0678 
0679 /* DEBUG : check for underflow/overflow of the kernel stack */
0680     pta no_underflow, tr0
0681     getcon  KCR0, r6
0682     movi    1024, r18
0683     add r6, r18, r6
0684     bge SP, r6, tr0     ! ? below 1k from bottom of stack : danger zone
0685 
0686 /* Just panic to cause a crash. */
0687 bad_sp:
0688     ld.b    r63, 0, r6      ! deliberate load via the zero register to fault
0689     nop
0690 
0691 no_underflow:
0692     pta bad_sp, tr0
0693     getcon  kcr0, r6
0694     movi    THREAD_SIZE, r18
0695     add r18, r6, r6
0696     bgt SP, r6, tr0 ! sp above the stack
0697 
0698     /* Make some room for the BASIC frame. */
0699     movi    -(FRAME_SIZE), r6
0700     add SP, r6, SP
0701 
0702 /* Could do this with no stalling if we had another spare register, but the
0703    code below will be OK. */
0704     /* Copy the first-level saves from reg_save_area (r5) into the frame,
0705      * interleaving loads and stores to hide latency. */
0706     ld.q    r5, SAVED_R2, r6
0707     ld.q    r5, SAVED_R3, r18
0708     st.q    SP, FRAME_R(2), r6
0709     ld.q    r5, SAVED_R4, r6
0710     st.q    SP, FRAME_R(3), r18
0711     ld.q    r5, SAVED_R5, r18
0712     st.q    SP, FRAME_R(4), r6
0713     ld.q    r5, SAVED_R6, r6
0714     st.q    SP, FRAME_R(5), r18
0715     ld.q    r5, SAVED_R18, r18
0716     st.q    SP, FRAME_R(6), r6
0717     ld.q    r5, SAVED_TR0, r6
0718     st.q    SP, FRAME_R(18), r18
0719     st.q    SP, FRAME_T(0), r6
0720 
0721     /* Keep old SP around */
0722     getcon  KCR1, r6
0723 
0724     /* Save the rest of the general purpose registers */
0725     st.q    SP, FRAME_R(0), r0
0726     st.q    SP, FRAME_R(1), r1
0727     st.q    SP, FRAME_R(7), r7
0728     st.q    SP, FRAME_R(8), r8
0729     st.q    SP, FRAME_R(9), r9
0730     st.q    SP, FRAME_R(10), r10
0731     st.q    SP, FRAME_R(11), r11
0732     st.q    SP, FRAME_R(12), r12
0733     st.q    SP, FRAME_R(13), r13
0734     st.q    SP, FRAME_R(14), r14
0735 
0736     /* SP is somewhere else */
0737     st.q    SP, FRAME_R(15), r6
0738 
0739     st.q    SP, FRAME_R(16), r16
0740     st.q    SP, FRAME_R(17), r17
0741     /* r18 is saved earlier. */
0742     st.q    SP, FRAME_R(19), r19
0743     st.q    SP, FRAME_R(20), r20
0744     st.q    SP, FRAME_R(21), r21
0745     st.q    SP, FRAME_R(22), r22
0746     st.q    SP, FRAME_R(23), r23
0747     st.q    SP, FRAME_R(24), r24
0748     st.q    SP, FRAME_R(25), r25
0749     st.q    SP, FRAME_R(26), r26
0750     st.q    SP, FRAME_R(27), r27
0751     st.q    SP, FRAME_R(28), r28
0752     st.q    SP, FRAME_R(29), r29
0753     st.q    SP, FRAME_R(30), r30
0754     st.q    SP, FRAME_R(31), r31
0755     st.q    SP, FRAME_R(32), r32
0756     st.q    SP, FRAME_R(33), r33
0757     st.q    SP, FRAME_R(34), r34
0758     st.q    SP, FRAME_R(35), r35
0759     st.q    SP, FRAME_R(36), r36
0760     st.q    SP, FRAME_R(37), r37
0761     st.q    SP, FRAME_R(38), r38
0762     st.q    SP, FRAME_R(39), r39
0763     st.q    SP, FRAME_R(40), r40
0764     st.q    SP, FRAME_R(41), r41
0765     st.q    SP, FRAME_R(42), r42
0766     st.q    SP, FRAME_R(43), r43
0767     st.q    SP, FRAME_R(44), r44
0768     st.q    SP, FRAME_R(45), r45
0769     st.q    SP, FRAME_R(46), r46
0770     st.q    SP, FRAME_R(47), r47
0771     st.q    SP, FRAME_R(48), r48
0772     st.q    SP, FRAME_R(49), r49
0773     st.q    SP, FRAME_R(50), r50
0774     st.q    SP, FRAME_R(51), r51
0775     st.q    SP, FRAME_R(52), r52
0776     st.q    SP, FRAME_R(53), r53
0777     st.q    SP, FRAME_R(54), r54
0778     st.q    SP, FRAME_R(55), r55
0779     st.q    SP, FRAME_R(56), r56
0780     st.q    SP, FRAME_R(57), r57
0781     st.q    SP, FRAME_R(58), r58
0782     st.q    SP, FRAME_R(59), r59
0783     st.q    SP, FRAME_R(60), r60
0784     st.q    SP, FRAME_R(61), r61
0785     st.q    SP, FRAME_R(62), r62
0786 
0787     /*
0788      * Save the S* registers.
0789      */
0790     getcon  SSR, r61
0791     st.q    SP, FRAME_S(FSSR), r61
0792     getcon  SPC, r62
0793     st.q    SP, FRAME_S(FSPC), r62
0794     movi    -1, r62         /* Reset syscall_nr */
0795     st.q    SP, FRAME_S(FSYSCALL_ID), r62
0796 
0797     /* Save the rest of the target registers */
0798     gettr   tr1, r6
0799     st.q    SP, FRAME_T(1), r6
0800     gettr   tr2, r6
0801     st.q    SP, FRAME_T(2), r6
0802     gettr   tr3, r6
0803     st.q    SP, FRAME_T(3), r6
0804     gettr   tr4, r6
0805     st.q    SP, FRAME_T(4), r6
0806     gettr   tr5, r6
0807     st.q    SP, FRAME_T(5), r6
0808     gettr   tr6, r6
0809     st.q    SP, FRAME_T(6), r6
0810     gettr   tr7, r6
0811     st.q    SP, FRAME_T(7), r6
0812 
0813     ! setup FP so that unwinder can wind back through nested kernel mode
0814     ! exceptions
0815     add SP, ZERO, r14
0816 
0817     /* For syscall and debug race condition, get TRA now */
0818     getcon  TRA, r5
0819 
0820     /* We are in a safe position to turn SR.BL off, but set IMASK=0xf
0821      * Also set FD, to catch FPU usage in the kernel.
0822      *
0823      * benedict.gaster@superh.com 29/07/2002
0824      *
0825      * On all SH5-101 revisions it is unsafe to raise the IMASK and at the
0826      * same time change BL from 1->0, as any pending interrupt of a level
0827      * higher than he previous value of IMASK will leak through and be
0828      * taken unexpectedly.
0829      *
0830      * To avoid this we raise the IMASK and then issue another PUTCON to
0831      * enable interrupts.
0832          */
0833     getcon  SR, r6
0834     movi    SR_IMASK | SR_FD, r7
0835     or  r6, r7, r6
0836     putcon  r6, SR
0837     movi    SR_UNBLOCK_EXC, r7
0838     and r6, r7, r6
0839     putcon  r6, SR
0840 
0841 
0842     /* Now call the appropriate 3rd level handler */
0843     or  r3, ZERO, LINK
0844     movi    trap_jtable, r3
0845     shlri   r2, 3, r2       ! r2 = byte offset of the 32-bit table entry
0846                             ! (event codes are multiples of 0x20, entries 4 bytes)
0847     ldx.l   r2, r3, r3
0848     shlri   r2, 2, r2       ! r2 = event code / 0x20, first arg to the handler
0849     ptabs   r3, tr0
0850     or  SP, ZERO, r3        ! second arg: struct pt_regs *
0851     blink   tr0, ZERO
0852 
0849 
0850 /*
0851  * Second level handler for VBR-based exceptions. Post-handlers.
0852  *
0853  * Post-handlers for interrupts (ret_from_irq), exceptions
0854  * (ret_from_exception) and common reentrance doors (restore_all
0855  * to get back to the original context, ret_from_syscall loop to
0856  * check kernel exiting).
0857  *
0858  * ret_with_reschedule and work_notifysig are inner labels of
0859  * the ret_from_syscall loop.
0860  *
0861  * In common to all stack-frame sensitive handlers.
0862  *
0863  * Inputs:
0864  * (SP)   struct pt_regs *, original register's frame pointer (basic)
0865  *
0866  */
0867     .global ret_from_irq
0868 ret_from_irq:
0869     /* Saved SSR.MD decides the return path: kernel context goes through
0870      * resume_kernel (= restore_all when !CONFIG_PREEMPT), user context
0871      * re-enables interrupts and checks for reschedule/signals. */
0872     ld.q    SP, FRAME_S(FSSR), r6
0873     shlri   r6, 30, r6
0874     andi    r6, 1, r6
0875     pta resume_kernel, tr0
0876     bne r6, ZERO, tr0       /* no further checks */
0877     STI()
0878     pta ret_with_reschedule, tr0
0879     blink   tr0, ZERO       /* Do not check softirqs */
0880 
0881     .global ret_from_exception
0882 ret_from_exception:
0883     preempt_stop()
0884 
0885     ld.q    SP, FRAME_S(FSSR), r6
0886     shlri   r6, 30, r6
0887     andi    r6, 1, r6
0888     pta resume_kernel, tr0
0889     bne r6, ZERO, tr0       /* no further checks */
0890 
0891     /* Check softirqs */
0892 
0893 #ifdef CONFIG_PREEMPT
0894     pta   ret_from_syscall, tr0
0895     blink   tr0, ZERO
0896 
0897 resume_kernel:
0898     CLI()
0899 
0900     pta restore_all, tr0
0901 
0902     getcon  KCR0, r6
0903     ld.l    r6, TI_PRE_COUNT, r7
0904     /* NOTE(review): this branches to restore_all when preempt_count == 0,
0905      * and below to restore_all when TIF_NEED_RESCHED is set — both senses
0906      * look inverted relative to the usual preemption logic; confirm
0907      * against mainline before relying on kernel preemption here. */
0908     beq/u   r7, ZERO, tr0
0909 
0910 need_resched:
0911     ld.l    r6, TI_FLAGS, r7
0912     movi    (1 << TIF_NEED_RESCHED), r8
0913     and r8, r7, r8
0914     bne r8, ZERO, tr0
0915 
0916     getcon  SR, r7
0917     andi    r7, 0xf0, r7    ! interrupts masked?
0918     bne r7, ZERO, tr0
0919 
0920     movi    preempt_schedule_irq, r7
0921     ori r7, 1, r7
0922     ptabs   r7, tr1
0923     blink   tr1, LINK
0924 
0925     pta need_resched, tr1
0926     blink   tr1, ZERO
0927 #endif
0921 
0922     .global ret_from_syscall
0923 ret_from_syscall:
0924 
0925 /* Return-to-user loop: reschedule if TIF_NEED_RESCHED, deliver signals /
0926  * resume notifications if pending, otherwise fall into restore_all. */
0927 ret_with_reschedule:
0928     getcon  KCR0, r6        ! r6 contains current_thread_info
0929     ld.l    r6, TI_FLAGS, r7    ! r7 contains current_thread_info->flags
0930 
0931     movi    _TIF_NEED_RESCHED, r8
0932     and r8, r7, r8
0933     pta work_resched, tr0
0934     bne r8, ZERO, tr0
0935 
0936     pta restore_all, tr1
0937 
0938     movi    (_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), r8
0939     and r8, r7, r8
0940     pta work_notifysig, tr0
0941     bne r8, ZERO, tr0
0942 
0943     blink   tr1, ZERO
0944 
0945 work_resched:
0946     pta ret_from_syscall, tr0
0947     gettr   tr0, LINK       ! schedule() returns back into the check loop
0948     movi    schedule, r6
0949     ptabs   r6, tr0
0950     blink   tr0, ZERO       /* Call schedule(), return on top */
0951 
0952 work_notifysig:
0953     gettr   tr1, LINK       ! return to restore_all afterwards
0954 
0955     movi    do_notify_resume, r6
0956     ptabs   r6, tr0
0957     or  SP, ZERO, r2
0958     or  r7, ZERO, r3
0959     blink   tr0, LINK       /* Call do_notify_resume(regs, current_thread_info->flags), return here */
0958 
restore_all:
	/* Restore the complete register state from the save frame at SP,
	 * then RTE back to the interrupted context.  Order matters: the
	 * scratch registers used while rebuilding SSR/SPC (r59-r62) are
	 * reloaded last, and SP itself is the very last restore. */

	/* Do prefetches */

	/* Restore the eight branch-target registers tr0-tr7 via r6-r9. */
	ld.q	SP, FRAME_T(0), r6
	ld.q	SP, FRAME_T(1), r7
	ld.q	SP, FRAME_T(2), r8
	ld.q	SP, FRAME_T(3), r9
	ptabs	r6, tr0
	ptabs	r7, tr1
	ptabs	r8, tr2
	ptabs	r9, tr3
	ld.q	SP, FRAME_T(4), r6
	ld.q	SP, FRAME_T(5), r7
	ld.q	SP, FRAME_T(6), r8
	ld.q	SP, FRAME_T(7), r9
	ptabs	r6, tr4
	ptabs	r7, tr5
	ptabs	r8, tr6
	ptabs	r9, tr7

	/* Restore the general registers.  r15 (SP) is deliberately skipped
	 * here and restored at the very end; r59-r62 are skipped because
	 * they are still needed as scratch below. */
	ld.q	SP, FRAME_R(0), r0
	ld.q	SP, FRAME_R(1), r1
	ld.q	SP, FRAME_R(2), r2
	ld.q	SP, FRAME_R(3), r3
	ld.q	SP, FRAME_R(4), r4
	ld.q	SP, FRAME_R(5), r5
	ld.q	SP, FRAME_R(6), r6
	ld.q	SP, FRAME_R(7), r7
	ld.q	SP, FRAME_R(8), r8
	ld.q	SP, FRAME_R(9), r9
	ld.q	SP, FRAME_R(10), r10
	ld.q	SP, FRAME_R(11), r11
	ld.q	SP, FRAME_R(12), r12
	ld.q	SP, FRAME_R(13), r13
	ld.q	SP, FRAME_R(14), r14

	ld.q	SP, FRAME_R(16), r16
	ld.q	SP, FRAME_R(17), r17
	ld.q	SP, FRAME_R(18), r18
	ld.q	SP, FRAME_R(19), r19
	ld.q	SP, FRAME_R(20), r20
	ld.q	SP, FRAME_R(21), r21
	ld.q	SP, FRAME_R(22), r22
	ld.q	SP, FRAME_R(23), r23
	ld.q	SP, FRAME_R(24), r24
	ld.q	SP, FRAME_R(25), r25
	ld.q	SP, FRAME_R(26), r26
	ld.q	SP, FRAME_R(27), r27
	ld.q	SP, FRAME_R(28), r28
	ld.q	SP, FRAME_R(29), r29
	ld.q	SP, FRAME_R(30), r30
	ld.q	SP, FRAME_R(31), r31
	ld.q	SP, FRAME_R(32), r32
	ld.q	SP, FRAME_R(33), r33
	ld.q	SP, FRAME_R(34), r34
	ld.q	SP, FRAME_R(35), r35
	ld.q	SP, FRAME_R(36), r36
	ld.q	SP, FRAME_R(37), r37
	ld.q	SP, FRAME_R(38), r38
	ld.q	SP, FRAME_R(39), r39
	ld.q	SP, FRAME_R(40), r40
	ld.q	SP, FRAME_R(41), r41
	ld.q	SP, FRAME_R(42), r42
	ld.q	SP, FRAME_R(43), r43
	ld.q	SP, FRAME_R(44), r44
	ld.q	SP, FRAME_R(45), r45
	ld.q	SP, FRAME_R(46), r46
	ld.q	SP, FRAME_R(47), r47
	ld.q	SP, FRAME_R(48), r48
	ld.q	SP, FRAME_R(49), r49
	ld.q	SP, FRAME_R(50), r50
	ld.q	SP, FRAME_R(51), r51
	ld.q	SP, FRAME_R(52), r52
	ld.q	SP, FRAME_R(53), r53
	ld.q	SP, FRAME_R(54), r54
	ld.q	SP, FRAME_R(55), r55
	ld.q	SP, FRAME_R(56), r56
	ld.q	SP, FRAME_R(57), r57
	ld.q	SP, FRAME_R(58), r58

	/* Block exceptions while SSR/SPC are rebuilt, then splice the
	 * *current* ASID into the saved SSR so a concurrent ASID switch
	 * (e.g. by the scheduler) is not undone by the restore. */
	getcon	SR, r59
	movi	SR_BLOCK_EXC, r60
	or	r59, r60, r59
	putcon	r59, SR			/* SR.BL = 1, keep nesting out */
	ld.q	SP, FRAME_S(FSSR), r61
	ld.q	SP, FRAME_S(FSPC), r62
	movi	SR_ASID_MASK, r60
	and	r59, r60, r59
	andc	r61, r60, r61		/* Clear out older ASID */
	or	r59, r61, r61		/* Retain current ASID */
	putcon	r61, SSR
	putcon	r62, SPC

	/* Ignore FSYSCALL_ID */

	/* Now reload the scratch registers used above. */
	ld.q	SP, FRAME_R(59), r59
	ld.q	SP, FRAME_R(60), r60
	ld.q	SP, FRAME_R(61), r61
	ld.q	SP, FRAME_R(62), r62

	/* Last touch */
	ld.q	SP, FRAME_R(15), SP
	rte
	nop
1063 
1064 /*
1065  * Third level handlers for VBR-based exceptions. Adapting args to
1066  * and/or deflecting to fourth level handlers.
1067  *
1068  * Fourth level handlers interface.
1069  * Most are C-coded handlers directly pointed by the trap_jtable.
1070  * (Third = Fourth level)
1071  * Inputs:
1072  * (r2)   fault/interrupt code, entry number (e.g. NMI = 14,
1073  *    IRL0-3 (0000) = 16, RTLBMISS = 2, SYSCALL = 11, etc ...)
1074  * (r3)   struct pt_regs *, original register's frame pointer
1075  * (r4)   Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault)
1076  * (r5)   TRA control register (for syscall/debug benefit only)
1077  * (LINK) return address
1078  * (SP)   = r3
1079  *
1080  * Kernel TLB fault handlers will get a slightly different interface.
1081  * (r2)   struct pt_regs *, original register's frame pointer
1082  * (r3)   page fault error code (see asm/thread_info.h)
1083  * (r4)   Effective Address of fault
1084  * (LINK) return address
1085  * (SP)   = r2
1086  *
1087  * fpu_error_or_IRQ? is a helper to deflect to the right cause.
1088  *
1089  */
#ifdef CONFIG_MMU
/* Third-level TLB miss handlers: marshal arguments and deflect to the
 * C do_page_fault(regs, error_code, address) handler.  SP == pt_regs *
 * on entry (set up by the second-level handler). */
tlb_miss_load:
	or	SP, ZERO, r2		/* arg0: struct pt_regs * */
	or	ZERO, ZERO, r3		/* Read */
	getcon	TEA, r4			/* arg2: faulting effective address */
	pta	call_do_page_fault, tr0
	beq	ZERO, ZERO, tr0		/* unconditional branch */

tlb_miss_store:
	or	SP, ZERO, r2
	movi	FAULT_CODE_WRITE, r3		/* Write */
	getcon	TEA, r4
	pta	call_do_page_fault, tr0
	beq	ZERO, ZERO, tr0

itlb_miss_or_IRQ:
	/* Event code in r4 distinguishes a real ITLB miss from an
	 * interrupt that shares this vector. */
	pta	its_IRQ, tr0
	beqi/u	r4, EVENT_INTERRUPT, tr0

	/* ITLB miss */
	or	SP, ZERO, r2
	movi	FAULT_CODE_ITLB, r3
	getcon	TEA, r4
	/* Fall through */

call_do_page_fault:
	movi	do_page_fault, r6
	ptabs	r6, tr0
	blink	tr0, ZERO		/* tail-call; LINK already set by caller */
#endif /* CONFIG_MMU */
1120 
fpu_error_or_IRQA:
	/* Shared vector: an interrupt (event code in r4) goes to do_IRQ,
	 * otherwise treat as an FPU-disabled trap (or report an error when
	 * the kernel has no FPU support built in). */
	pta	its_IRQ, tr0
	beqi/l	r4, EVENT_INTERRUPT, tr0
#ifdef CONFIG_SH_FPU
	movi	fpu_state_restore_trap_handler, r6
#else
	movi	do_exception_error, r6
#endif
	ptabs	r6, tr0
	blink	tr0, ZERO
1131 
fpu_error_or_IRQB:
	/* Identical to fpu_error_or_IRQA: deflect interrupts to do_IRQ,
	 * otherwise to the FPU restore trap handler (or the generic
	 * exception-error handler without FPU support). */
	pta	its_IRQ, tr0
	beqi/l	r4, EVENT_INTERRUPT, tr0
#ifdef CONFIG_SH_FPU
	movi	fpu_state_restore_trap_handler, r6
#else
	movi	do_exception_error, r6
#endif
	ptabs	r6, tr0
	blink	tr0, ZERO
1142 
its_IRQ:
	/* Hand the event off to the generic IRQ dispatcher. */
	movi	do_IRQ, r6
	ptabs	r6, tr0
	blink	tr0, ZERO		/* tail-call; LINK already set by caller */
1147 
1148 /*
1149  * system_call/unknown_trap third level handler:
1150  *
1151  * Inputs:
1152  * (r2)   fault/interrupt code, entry number (TRAP = 11)
1153  * (r3)   struct pt_regs *, original register's frame pointer
1154  * (r4)   Not used. Event (0=interrupt, 1=TLB miss fault, 2=Not TLB miss fault)
1155  * (r5)   TRA Control Reg (0x00xyzzzz: x=1 SYSCALL, y = #args, z=nr)
1156  * (SP)   = r3
1157  * (LINK) return address: ret_from_exception
1158  * (*r3)  Syscall parms: SC#, arg0, arg1, ..., arg5 in order (Saved r2/r7)
1159  *
1160  * Outputs:
1161  * (*r3)  Syscall reply (Saved r2)
1162  * (LINK) In case of syscall only it can be scrapped.
1163  *        Common second level post handler will be ret_from_syscall.
1164  *        Common (non-trace) exit point to that is syscall_ret (saving
1165  *        result to r2). Common bad exit point is syscall_bad (returning
1166  *        ENOSYS then saved to r2).
1167  *
1168  */
1169 
unknown_trap:
	/* Unknown Trap or User Trace */
	movi	do_unknown_trapa, r6
	ptabs	r6, tr0
	ld.q	r3, FRAME_R(9), r2	/* r2 = #arg << 16 | syscall # */
	andi	r2, 0x1ff, r2		/* r2 = syscall # */
	blink	tr0, LINK		/* call do_unknown_trapa(), return below */

	/* Re-enter the common syscall exit path. */
	pta	syscall_ret, tr0
	blink	tr0, ZERO
1180 
	/* New syscall implementation */
system_call:
	/* Validate the TRA value: only 0x1yzzzz encodings are system
	 * calls, anything else deflects to unknown_trap. */
	pta	unknown_trap, tr0
	or	r5, ZERO, r4		/* TRA (=r5) -> r4 */
	shlri	r4, 20, r4
	bnei	r4, 1, tr0		/* unknown_trap if not 0x1yzzzz */

	/* It's a system call */
	st.q	r3, FRAME_S(FSYSCALL_ID), r5	/* ID (0x1yzzzz) -> stack */
	andi	r5, 0x1ff, r5			/* syscall # -> r5    */

	STI()					/* re-enable interrupts */

	/* Range-check the syscall number; out-of-range falls through to
	 * the ENOSYS return. */
	pta	syscall_allowed, tr0
	movi	NR_syscalls - 1, r4	/* Last valid */
	bgeu/l	r4, r5, tr0

syscall_bad:
	/* Return ENOSYS ! */
	movi	-(ENOSYS), r2		/* Fall-through */

	.global syscall_ret
syscall_ret:
	/* Common (non-trace) syscall exit: store the result, advance the
	 * saved PC past the trapa, and resume the normal return path. */
	st.q	SP, FRAME_R(9), r2	/* Expecting SP back to BASIC frame */
	ld.q	SP, FRAME_S(FSPC), r2
	addi	r2, 4, r2		/* Move PC, being pre-execution event */
	st.q	SP, FRAME_S(FSPC), r2
	pta	ret_from_syscall, tr0
	blink	tr0, ZERO
1210 
1211 
1212 /*  A different return path for ret_from_fork, because we now need
1213  *  to call schedule_tail with the later kernels. Because prev is
1214  *  loaded into r2 by switch_to() means we can just call it straight  away
1215  */
1216 
.global ret_from_fork
ret_from_fork:
	/* First return of a newly forked task: switch_to() has left prev
	 * in r2, so schedule_tail(prev) can be called directly.  The low
	 * bit set on the target address selects SHmedia mode. */
	movi	schedule_tail,r5
	ori	r5, 1, r5
	ptabs	r5, tr0
	blink	tr0, LINK

	/* Then follow the standard syscall return path to userspace. */
	ld.q	SP, FRAME_S(FSPC), r2
	addi	r2, 4, r2		/* Move PC, being pre-execution event */
	st.q	SP, FRAME_S(FSPC), r2
	pta	ret_from_syscall, tr0
	blink	tr0, ZERO
1230 
.global ret_from_kernel_thread
ret_from_kernel_thread:
	/* First schedule of a kernel thread: finish the context switch
	 * via schedule_tail(prev) (prev already in r2 from switch_to). */
	movi	schedule_tail,r5
	ori	r5, 1, r5		/* set SHmedia-mode bit on target */
	ptabs	r5, tr0
	blink	tr0, LINK

	/* Invoke the thread function: saved r3 = fn, saved r2 = arg. */
	ld.q	SP, FRAME_R(2), r2
	ld.q	SP, FRAME_R(3), r3
	ptabs	r3, tr0
	blink	tr0, LINK

	/* If the thread function returns, exit through the normal
	 * syscall return sequence. */
	ld.q	SP, FRAME_S(FSPC), r2
	addi	r2, 4, r2		/* Move PC, being pre-execution event */
	st.q	SP, FRAME_S(FSPC), r2
	pta	ret_from_syscall, tr0
	blink	tr0, ZERO
1249 
syscall_allowed:
	/* Use LINK to deflect the exit point, default is syscall_ret */
	pta	syscall_ret, tr0
	gettr	tr0, LINK
	pta	syscall_notrace, tr0

	/* Check the thread flags (thread_info via KCR0) for syscall
	 * tracing work; skip straight to dispatch if none is set. */
	getcon	KCR0, r2
	ld.l	r2, TI_FLAGS, r4
	movi	_TIF_WORK_SYSCALL_MASK, r6
	and	r6, r4, r6
	beq/l	r6, ZERO, tr0

	/* Trace it by calling syscall_trace before and after */
	movi	do_syscall_trace_enter, r4
	or	SP, ZERO, r2		/* arg0: struct pt_regs * */
	ptabs	r4, tr0
	blink	tr0, LINK

	/* Save the retval */
	st.q	SP, FRAME_R(2), r2

	/* Reload syscall number as r5 is trashed by do_syscall_trace_enter */
	ld.q	SP, FRAME_S(FSYSCALL_ID), r5
	andi	r5, 0x1ff, r5

	/* Traced syscalls exit through syscall_ret_trace instead. */
	pta	syscall_ret_trace, tr0
	gettr	tr0, LINK

syscall_notrace:
	/* Now point to the appropriate 4th level syscall handler */
	movi	sys_call_table, r4
	shlli	r5, 2, r5		/* scale by 4: table of 32-bit entries */
	ldx.l	r4, r5, r5
	ptabs	r5, tr0

	/* Prepare original args */
	ld.q	SP, FRAME_R(2), r2
	ld.q	SP, FRAME_R(3), r3
	ld.q	SP, FRAME_R(4), r4
	ld.q	SP, FRAME_R(5), r5
	ld.q	SP, FRAME_R(6), r6
	ld.q	SP, FRAME_R(7), r7

	/* And now the trick for those syscalls requiring regs * ! */
	or	SP, ZERO, r8

	/* Call it */
	blink	tr0, ZERO	/* LINK is already properly set */

syscall_ret_trace:
	/* We get back here only if under trace */
	st.q	SP, FRAME_R(9), r2	/* Save return value */

	movi	do_syscall_trace_leave, LINK
	or	SP, ZERO, r2		/* arg0: struct pt_regs * */
	ptabs	LINK, tr0
	blink	tr0, LINK

	/* This needs to be done after any syscall tracing */
	ld.q	SP, FRAME_S(FSPC), r2
	addi	r2, 4, r2	/* Move PC, being pre-execution event */
	st.q	SP, FRAME_S(FSPC), r2

	pta	ret_from_syscall, tr0
	blink	tr0, ZERO		/* Resume normal return sequence */
1315 
1316 /*
1317  * --- Switch to running under a particular ASID and return the previous ASID value
1318  * --- The caller is assumed to have done a cli before calling this.
1319  *
1320  * Input r2 : new ASID
1321  * Output r2 : old ASID
1322  */
1323 
	.global switch_and_save_asid
switch_and_save_asid:
	/* Switch SR.ASID to the value in r2 and return the old ASID in r2.
	 * The new SR is installed via SSR/SPC + RTE so the change takes
	 * effect atomically; caller must have interrupts blocked. */
	getcon	sr, r0
	movi	255, r4
	shlli	r4, 16, r4	/* r4 = mask to select ASID */
	and	r0, r4, r3	/* r3 = shifted old ASID */
	andi	r2, 255, r2	/* mask down new ASID */
	shlli	r2, 16, r2	/* align new ASID against SR.ASID */
	andc	r0, r4, r0	/* efface old ASID from SR */
	or	r0, r2, r0	/* insert the new ASID */
	putcon	r0, ssr
	movi	1f, r0
	putcon	r0, spc
	rte			/* SR <- SSR, PC <- SPC: ASID now switched */
	nop
1:
	ptabs	LINK, tr0
	shlri	r3, 16, r2	/* r2 = old ASID */
	blink	tr0, r63	/* return to caller */
1343 
	.global route_to_panic_handler
route_to_panic_handler:
	/* Switch to real mode, goto panic_handler, don't return.  Useful for
	   last-chance debugging, e.g. if no output wants to go to the console.
	   */

	movi	panic_handler - CONFIG_PAGE_OFFSET, r1	/* physical address */
	ptabs	r1, tr0
	pta	1f, tr1
	gettr	tr1, r0
	putcon	r0, spc
	getcon	sr, r0
	/* Clear SR.MMU (bit 31) in the to-be-restored SR so the RTE below
	 * lands at label 1 with the MMU off (real mode). */
	movi	1, r1
	shlli	r1, 31, r1
	andc	r0, r1, r0
	putcon	r0, ssr
	rte
	nop
1:	/* Now in real mode */
	blink	tr0, r63	/* jump to panic_handler, never returns */
	nop
1365 
	.global peek_real_address_q
peek_real_address_q:
	/* Two args:
	   r2 : real mode address to peek
	   r2(out) : result quadword

	   This is provided as a cheapskate way of manipulating device
	   registers for debugging (to avoid the need to ioremap the debug
	   module, and to avoid the need to ioremap the watchpoint
	   controller in a way that identity maps sufficient bits to avoid the
	   SH5-101 cut2 silicon defect).

	   This code is not performance critical
	*/

	add.l	r2, r63, r2	/* sign extend address */
	getcon	sr, r0		/* r0 = saved original SR */
	/* Set SR.BL (bit 28) so no exception can nest while we juggle
	 * SSR/SPC for the mode switches below. */
	movi	1, r1
	shlli	r1, 28, r1
	or	r0, r1, r1	/* r0 with block bit set */
	putcon	r1, sr		/* now in critical section */
	movi	1, r36
	shlli	r36, 31, r36
	andc	r1, r36, r1	/* turn sr.mmu off in real mode section */

	putcon	r1, ssr
	movi	.peek0 - CONFIG_PAGE_OFFSET, r36 /* real mode target address */
	movi	1f, r37		/* virtual mode return addr */
	putcon	r36, spc

	synco
	rte			/* drop into real mode at .peek0 */
	nop

.peek0:	/* come here in real mode, don't touch caches!!
           still in critical section (sr.bl==1) */
	putcon	r0, ssr	/* prime SSR/SPC for the return to virtual mode */
	putcon	r37, spc
	/* Here's the actual peek.  If the address is bad, all bets are now off
	 * what will happen (handlers invoked in real-mode = bad news) */
	ld.q	r2, 0, r2
	synco
	rte	/* Back to virtual mode */
	nop

1:
	ptabs	LINK, tr0
	blink	tr0, r63	/* return, result in r2 */
1414 
	.global poke_real_address_q
poke_real_address_q:
	/* Two args:
	   r2 : real mode address to poke
	   r3 : quadword value to write.

	   This is provided as a cheapskate way of manipulating device
	   registers for debugging (to avoid the need to ioremap the debug
	   module, and to avoid the need to ioremap the watchpoint
	   controller in a way that identity maps sufficient bits to avoid the
	   SH5-101 cut2 silicon defect).

	   This code is not performance critical
	*/

	add.l	r2, r63, r2	/* sign extend address */
	getcon	sr, r0		/* r0 = saved original SR */
	/* Set SR.BL (bit 28) so no exception can nest while we juggle
	 * SSR/SPC for the mode switches below. */
	movi	1, r1
	shlli	r1, 28, r1
	or	r0, r1, r1	/* r0 with block bit set */
	putcon	r1, sr		/* now in critical section */
	movi	1, r36
	shlli	r36, 31, r36
	andc	r1, r36, r1	/* turn sr.mmu off in real mode section */

	putcon	r1, ssr
	movi	.poke0-CONFIG_PAGE_OFFSET, r36 /* real mode target address */
	movi	1f, r37		/* virtual mode return addr */
	putcon	r36, spc

	synco
	rte			/* drop into real mode at .poke0 */
	nop

.poke0:	/* come here in real mode, don't touch caches!!
           still in critical section (sr.bl==1) */
	putcon	r0, ssr	/* prime SSR/SPC for the return to virtual mode */
	putcon	r37, spc
	/* Here's the actual poke.  If the address is bad, all bets are now off
	 * what will happen (handlers invoked in real-mode = bad news) */
	st.q	r2, 0, r3
	synco
	rte	/* Back to virtual mode */
	nop

1:
	ptabs	LINK, tr0
	blink	tr0, r63	/* return */
1463 
1464 #ifdef CONFIG_MMU
1465 /*
1466  * --- User Access Handling Section
1467  */
1468 
1469 /*
1470  * User Access support. It all moved to non inlined Assembler
1471  * functions in here.
1472  *
1473  * __kernel_size_t __copy_user(void *__to, const void *__from,
1474  *                 __kernel_size_t __n)
1475  *
1476  * Inputs:
1477  * (r2)  target address
1478  * (r3)  source address
1479  * (r4)  size in bytes
1480  *
 * Outputs:
1482  * (*r2) target data
1483  * (r2)  non-copied bytes
1484  *
1485  * If a fault occurs on the user pointer, bail out early and return the
1486  * number of bytes not copied in r2.
1487  * Strategy : for large blocks, call a real memcpy function which can
1488  * move >1 byte at a time using unaligned ld/st instructions, and can
1489  * manipulate the cache using prefetch + alloco to improve the speed
1490  * further.  If a fault occurs in that function, just revert to the
1491  * byte-by-byte approach used for small blocks; this is rare so the
1492  * performance hit for that case does not matter.
1493  *
1494  * For small blocks it's not worth the overhead of setting up and calling
1495  * the memcpy routine; do the copy a byte at a time.
1496  *
1497  */
	.global __copy_user
__copy_user:
	/* __kernel_size_t __copy_user(void *to, const void *from, size_t n)
	 * Small copies (n <= 16) go byte-by-byte; larger ones call the
	 * fast copy_user_memcpy, with __copy_user_fixup as the fault
	 * fall-back.  Returns the number of bytes NOT copied in r2. */
	pta	__copy_user_byte_by_byte, tr1
	movi	16, r0 ! this value is a best guess, should tune it by benchmarking
	bge/u	r0, r4, tr1
	pta	copy_user_memcpy, tr0
	addi	SP, -32, SP
	/* Save arguments in case we have to fix-up unhandled page fault */
	st.q	SP, 0, r2
	st.q	SP, 8, r3
	st.q	SP, 16, r4
	st.q	SP, 24, r35 ! r35 is callee-save
	/* Save LINK in a register to reduce RTS time later (otherwise
	   ld SP,*,LINK;ptabs LINK;trn;blink trn,r63 becomes a critical path) */
	ori	LINK, 0, r35
	blink	tr0, LINK

	/* Copy completed normally if we get back here */
	ptabs	r35, tr0
	ld.q	SP, 24, r35
	/* don't restore r2-r4, pointless */
	/* set result=r2 to zero as the copy must have succeeded. */
	or	r63, r63, r2
	addi	SP, 32, SP
	blink	tr0, r63 ! RTS
1523 
	.global __copy_user_fixup
__copy_user_fixup:
	/* Fault fixup target for copy_user_memcpy: restore the saved
	 * arguments and retry the whole copy byte-by-byte, so the exact
	 * faulting byte determines the residual count. */
	/* Restore stack frame */
	ori	r35, 0, LINK
	ld.q	SP, 24, r35
	ld.q	SP, 16, r4
	ld.q	SP,  8, r3
	ld.q	SP,  0, r2
	addi	SP, 32, SP
	/* Fall through to original code, in the 'same' state we entered with */

/* The slow byte-by-byte method is used if the fast copy traps due to a bad
   user address.  In that rare case, the speed drop can be tolerated. */
__copy_user_byte_by_byte:
	pta	___copy_user_exit, tr1
	pta	___copy_user1, tr0
	beq/u	r4, r63, tr1	/* early exit for zero length copy */
	/* r0 = (dest - src - 1): lets the store below address dest via
	 * stx.b r3, r0 after r3 has already been incremented. */
	sub	r2, r3, r0
	addi	r0, -1, r0

___copy_user1:
	ld.b	r3, 0, r5		/* Fault address 1 */

	/* Could rewrite this to use just 1 add, but the second comes 'free'
	   due to load latency */
	addi	r3, 1, r3
	addi	r4, -1, r4		/* No real fixup required */
___copy_user2:
	stx.b	r3, r0, r5		/* Fault address 2 */
	bne	r4, ZERO, tr0

___copy_user_exit:
	/* Return residual byte count (0 on success, remaining on fault). */
	or	r4, ZERO, r2
	ptabs	LINK, tr0
	blink	tr0, ZERO
1559 
1560 /*
1561  * __kernel_size_t __clear_user(void *addr, __kernel_size_t size)
1562  *
1563  * Inputs:
1564  * (r2)  target address
1565  * (r3)  size in bytes
1566  *
 * Outputs:
1568  * (*r2) zero-ed target data
1569  * (r2)  non-zero-ed bytes
1570  */
	.global __clear_user
__clear_user:
	/* __kernel_size_t __clear_user(void *addr, size_t size)
	 * Zero 'size' bytes at 'addr' one byte at a time; a fault exits
	 * early via the __ex_table entry.  Returns bytes NOT zeroed. */
	pta	___clear_user_exit, tr1
	pta	___clear_user1, tr0
	beq/u	r3, r63, tr1	/* early exit for zero length */

___clear_user1:
	st.b	r2, 0, ZERO		/* Fault address */
	addi	r2, 1, r2
	addi	r3, -1, r3		/* No real fixup required */
	bne	r3, ZERO, tr0

___clear_user_exit:
	/* Return residual byte count (0 on success). */
	or	r3, ZERO, r2
	ptabs	LINK, tr0
	blink	tr0, ZERO
1587 
1588 #endif /* CONFIG_MMU */
1589 
1590 /*
1591  * extern long __get_user_asm_?(void *val, long addr)
1592  *
1593  * Inputs:
1594  * (r2)  dest address
1595  * (r3)  source address (in User Space)
1596  *
 * Outputs:
1598  * (r2)  -EFAULT (faulting)
1599  *       0   (not faulting)
1600  */
	/* __get_user_asm_{b,w,l,q}: fetch one byte/word/long/quad from a
	 * user address (r3) into the kernel destination (r2).  r2 is
	 * preloaded with -EFAULT; the faulting load's __ex_table entry
	 * jumps past the success path, so r2 is 0 only when the load and
	 * store both complete. */
	.global __get_user_asm_b
__get_user_asm_b:
	or	r2, ZERO, r4		/* r4 = dest pointer */
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___get_user_asm_b1:
	ld.b	r3, 0, r5		/* r5 = data */
	st.b	r4, 0, r5
	or	ZERO, ZERO, r2		/* success: return 0 */

___get_user_asm_b_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO


	.global __get_user_asm_w
__get_user_asm_w:
	or	r2, ZERO, r4
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___get_user_asm_w1:
	ld.w	r3, 0, r5		/* r5 = data */
	st.w	r4, 0, r5
	or	ZERO, ZERO, r2

___get_user_asm_w_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO


	.global __get_user_asm_l
__get_user_asm_l:
	or	r2, ZERO, r4
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___get_user_asm_l1:
	ld.l	r3, 0, r5		/* r5 = data */
	st.l	r4, 0, r5
	or	ZERO, ZERO, r2

___get_user_asm_l_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO


	.global __get_user_asm_q
__get_user_asm_q:
	or	r2, ZERO, r4
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___get_user_asm_q1:
	ld.q	r3, 0, r5		/* r5 = data */
	st.q	r4, 0, r5
	or	ZERO, ZERO, r2

___get_user_asm_q_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO
1659 
1660 /*
1661  * extern long __put_user_asm_?(void *pval, long addr)
1662  *
1663  * Inputs:
1664  * (r2)  kernel pointer to value
1665  * (r3)  dest address (in User Space)
1666  *
 * Outputs:
1668  * (r2)  -EFAULT (faulting)
1669  *       0   (not faulting)
1670  */
	/* __put_user_asm_{b,w,l,q}: store one byte/word/long/quad read
	 * from the kernel pointer (r2) to the user address (r3).  r2 is
	 * preloaded with -EFAULT; a faulting store skips the success path
	 * via its __ex_table entry, so r2 is 0 only on success. */
	.global __put_user_asm_b
__put_user_asm_b:
	ld.b	r2, 0, r4		/* r4 = data */
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___put_user_asm_b1:
	st.b	r3, 0, r4
	or	ZERO, ZERO, r2		/* success: return 0 */

___put_user_asm_b_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO


	.global __put_user_asm_w
__put_user_asm_w:
	ld.w	r2, 0, r4		/* r4 = data */
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___put_user_asm_w1:
	st.w	r3, 0, r4
	or	ZERO, ZERO, r2

___put_user_asm_w_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO


	.global __put_user_asm_l
__put_user_asm_l:
	ld.l	r2, 0, r4		/* r4 = data */
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___put_user_asm_l1:
	st.l	r3, 0, r4
	or	ZERO, ZERO, r2

___put_user_asm_l_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO


	.global __put_user_asm_q
__put_user_asm_q:
	ld.q	r2, 0, r4		/* r4 = data */
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___put_user_asm_q1:
	st.q	r3, 0, r4
	or	ZERO, ZERO, r2

___put_user_asm_q_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO
1725 
panic_stash_regs:
	/* The idea is : when we get an unhandled panic, we dump the registers
	   to a known memory location, then just sit in a tight loop.
	   This allows the human to look at the memory region through the GDB
	   session (assuming the debug module's SHwy initiator isn't locked up
	   or anything), to hopefully analyze the cause of the panic. */

	/* On entry, former r15 (SP) is in DCR
	   former r0  is at resvec_saved_area + 0
	   former r1  is at resvec_saved_area + 8
	   former tr0 is at resvec_saved_area + 32
	   DCR is the only register whose value is lost altogether.
	*/

	/* Dump the general registers r0-r63 at 8-byte slots from offset 0. */
	movi	0xffffffff80000000, r0 ! phy of dump area
	ld.q	SP, 0x000, r1	! former r0
	st.q	r0,  0x000, r1
	ld.q	SP, 0x008, r1	! former r1
	st.q	r0,  0x008, r1
	st.q	r0,  0x010, r2
	st.q	r0,  0x018, r3
	st.q	r0,  0x020, r4
	st.q	r0,  0x028, r5
	st.q	r0,  0x030, r6
	st.q	r0,  0x038, r7
	st.q	r0,  0x040, r8
	st.q	r0,  0x048, r9
	st.q	r0,  0x050, r10
	st.q	r0,  0x058, r11
	st.q	r0,  0x060, r12
	st.q	r0,  0x068, r13
	st.q	r0,  0x070, r14
	getcon	dcr, r14	! former r15 (SP) was stashed in DCR
	st.q	r0,  0x078, r14
	st.q	r0,  0x080, r16
	st.q	r0,  0x088, r17
	st.q	r0,  0x090, r18
	st.q	r0,  0x098, r19
	st.q	r0,  0x0a0, r20
	st.q	r0,  0x0a8, r21
	st.q	r0,  0x0b0, r22
	st.q	r0,  0x0b8, r23
	st.q	r0,  0x0c0, r24
	st.q	r0,  0x0c8, r25
	st.q	r0,  0x0d0, r26
	st.q	r0,  0x0d8, r27
	st.q	r0,  0x0e0, r28
	st.q	r0,  0x0e8, r29
	st.q	r0,  0x0f0, r30
	st.q	r0,  0x0f8, r31
	st.q	r0,  0x100, r32
	st.q	r0,  0x108, r33
	st.q	r0,  0x110, r34
	st.q	r0,  0x118, r35
	st.q	r0,  0x120, r36
	st.q	r0,  0x128, r37
	st.q	r0,  0x130, r38
	st.q	r0,  0x138, r39
	st.q	r0,  0x140, r40
	st.q	r0,  0x148, r41
	st.q	r0,  0x150, r42
	st.q	r0,  0x158, r43
	st.q	r0,  0x160, r44
	st.q	r0,  0x168, r45
	st.q	r0,  0x170, r46
	st.q	r0,  0x178, r47
	st.q	r0,  0x180, r48
	st.q	r0,  0x188, r49
	st.q	r0,  0x190, r50
	st.q	r0,  0x198, r51
	st.q	r0,  0x1a0, r52
	st.q	r0,  0x1a8, r53
	st.q	r0,  0x1b0, r54
	st.q	r0,  0x1b8, r55
	st.q	r0,  0x1c0, r56
	st.q	r0,  0x1c8, r57
	st.q	r0,  0x1d0, r58
	st.q	r0,  0x1d8, r59
	st.q	r0,  0x1e0, r60
	st.q	r0,  0x1e8, r61
	st.q	r0,  0x1f0, r62
	st.q	r0,  0x1f8, r63	! bogus, but for consistency's sake...

	/* Dump the target registers tr0-tr7 at offsets 0x200-0x238. */
	ld.q	SP, 0x020, r1  ! former tr0
	st.q	r0,  0x200, r1
	gettr	tr1, r1
	st.q	r0,  0x208, r1
	gettr	tr2, r1
	st.q	r0,  0x210, r1
	gettr	tr3, r1
	st.q	r0,  0x218, r1
	gettr	tr4, r1
	st.q	r0,  0x220, r1
	gettr	tr5, r1
	st.q	r0,  0x228, r1
	gettr	tr6, r1
	st.q	r0,  0x230, r1
	gettr	tr7, r1
	st.q	r0,  0x238, r1

	/* Dump the control registers at offsets 0x240-0x2a8. */
	getcon	sr,  r1
	getcon	ssr,  r2
	getcon	pssr,  r3
	getcon	spc,  r4
	getcon	pspc,  r5
	getcon	intevt,  r6
	getcon	expevt,  r7
	getcon	pexpevt,  r8
	getcon	tra,  r9
	getcon	tea,  r10
	getcon	kcr0, r11
	getcon	kcr1, r12
	getcon	vbr,  r13
	getcon	resvec,  r14

	st.q	r0,  0x240, r1
	st.q	r0,  0x248, r2
	st.q	r0,  0x250, r3
	st.q	r0,  0x258, r4
	st.q	r0,  0x260, r5
	st.q	r0,  0x268, r6
	st.q	r0,  0x270, r7
	st.q	r0,  0x278, r8
	st.q	r0,  0x280, r9
	st.q	r0,  0x288, r10
	st.q	r0,  0x290, r11
	st.q	r0,  0x298, r12
	st.q	r0,  0x2a0, r13
	st.q	r0,  0x2a8, r14

	/* Hand off to the C panic handler with SPC/SSR/EXPEVT as args. */
	getcon	SPC,r2
	getcon	SSR,r3
	getcon	EXPEVT,r4
	/* Prepare to jump to C - physical address */
	movi	panic_handler-CONFIG_PAGE_OFFSET, r1
	ori	r1, 1, r1
	ptabs	r1, tr0
	getcon	DCR, SP
	blink	tr0, ZERO
	nop
	nop
	nop
	nop
1869 
1870 
1871 
1872 
1873 /*
1874  * --- Signal Handling Section
1875  */
1876 
1877 /*
1878  * extern long long _sa_default_rt_restorer
1879  * extern long long _sa_default_restorer
1880  *
1881  *       or, better,
1882  *
1883  * extern void _sa_default_rt_restorer(void)
1884  * extern void _sa_default_restorer(void)
1885  *
1886  * Code prototypes to do a sys_rt_sigreturn() or sys_sysreturn()
1887  * from user space. Copied into user space by signal management.
1888  * Both must be quad aligned and 2 quad long (4 instructions).
1889  *
1890  */
	.balign 8
	.global sa_default_rt_restorer
sa_default_rt_restorer:
	/* Default signal-return trampoline, copied to user space by the
	 * signal setup code: issue trapa for sys_rt_sigreturn.  Must stay
	 * quad aligned and exactly 2 quads (4 instructions) long. */
	movi	0x10, r9
	shori	__NR_rt_sigreturn, r9	/* r9 = 0x10yyyy syscall trap code */
	trapa	r9
	nop

	.balign 8
	.global sa_default_restorer
sa_default_restorer:
	/* Same trampoline shape for the non-RT sys_sigreturn. */
	movi	0x10, r9
	shori	__NR_sigreturn, r9
	trapa	r9
	nop
1906 
1907 /*
1908  * --- __ex_table Section
1909  */
1910 
1911 /*
1912  * User Access Exception Table.
1913  */
	.section	__ex_table,  "a"

	/* Exception table: pairs of (faulting insn, fixup target) used by
	 * the page-fault handler to resume the uaccess helpers above with
	 * an error return instead of oopsing. */
	.global asm_uaccess_start	/* Just a marker */
asm_uaccess_start:

#ifdef CONFIG_MMU
	.long	___copy_user1, ___copy_user_exit
	.long	___copy_user2, ___copy_user_exit
	.long	___clear_user1, ___clear_user_exit
#endif
	.long	___get_user_asm_b1, ___get_user_asm_b_exit
	.long	___get_user_asm_w1, ___get_user_asm_w_exit
	.long	___get_user_asm_l1, ___get_user_asm_l_exit
	.long	___get_user_asm_q1, ___get_user_asm_q_exit
	.long	___put_user_asm_b1, ___put_user_asm_b_exit
	.long	___put_user_asm_w1, ___put_user_asm_w_exit
	.long	___put_user_asm_l1, ___put_user_asm_l_exit
	.long	___put_user_asm_q1, ___put_user_asm_q_exit

	.global asm_uaccess_end		/* Just a marker */
asm_uaccess_end:
1935 
1936 
1937 
1938 
1939 /*
1940  * --- .init.text Section
1941  */
1942 
1943     __INIT
1944 
1945 /*
1946  * void trap_init (void)
1947  *
1948  */
	.global trap_init
trap_init:
	/* void trap_init(void)
	 * Install the VBR and RESVEC exception vector bases, sanity-check
	 * the vector block size, then unblock exceptions (clear SR.BL). */
	addi	SP, -24, SP			/* Room to save r28/r29/r30 */
	st.q	SP, 0, r28
	st.q	SP, 8, r29
	st.q	SP, 16, r30

	/* Set VBR and RESVEC */
	movi	LVBR_block, r19
	andi	r19, -4, r19			/* reset MMUOFF + reserved */
	/* For RESVEC exceptions we force the MMU off, which means we need the
	   physical address. */
	movi	LRESVEC_block-CONFIG_PAGE_OFFSET, r20
	andi	r20, -4, r20			/* reset reserved */
	ori	r20, 1, r20			/* set MMUOFF */
	putcon	r19, VBR
	putcon	r20, RESVEC

	/* Sanity check */
	movi	LVBR_block_end, r21
	andi	r21, -4, r21
	movi	BLOCK_SIZE, r29			/* r29 = expected size */
	or	r19, ZERO, r30
	add	r19, r29, r19

	/*
	 * Ugly, but better loop forever now than crash afterwards.
	 * We should print a message, but if we touch LVBR or
	 * LRESVEC blocks we should not be surprised if we get stuck
	 * in trap_init().
	 */
	pta	trap_init_loop, tr1
	gettr	tr1, r28			/* r28 = trap_init_loop */
	sub	r21, r30, r30			/* r30 = actual size */

	/*
	 * VBR/RESVEC handlers overlap by being bigger than
	 * allowed. Very bad. Just loop forever.
	 * (r28) panic/loop address
	 * (r29) expected size
	 * (r30) actual size
	 */
trap_init_loop:
	bne	r19, r21, tr1	/* size mismatch: spin here forever */

	/* Now that exception vectors are set up reset SR.BL */
	getcon	SR, r22
	movi	SR_UNBLOCK_EXC, r23
	and	r22, r23, r22
	putcon	r22, SR

	/* Restore callee-saved registers and return. */
	addi	SP, 24, SP
	ptabs	LINK, tr0
	blink	tr0, ZERO
2003