Back to home page

LXR

 
 

    


0001 /*
0002  * etrap.S: Sparc trap window preparation for entry into the
0003  *          Linux kernel.
0004  *
0005  * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
0006  */
0007 
0008 #include <asm/head.h>
0009 #include <asm/asi.h>
0010 #include <asm/contregs.h>
0011 #include <asm/page.h>
0012 #include <asm/psr.h>
0013 #include <asm/ptrace.h>
0014 #include <asm/winmacro.h>
0015 #include <asm/asmmacro.h>
0016 #include <asm/thread_info.h>
0017 
0018 /* Registers to not touch at all. */
/* Symbolic names for the trap window's local registers (%l0-%l7) and for
 * the current-thread pointer (%g6).  The trap vector / SAVE_ALL caller has
 * already loaded l0-l3 (trap-time %psr, pc, npc, %wim) before jumping here;
 * see the calling-convention comment blocks further down this file.
 */
0019 #define t_psr        l0 /* Set by caller */
0020 #define t_pc         l1 /* Set by caller */
0021 #define t_npc        l2 /* Set by caller */
0022 #define t_wim        l3 /* Set by caller */
0023 #define t_twinmask   l4 /* Set at beginning of this entry routine. */
0024 #define t_kstack     l5 /* Set right before pt_regs frame is built */
0025 #define t_retpc      l6 /* If you change this, change winmacro.h header file */
0026 #define t_systable   l7 /* Never touch this, could be the syscall table ptr. */
0027 #define curptr       g6 /* Set after pt_regs frame is built */
0028 
0029     .text
0030     .align 4
0031 
0032     /* SEVEN WINDOW PATCH INSTRUCTIONS */
    /* The mainline code below assumes the common 8-register-window Sparcs
     * (rotate by shifting 7, mask with 0xff).  These are the replacement
     * instructions for 7-window CPUs (shift 6, mask 0x7f); they are copied
     * over the tsetup_patch1..tsetup_patch6 sites below -- presumably at
     * boot once the CPU's window count is known (every patch site below is
     * tagged "patched on 7 window Sparcs"; confirm against the boot code).
     */
0033     .globl  tsetup_7win_patch1, tsetup_7win_patch2
0034     .globl  tsetup_7win_patch3, tsetup_7win_patch4
0035     .globl  tsetup_7win_patch5, tsetup_7win_patch6
0036 tsetup_7win_patch1: sll %t_wim, 0x6, %t_wim
0037 tsetup_7win_patch2: and %g2, 0x7f, %g2
0038 tsetup_7win_patch3: and %g2, 0x7f, %g2
0039 tsetup_7win_patch4: and %g1, 0x7f, %g1
0040 tsetup_7win_patch5: sll %t_wim, 0x6, %t_wim
0041 tsetup_7win_patch6: and %g2, 0x7f, %g2
0042     /* END OF PATCH INSTRUCTIONS */
0043 
0044     /* At trap time, interrupts and all generic traps do the
0045      * following:
0046      *
0047      * rd   %psr, %l0
0048      * b    some_handler
0049      * rd   %wim, %l3
0050      * nop
0051      *
0052      * Then 'some_handler' if it needs a trap frame (ie. it has
0053      * to call c-code and the trap cannot be handled in-window)
0054      * then it does the SAVE_ALL macro in entry.S which does
0055      *
0056      * sethi    %hi(trap_setup), %l4
0057      * jmpl     %l4 + %lo(trap_setup), %l6
0058      * nop
0059      */
0060 
0061     /* 2 3 4  window number
0062      * -----
0063      * O T S  mnemonic
0064      *
0065      * O == Current window before trap
0066      * T == Window entered when trap occurred
0067      * S == Window we will need to save if (1<<T) == %wim
0068      *
0069      * Before execution gets here, it must be guaranteed that
0070      * %l0 contains trap time %psr, %l1 and %l2 contain the
0071      * trap pc and npc, and %l3 contains the trap time %wim.
0072      */
0073 
0074     .globl  trap_setup, tsetup_patch1, tsetup_patch2
0075     .globl  tsetup_patch3, tsetup_patch4
0076     .globl  tsetup_patch5, tsetup_patch6
0077 trap_setup:
0078     /* Calculate mask of trap window.  See if from user
0079      * or kernel and branch conditionally.
0080      */
0081     mov 1, %t_twinmask          ! will become (1 << CWP)
0082     andcc   %t_psr, PSR_PS, %g0      ! fromsupv_p = (psr & PSR_PS)
0083     be  trap_setup_from_user         ! nope, from user mode
0084      sll    %t_twinmask, %t_psr, %t_twinmask ! t_twinmask = (1 << psr)
    /* The delay slot above executes on BOTH paths (no annul bit).  sll
     * only uses the low 5 bits of %t_psr as the shift count, and those
     * are exactly the PSR CWP field, so t_twinmask becomes the one-hot
     * mask of the window we trapped into.
     */
0085 
0086     /* From kernel, allocate more kernel stack and
0087      * build a pt_regs trap frame.
0088      */
0089     sub %fp, (STACKFRAME_SZ + TRACEREG_SZ), %t_kstack ! %fp here is the trapped window's %sp
0090     STORE_PT_ALL(t_kstack, t_psr, t_pc, t_npc, g2)
0091 
0092     /* See if we are in the trap window. */
0093     andcc   %t_twinmask, %t_wim, %g0
0094     bne trap_setup_kernel_spill     ! in trap window, clean up
0095      nop
0096 
0097     /* Trap from kernel with a window available.
0098      * Just do it...
0099      */
0100     jmpl    %t_retpc + 0x8, %g0 ! return to caller
0101      mov    %t_kstack, %sp      ! jump onto new stack
0102 
0103 trap_setup_kernel_spill:
    /* We trapped into the invalid window, so one window must be spilled
     * before C code can run.  First check whether any user windows are
     * still live; if so, defer to the generic user spill path.
     */
0104     ld  [%curptr + TI_UWINMASK], %g1
0105     orcc    %g0, %g1, %g0       ! any live user windows?
0106     bne trap_setup_user_spill   ! there are some user windows, yuck
0107     /* Spill from kernel, but only kernel windows, adjust
0108      * %wim and go.
0109      */
0110      srl    %t_wim, 0x1, %g2    ! begin computation of new %wim
0111 tsetup_patch1:
0112     sll %t_wim, 0x7, %t_wim ! patched on 7 window Sparcs
0113     or  %t_wim, %g2, %g2    ! %g2 = old %wim rotated right one window
0114 tsetup_patch2:
0115     and %g2, 0xff, %g2      ! patched on 7 window Sparcs
0116 
    /* Step down into the window that just became invalid so that
     * STORE_WINDOW dumps ITS registers, not ours.
     */
0117     save    %g0, %g0, %g0
0118 
0119     /* Set new %wim value */
0120     wr  %g2, 0x0, %wim
0121 
0122     /* Save the kernel window onto the corresponding stack. */
0123     STORE_WINDOW(sp)
0124 
0125     restore %g0, %g0, %g0   ! back up into the trap window
0126 
0127     jmpl    %t_retpc + 0x8, %g0 ! return to caller
0128      mov    %t_kstack, %sp      ! and onto new kernel stack
0129 
0130 #define STACK_OFFSET (THREAD_SIZE - TRACEREG_SZ - STACKFRAME_SZ)
    /* Offset from the thread_info base to where the pt_regs trap frame
     * is built: at the top of the thread's kernel stack, leaving room
     * for the register trace area and one stack frame.
     */
0131 
0132 trap_setup_from_user:
0133     /* We can't use %curptr yet. */
0134     LOAD_CURRENT(t_kstack, t_twinmask)
    /* t_kstack now holds the current thread's base pointer (its kernel
     * stack / thread_info region -- see the masking below); t_twinmask
     * was consumed as scratch and is recomputed further down.
     */
0135 
0136     sethi   %hi(STACK_OFFSET), %t_twinmask
0137     or  %t_twinmask, %lo(STACK_OFFSET), %t_twinmask
0138     add %t_kstack, %t_twinmask, %t_kstack   ! t_kstack = where pt_regs goes
0139 
0140     mov 1, %t_twinmask          ! recompute trap window mask (clobbered above)
0141     sll %t_twinmask, %t_psr, %t_twinmask ! t_twinmask = (1 << psr)
0142 
0143     /* Build pt_regs frame. */
0144     STORE_PT_ALL(t_kstack, t_psr, t_pc, t_npc, g2)
0145 
0146 #if 0
0147     /* If we're sure every task_struct is THREAD_SIZE aligned,
0148        we can speed this up. */
0149     sethi   %hi(STACK_OFFSET), %curptr
0150     or  %curptr, %lo(STACK_OFFSET), %curptr
0151     sub %t_kstack, %curptr, %curptr
0152 #else
0153     sethi   %hi(~(THREAD_SIZE - 1)), %curptr
    /* NOTE: sethi alone suffices only if the low bits of
     * ~(THREAD_SIZE - 1) are zero, i.e. THREAD_SIZE >= 1024 and a
     * power of two -- presumably guaranteed; confirm in asm/page.h.
     */
0154     and %t_kstack, %curptr, %curptr     ! curptr = THREAD_SIZE-aligned base
0155 #endif
0156 
0157     /* Clear current_thread_info->w_saved */
0158     st  %g0, [%curptr + TI_W_SAVED]
0159 
0160     /* See if we are in the trap window. */
0161     andcc   %t_twinmask, %t_wim, %g0
0162     bne trap_setup_user_spill       ! yep we are
0163      orn    %g0, %t_twinmask, %g1       ! negate trap win mask into %g1
0164 
0165     /* Trap from user, but not into the invalid window.
0166      * Calculate new umask.  The way this works is,
0167      * any window from the %wim at trap time until
0168      * the window right before the one we are in now,
0169      * is a user window.  A diagram:
0170      *
0171      *      7 6 5 4 3 2 1 0    window number
0172      *      ---------------
0173      *        I     L T        mnemonic
0174      *
0175      * Window 'I' is the invalid window in our example,
0176      * window 'L' is the window the user was in when
0177      * the trap occurred, window T is the trap window
0178      * we are in now.  So therefore, windows 5, 4 and
0179      * 3 are user windows.  The following sequence
0180      * computes the user winmask to represent this.
0181      */
0182     subcc   %t_wim, %t_twinmask, %g2    ! %g2 = wim - twinmask
0183     bneg,a  1f
0184      sub    %g2, 0x1, %g2       ! annulled delay slot: runs only when wim < twinmask
0185 1:
0186     andn    %g2, %t_twinmask, %g2   ! drop the trap window's own bit
0187 tsetup_patch3:
0188     and %g2, 0xff, %g2          ! patched on 7win Sparcs
0189     st  %g2, [%curptr + TI_UWINMASK]    ! store new umask
0190 
0191     jmpl    %t_retpc + 0x8, %g0     ! return to caller
0192      mov    %t_kstack, %sp          ! and onto kernel stack
0193 
0194 trap_setup_user_spill:
0195     /* A spill occurred from either kernel or user mode
0196      * and there exist some user windows to deal with.
0197      * A mask of the currently valid user windows
0198      * is in %g1 upon entry to here.
0199      */
0200 
0201 tsetup_patch4:
0202     and %g1, 0xff, %g1      ! patched on 7win Sparcs, mask
0203     srl %t_wim, 0x1, %g2    ! compute new %wim
0204 tsetup_patch5:
0205     sll %t_wim, 0x7, %t_wim ! patched on 7win Sparcs
0206     or  %t_wim, %g2, %g2    ! %g2 is new %wim (old one rotated right a window)
0207 tsetup_patch6:
0208     and %g2, 0xff, %g2      ! patched on 7win Sparcs
0209     andn    %g1, %g2, %g1       ! clear this bit in %g1
0210     st  %g1, [%curptr + TI_UWINMASK]    ! spilled window no longer a live user window
0211 
0212     save    %g0, %g0, %g0       ! step into the window we are about to spill
0213 
0214     wr  %g2, 0x0, %wim      ! install the rotated invalid-window mask
0215 
0216     /* Call MMU-architecture dependent stack checking
0217      * routine.
0218      */
0219     b   tsetup_srmmu_stackchk
0220      andcc  %sp, 0x7, %g0   ! delay slot: is the spill target %sp doubleword aligned?
0221 
0222     /* Architecture specific stack checking routines.  When either
0223      * of these routines are called, the globals are free to use
0224      * as they have been safely stashed on the new kernel stack
0225      * pointer.  Thus the definition below for simplicity.
0226      */
0227 #define glob_tmp     g1
0228 
0229     .globl  tsetup_srmmu_stackchk
0230 tsetup_srmmu_stackchk:
0231     /* Check results of callers andcc %sp, 0x7, %g0 */
0232     bne trap_setup_user_stack_is_bolixed    ! %sp not 8-byte aligned -> bad stack
0233      sethi   %hi(PAGE_OFFSET), %glob_tmp
0234 
0235     cmp %glob_tmp, %sp
0236     bleu,a  1f
0237 LEON_PI( lda    [%g0] ASI_LEON_MMUREGS, %glob_tmp)  ! read MMU control
0238 SUN_PI_( lda    [%g0] ASI_M_MMUREGS, %glob_tmp)     ! read MMU control
    /* The branch above is taken when PAGE_OFFSET <= %sp; its annulled
     * delay slot (executed only when taken) pre-loads the MMU control
     * register for the code at 1:.  A %sp below PAGE_OFFSET falls
     * through to the bolixed path.  NOTE(review): the fault-guarded
     * store at 1: looks designed for faultable (user-range) addresses,
     * so confirm this comparison direction is intended -- compare with
     * the analogous stack check on the trap-return path.
     */
0239 
0240 trap_setup_user_stack_is_bolixed:
0241     /* From user/kernel into invalid window w/bad user
0242      * stack. Save bad user stack, and return to caller.
0243      */
0244     SAVE_BOLIXED_USER_STACK(curptr, g3)
0245     restore %g0, %g0, %g0       ! back up into the trap window
0246 
0247     jmpl    %t_retpc + 0x8, %g0
0248      mov    %t_kstack, %sp
0249 
0250 1:
0251     /* Clear the fault status and turn on the no_fault bit. */
0252     or  %glob_tmp, 0x2, %glob_tmp       ! or in no_fault bit
0253 LEON_PI(sta %glob_tmp, [%g0] ASI_LEON_MMUREGS)      ! set it
0254 SUN_PI_(sta %glob_tmp, [%g0] ASI_M_MMUREGS)     ! set it
0255 
0256     /* Dump the registers and cross fingers. */
0257     STORE_WINDOW(sp)
0258 
0259     /* Clear the no_fault bit and check the status. */
0260     andn    %glob_tmp, 0x2, %glob_tmp
0261 LEON_PI(sta %glob_tmp, [%g0] ASI_LEON_MMUREGS)
0262 SUN_PI_(sta %glob_tmp, [%g0] ASI_M_MMUREGS)
0263 
0264     mov AC_M_SFAR, %glob_tmp
0265 LEON_PI(lda [%glob_tmp] ASI_LEON_MMUREGS, %g0)  ! touch fault addr reg, value discarded
0266 SUN_PI_(lda [%glob_tmp] ASI_M_MMUREGS, %g0)     ! presumably read to unlatch it
0267 
0268     mov AC_M_SFSR, %glob_tmp
0269 LEON_PI(lda [%glob_tmp] ASI_LEON_MMUREGS, %glob_tmp)! save away status of winstore
0270 SUN_PI_(lda [%glob_tmp] ASI_M_MMUREGS, %glob_tmp)   ! save away status of winstore
0271 
0272     andcc   %glob_tmp, 0x2, %g0         ! did we fault?
0273     bne trap_setup_user_stack_is_bolixed    ! failure
0274      nop
0275 
0276     restore %g0, %g0, %g0       ! store succeeded; back to trap window
0277 
0278     jmpl    %t_retpc + 0x8, %g0
0279      mov    %t_kstack, %sp
0280