Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 /* arch/sparc/kernel/entry.S:  Sparc trap low-level entry points.
0003  *
0004  * Copyright (C) 1995, 2007 David S. Miller (davem@davemloft.net)
0005  * Copyright (C) 1996 Eddie C. Dost   (ecd@skynet.be)
0006  * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
0007  * Copyright (C) 1996-1999 Jakub Jelinek   (jj@sunsite.mff.cuni.cz)
0008  * Copyright (C) 1997 Anton Blanchard (anton@progsoc.uts.edu.au)
0009  */
0010 
0011 #include <linux/linkage.h>
0012 #include <linux/errno.h>
0013 #include <linux/pgtable.h>
0014 
0015 #include <asm/head.h>
0016 #include <asm/asi.h>
0017 #include <asm/smp.h>
0018 #include <asm/contregs.h>
0019 #include <asm/ptrace.h>
0020 #include <asm/asm-offsets.h>
0021 #include <asm/psr.h>
0022 #include <asm/vaddrs.h>
0023 #include <asm/page.h>
0024 #include <asm/winmacro.h>
0025 #include <asm/signal.h>
0026 #include <asm/obio.h>
0027 #include <asm/mxcc.h>
0028 #include <asm/thread_info.h>
0029 #include <asm/param.h>
0030 #include <asm/unistd.h>
0031 
0032 #include <asm/asmmacro.h>
0033 #include <asm/export.h>
0034 
/* %g6 is reserved as the current thread_info pointer in sparc32 kernel code
 * (see the [%curptr + TI_FLAGS] loads in the syscall paths below).
 */
0035 #define curptr      g6

0036 
0037 /* These are just handy. */
/* _SV opens a fresh register window and allocates a stack frame; _RS closes it. */
0038 #define _SV save    %sp, -STACKFRAME_SZ, %sp
0039 #define _RS     restore 
0040 
/* Seven nested save/restore pairs force every other register window to be
 * spilled by the window-overflow trap, flushing all kernel windows to the
 * stack.  (Seven presumably matches NWINDOWS-1 on sparc32 — TODO confirm.)
 */
0041 #define FLUSH_ALL_KERNEL_WINDOWS \
0042     _SV; _SV; _SV; _SV; _SV; _SV; _SV; \
0043     _RS; _RS; _RS; _RS; _RS; _RS; _RS;
0044 
0045     .text
0046 
0047 #ifdef CONFIG_KGDB
/* Software breakpoint entry for KGDB: trap 0x7d transfers control to the
 * debugger's trap handler, then we return to the caller (nop fills the
 * retl delay slot).
 */
0048     .align  4
0049     .globl      arch_kgdb_breakpoint
0050     .type       arch_kgdb_breakpoint,#function
0051 arch_kgdb_breakpoint:
0052     ta      0x7d
0053     retl
0054      nop
0055     .size       arch_kgdb_breakpoint,.-arch_kgdb_breakpoint
0056 #endif
0057 
0058 #if defined(CONFIG_BLK_DEV_FD) || defined(CONFIG_BLK_DEV_FD_MODULE)
/* Fast ("hard") floppy interrupt: polls bytes between the FDC data port and
 * the pdma buffer without building a full trap frame.  Only falls back to
 * the slow path (floppy_dosoftint, which does SAVE_ALL and calls C) when
 * there is no pseudo-DMA in progress, on transfer completion, or on error.
 */
0059     .align  4
0060     .globl  floppy_hardint
0061 floppy_hardint:
0062     /*
0063      * This code cannot touch registers %l0 %l1 and %l2
0064      * because SAVE_ALL depends on their values. It depends
0065      * on %l3 also, but we regenerate it before a call.
0066      * Other registers are:
0067      * %l3 -- base address of fdc registers
0068      * %l4 -- pdma_vaddr
0069      * %l5 -- scratch for ld/st address
0070      * %l6 -- pdma_size
0071      * %l7 -- scratch [floppy byte, ld/st address, aux. data]
0072      */
0073 
0074     /* Do we have work to do? */
0075     sethi   %hi(doing_pdma), %l7
0076     ld  [%l7 + %lo(doing_pdma)], %l7
0077     cmp %l7, 0
0078     be  floppy_dosoftint
0079      nop
0080 
0081     /* Load fdc register base */
0082     sethi   %hi(fdc_status), %l3
0083     ld  [%l3 + %lo(fdc_status)], %l3
0084 
0085     /* Setup register addresses */
0086     sethi   %hi(pdma_vaddr), %l5    ! transfer buffer
0087     ld  [%l5 + %lo(pdma_vaddr)], %l4
0088     sethi   %hi(pdma_size), %l5 ! bytes to go
0089     ld  [%l5 + %lo(pdma_size)], %l6
/* Main polling loop: re-read the FDC status byte each iteration and test
 * the FIFO-ready (0x80), non-DMA (0x20) and direction (0x40) bits; the
 * delay slot of each branch computes the next test or the byte count.
 */
0090 next_byte:
0091     ldub    [%l3], %l7
0092 
0093     andcc   %l7, 0x80, %g0      ! Does fifo still have data
0094     bz  floppy_fifo_emptied ! fifo has been emptied...
0095      andcc  %l7, 0x20, %g0      ! in non-dma mode still?
0096     bz  floppy_overrun      ! nope, overrun
0097      andcc  %l7, 0x40, %g0      ! 0=write 1=read
0098     bz  floppy_write
0099      sub    %l6, 0x1, %l6
0100 
0101     /* Ok, actually read this byte */
0102     ldub    [%l3 + 1], %l7
0103     orcc    %g0, %l6, %g0
0104     stb %l7, [%l4]
0105     bne next_byte
0106      add    %l4, 0x1, %l4
0107 
0108     b   floppy_tdone
0109      nop
0110 
0111 floppy_write:
0112     /* Ok, actually write this byte */
0113     ldub    [%l4], %l7
0114     orcc    %g0, %l6, %g0
0115     stb %l7, [%l3 + 1]
0116     bne next_byte
0117      add    %l4, 0x1, %l4
0118 
0119     /* fall through... */
/* Transfer done: publish final buffer pointer/size, then pulse the
 * terminal-count bit in the auxio register (set 0xc2, then clear 0x02).
 */
0120 floppy_tdone:
0121     sethi   %hi(pdma_vaddr), %l5
0122     st  %l4, [%l5 + %lo(pdma_vaddr)]
0123     sethi   %hi(pdma_size), %l5
0124     st  %l6, [%l5 + %lo(pdma_size)]
0125     /* Flip terminal count pin */
0126     set auxio_register, %l7
0127     ld  [%l7], %l7
0128 
0129     ldub    [%l7], %l5
0130 
0131     or  %l5, 0xc2, %l5
0132     stb %l5, [%l7]
0133     andn    %l5, 0x02, %l5
0134 
/* NOTE(review): local label 2: appears unreferenced within this view. */
0135 2:
0136     /* Kill some time so the bits set */
0137     WRITE_PAUSE
0138     WRITE_PAUSE
0139 
0140     stb     %l5, [%l7]
0141 
0142     /* Prevent recursion */
0143     sethi   %hi(doing_pdma), %l7
0144     b   floppy_dosoftint
0145      st %g0, [%l7 + %lo(doing_pdma)]
0146 
0147     /* We emptied the FIFO, but we haven't read everything
0148      * as of yet.  Store the current transfer address and
0149      * bytes left to read so we can continue when the next
0150      * fast IRQ comes in.
0151      */
0152 floppy_fifo_emptied:
0153     sethi   %hi(pdma_vaddr), %l5
0154     st  %l4, [%l5 + %lo(pdma_vaddr)]
0155     sethi   %hi(pdma_size), %l7
0156     st  %l6, [%l7 + %lo(pdma_size)]
0157 
0158     /* Restore condition codes */
/* %l0/%l1/%l2 hold the trap-time %psr/pc/npc; restore %psr and return
 * straight from the trap without ever building a pt_regs frame.
 */
0159     wr  %l0, 0x0, %psr
0160     WRITE_PAUSE
0161 
0162     jmp %l1
0163     rett    %l2
0164 
0165 floppy_overrun:
0166     sethi   %hi(pdma_vaddr), %l5
0167     st  %l4, [%l5 + %lo(pdma_vaddr)]
0168     sethi   %hi(pdma_size), %l5
0169     st  %l6, [%l5 + %lo(pdma_size)]
0170     /* Prevent recursion */
0171     sethi   %hi(doing_pdma), %l7
0172     st  %g0, [%l7 + %lo(doing_pdma)]
0173 
0174     /* fall through... */
/* Slow path: build a full trap frame and hand off to the C interrupt
 * handler sparc_floppy_irq() with interrupts masked (PIL raised) but
 * traps re-enabled (PSR_ET).
 */
0175 floppy_dosoftint:
0176     rd  %wim, %l3
0177     SAVE_ALL
0178 
0179     /* Set all IRQs off. */
0180     or  %l0, PSR_PIL, %l4
0181     wr  %l4, 0x0, %psr
0182     WRITE_PAUSE
0183     wr  %l4, PSR_ET, %psr
0184     WRITE_PAUSE
0185 
0186     mov 11, %o0         ! floppy irq level (unused anyway)
0187     mov %g0, %o1        ! devid is not used in fast interrupts
0188     call    sparc_floppy_irq
0189      add    %sp, STACKFRAME_SZ, %o2 ! struct pt_regs *regs
0190 
0191     RESTORE_ALL
0192     
0193 #endif /* (CONFIG_BLK_DEV_FD) */
0194 
0195     /* Bad trap handler */
/* Catch-all for unhandled traps: build a trap frame, re-enable traps, and
 * let C-level do_hw_interrupt(regs, trapnr) report it.  Per the trap-entry
 * convention used throughout this file, %l7 carries the trap number.
 */
0196     .globl  bad_trap_handler
0197 bad_trap_handler:
0198     SAVE_ALL
0199 
0200     wr  %l0, PSR_ET, %psr
0201     WRITE_PAUSE
0202 
0203     add %sp, STACKFRAME_SZ, %o0 ! pt_regs
0204     call    do_hw_interrupt
0205      mov    %l7, %o1        ! trap number
0206 
0207     RESTORE_ALL
0208     
0209 /* For now all IRQ's not registered get sent here. handler_irq() will
0210  * see if a routine is registered to handle this interrupt and if not
0211  * it will say so on the console.
0212  */
0213 
0214     .align  4
0215     .globl  patch_handler_irq
0215     .globl  real_irq_entry, patch_handler_irq
0216 real_irq_entry:
0217     SAVE_ALL
0218 
0219 #ifdef CONFIG_SMP
/* On SMP, IRQ levels above 11 may be cross-CPU IPIs; this branch site is
 * runtime-patched (hence the patchme_ label) depending on platform.
 */
0220     .globl  patchme_maybe_smp_msg
0221 
0222     cmp %l7, 11
0223 patchme_maybe_smp_msg:
0224     bgu maybe_smp4m_msg
0225      nop
0226 #endif
0227 
/* Ordinary IRQ: raise PIL to mask further interrupts, re-enable traps,
 * then call handler_irq(level, regs).  The call site is also patchable.
 */
0228 real_irq_continue:
0229     or  %l0, PSR_PIL, %g2
0230     wr  %g2, 0x0, %psr
0231     WRITE_PAUSE
0232     wr  %g2, PSR_ET, %psr
0233     WRITE_PAUSE
0234     mov %l7, %o0        ! irq level
0235 patch_handler_irq:
0236     call    handler_irq
0237      add    %sp, STACKFRAME_SZ, %o1 ! pt_regs ptr
0238     or  %l0, PSR_PIL, %g2   ! restore PIL after handler_irq
0239     wr  %g2, PSR_ET, %psr   ! keep ET up
0240     WRITE_PAUSE
0241 
0242     RESTORE_ALL
0243 
0244 #ifdef CONFIG_SMP
0245     /* SMP per-cpu ticker interrupts are handled specially. */
/* Entered from maybe_smp4m_msg below with the condition codes of a
 * "cmp %l7, 14" still live: bne means this was not the level-14 ticker,
 * so resume the generic path (skipping its first instruction, which the
 * delay slot here replicates).
 */
0246 smp4m_ticker:
0247     bne real_irq_continue+4
0248      or %l0, PSR_PIL, %g2
0249     wr  %g2, 0x0, %psr
0250     WRITE_PAUSE
0251     wr  %g2, PSR_ET, %psr
0252     WRITE_PAUSE
0253     call    smp4m_percpu_timer_interrupt
0254      add    %sp, STACKFRAME_SZ, %o0
0255     wr  %l0, PSR_ET, %psr
0256     WRITE_PAUSE
0257     RESTORE_ALL
0258 
/* Extract the CPU id (0-3) from %tbr bits 13:12 into the given register. */
0259 #define GET_PROCESSOR4M_ID(reg) \
0260     rd  %tbr, %reg; \
0261     srl %reg, 12, %reg; \
0262     and %reg, 3, %reg;
0263 
0264     /* Here is where we check for possible SMP IPI passed to us
0265      * on some level other than 15 which is the NMI and only used
0266      * for cross calls.  That has a separate entry point below.
0267      *
0268      * IPIs are sent on Level 12, 13 and 14. See IRQ_IPI_*.
0269      */
0270 maybe_smp4m_msg:
0271     GET_PROCESSOR4M_ID(o3)
0272     sethi   %hi(sun4m_irq_percpu), %l5
0273     sll %o3, 2, %o3
0274     or  %l5, %lo(sun4m_irq_percpu), %o5
0275     sethi   %hi(0x70000000), %o2    ! Check all soft-IRQs
0276     ld  [%o5 + %o3], %o1
0277     ld  [%o1 + 0x00], %o3   ! sun4m_irq_percpu[cpu]->pending
0278     andcc   %o3, %o2, %g0
0279     be,a    smp4m_ticker
0280      cmp    %l7, 14
0281     /* Soft-IRQ IPI */
/* Ack all three soft-IRQ bits at once, then re-read pending to make the
 * write post before lowering our guard.
 */
0282     st  %o2, [%o1 + 0x04]   ! sun4m_irq_percpu[cpu]->clear=0x70000000
0283     WRITE_PAUSE
0284     ld  [%o1 + 0x00], %g0   ! sun4m_irq_percpu[cpu]->pending
0285     WRITE_PAUSE
0286     or  %l0, PSR_PIL, %l4
0287     wr  %l4, 0x0, %psr
0288     WRITE_PAUSE
0289     wr  %l4, PSR_ET, %psr
0290     WRITE_PAUSE
0291     srl %o3, 28, %o2        ! shift for simpler checks below
/* Dispatch each pending IPI kind in turn; the annulled (",a") branches
 * keep the next bit-test in the delay slot only when taken paths need it.
 */
0292 maybe_smp4m_msg_check_single:
0293     andcc   %o2, 0x1, %g0
0294     beq,a   maybe_smp4m_msg_check_mask
0295      andcc  %o2, 0x2, %g0
0296     call    smp_call_function_single_interrupt
0297      nop
0298     andcc   %o2, 0x2, %g0
0299 maybe_smp4m_msg_check_mask:
0300     beq,a   maybe_smp4m_msg_check_resched
0301      andcc  %o2, 0x4, %g0
0302     call    smp_call_function_interrupt
0303      nop
0304     andcc   %o2, 0x4, %g0
0305 maybe_smp4m_msg_check_resched:
0306     /* rescheduling is done in RESTORE_ALL regardless, but incr stats */
0307     beq,a   maybe_smp4m_msg_out
0308      nop
0309     call    smp_resched_interrupt
0310      nop
0311 maybe_smp4m_msg_out:
0312     RESTORE_ALL
0313 
/* Level-15 NMI entry on sun4m SMP: either a cross-call IPI (bit 31 of the
 * per-cpu pending mask set) or, if not, an async memory error handled by
 * sun4m_nmi_error.
 */
0314     .align  4
0315     .globl  linux_trap_ipi15_sun4m
0316 linux_trap_ipi15_sun4m:
0317     SAVE_ALL
0318     sethi   %hi(0x80000000), %o2
0319     GET_PROCESSOR4M_ID(o0)
0320     sethi   %hi(sun4m_irq_percpu), %l5
0321     or  %l5, %lo(sun4m_irq_percpu), %o5
0322     sll %o0, 2, %o0
0323     ld  [%o5 + %o0], %o5
0324     ld  [%o5 + 0x00], %o3   ! sun4m_irq_percpu[cpu]->pending
0325     andcc   %o3, %o2, %g0
0326     be  sun4m_nmi_error     ! Must be an NMI async memory error
0327      st %o2, [%o5 + 0x04]   ! sun4m_irq_percpu[cpu]->clear=0x80000000
0328     WRITE_PAUSE
0329     ld  [%o5 + 0x00], %g0   ! sun4m_irq_percpu[cpu]->pending
0330     WRITE_PAUSE
0331     or  %l0, PSR_PIL, %l4
0332     wr  %l4, 0x0, %psr
0333     WRITE_PAUSE
0334     wr  %l4, PSR_ET, %psr
0335     WRITE_PAUSE
0336     call    smp4m_cross_call_irq
0337      nop
0338     b   ret_trap_lockless_ipi
0339      clr    %l6
0340 
0341     .globl  smp4d_ticker
0342     /* SMP per-cpu ticker interrupts are handled specially. */
/* sun4d variant of the per-cpu timer: ack PIL 14 in the MXCC interrupt
 * clear register before calling the C handler with traps re-enabled.
 */
0343 smp4d_ticker:
0344     SAVE_ALL
0345     or  %l0, PSR_PIL, %g2
0346     sethi   %hi(CC_ICLR), %o0
0347     sethi   %hi(1 << 14), %o1
0348     or  %o0, %lo(CC_ICLR), %o0
0349     stha    %o1, [%o0] ASI_M_MXCC   /* Clear PIL 14 in MXCC's ICLR */
0350     wr  %g2, 0x0, %psr
0351     WRITE_PAUSE
0352     wr  %g2, PSR_ET, %psr
0353     WRITE_PAUSE
0354     call    smp4d_percpu_timer_interrupt
0355      add    %sp, STACKFRAME_SZ, %o0
0356     wr  %l0, PSR_ET, %psr
0357     WRITE_PAUSE
0358     RESTORE_ALL
0359 
/* Level-15 entry on sun4d: first rule out MXCC and BootBus errors; if it
 * is a genuine cross-call IPI, ack PIL 15 in the MXCC ICLR and call the C
 * handler.  On error, PIL 15 is masked and we spin (see FIXME below).
 */
0360     .align  4
0361     .globl  linux_trap_ipi15_sun4d
0362 linux_trap_ipi15_sun4d:
0363     SAVE_ALL
0364     sethi   %hi(CC_BASE), %o4
0365     sethi   %hi(MXCC_ERR_ME|MXCC_ERR_PEW|MXCC_ERR_ASE|MXCC_ERR_PEE), %o2
0366     or  %o4, (CC_EREG - CC_BASE), %o0
0367     ldda    [%o0] ASI_M_MXCC, %o0
0368     andcc   %o0, %o2, %g0
0369     bne 1f
0370      sethi  %hi(BB_STAT2), %o2
0371     lduba   [%o2] ASI_M_CTL, %o2
0372     andcc   %o2, BB_STAT2_MASK, %g0
0373     bne 2f
0374      or %o4, (CC_ICLR - CC_BASE), %o0
0375     sethi   %hi(1 << 15), %o1
0376     stha    %o1, [%o0] ASI_M_MXCC   /* Clear PIL 15 in MXCC's ICLR */
0377     or  %l0, PSR_PIL, %l4
0378     wr  %l4, 0x0, %psr
0379     WRITE_PAUSE
0380     wr  %l4, PSR_ET, %psr
0381     WRITE_PAUSE
0382     call    smp4d_cross_call_irq
0383      nop
0384     b   ret_trap_lockless_ipi
0385      clr    %l6
0386 
0387 1:  /* MXCC error */
0388 2:  /* BB error */
0389     /* Disable PIL 15 */
0390     set CC_IMSK, %l4
0391     lduha   [%l4] ASI_M_MXCC, %l5
0392     sethi   %hi(1 << 15), %l7
0393     or  %l5, %l7, %l5
0394     stha    %l5, [%l4] ASI_M_MXCC
0395     /* FIXME */
/* Deliberate infinite loop: error handling is unimplemented, so hang here. */
0396 1:  b,a 1b
0397 
0398     .globl  smpleon_ipi
0399     .extern leon_ipi_interrupt
0400     /* SMP per-cpu IPI interrupts are handled specially. */
/* LEON IPI: mask interrupts via PIL, re-enable traps, and call the C
 * handler with the pt_regs pointer in %o1.
 */
0401 smpleon_ipi:
0402         SAVE_ALL
0403     or  %l0, PSR_PIL, %g2
0404     wr  %g2, 0x0, %psr
0405     WRITE_PAUSE
0406     wr  %g2, PSR_ET, %psr
0407     WRITE_PAUSE
0408     call    leonsmp_ipi_interrupt
0409      add    %sp, STACKFRAME_SZ, %o1 ! pt_regs
0410     wr  %l0, PSR_ET, %psr
0411     WRITE_PAUSE
0412     RESTORE_ALL
0413 
/* LEON level-15 cross-call entry, analogous to the sun4m/sun4d versions. */
0414     .align  4
0415     .globl  linux_trap_ipi15_leon
0416 linux_trap_ipi15_leon:
0417     SAVE_ALL
0418     or  %l0, PSR_PIL, %l4
0419     wr  %l4, 0x0, %psr
0420     WRITE_PAUSE
0421     wr  %l4, PSR_ET, %psr
0422     WRITE_PAUSE
0423     call    leon_cross_call_irq
0424      nop
0425     b   ret_trap_lockless_ipi
0426      clr    %l6
0427 
0428 #endif /* CONFIG_SMP */
0429 
0430     /* This routine handles illegal instructions and privileged
0431      * instruction attempts from user code.
0432      */
0433     .align  4
0434     .globl  bad_instruction
0435 bad_instruction:
/* Fetch the faulting instruction (%l1 = trap PC) and mask/compare to spot
 * an unimplemented "flush"; those are skipped at label 1: instead of being
 * reported.  NOTE(review): when the branch at 0441 is taken, the first
 * instruction expanded from SAVE_ALL still executes in its delay slot.
 */
0436     sethi   %hi(0xc1f80000), %l4
0437     ld  [%l1], %l5
0438     sethi   %hi(0x81d80000), %l7
0439     and %l5, %l4, %l5
0440     cmp %l5, %l7
0441     be  1f
0442     SAVE_ALL
0443 
0444     wr  %l0, PSR_ET, %psr       ! re-enable traps
0445     WRITE_PAUSE
0446 
0447     add %sp, STACKFRAME_SZ, %o0
0448     mov %l1, %o1
0449     mov %l2, %o2
0450     call    do_illegal_instruction
0451      mov    %l0, %o3
0452 
0453     RESTORE_ALL
0454 
0455 1:  /* unimplemented flush - just skip */
0456     jmpl    %l2, %g0
0457      rett   %l2 + 4
0458 
/* Privileged-instruction trap from user mode: standard C-thunk pattern —
 * build a trap frame, re-enable traps, call do_priv_instruction(regs, pc,
 * npc, psr), then return through RESTORE_ALL.
 */
0459     .align  4
0460     .globl  priv_instruction
0461 priv_instruction:
0462     SAVE_ALL
0463 
0464     wr  %l0, PSR_ET, %psr
0465     WRITE_PAUSE
0466 
0467     add %sp, STACKFRAME_SZ, %o0
0468     mov %l1, %o1
0469     mov %l2, %o2
0470     call    do_priv_instruction
0471      mov    %l0, %o3
0472 
0473     RESTORE_ALL
0474 
0475     /* This routine handles unaligned data accesses. */
/* %l0 holds the trap-time %psr; PSR_PS distinguishes a fault taken in
 * kernel mode (kernel_unaligned_trap) from user mode (user_unaligned_trap).
 * In both cases %o1 is the faulting instruction word loaded from the trap
 * PC in %l1.
 */
0476     .align  4
0477     .globl  mna_handler
0478 mna_handler:
0479     andcc   %l0, PSR_PS, %g0
0480     be  mna_fromuser
0481      nop
0482 
0483     SAVE_ALL
0484 
0485     wr  %l0, PSR_ET, %psr
0486     WRITE_PAUSE
0487 
0488     ld  [%l1], %o1
0489     call    kernel_unaligned_trap
0490      add    %sp, STACKFRAME_SZ, %o0
0491 
0492     RESTORE_ALL
0493 
0494 mna_fromuser:
0495     SAVE_ALL
0496 
0497     wr  %l0, PSR_ET, %psr       ! re-enable traps
0498     WRITE_PAUSE
0499 
0500     ld  [%l1], %o1
0501     call    user_unaligned_trap
0502      add    %sp, STACKFRAME_SZ, %o0
0503 
0504     RESTORE_ALL
0505 
0506     /* This routine handles floating point disabled traps. */
/* Standard C-thunk: do_fpd_trap(regs, pc, npc, psr). */
0507     .align  4
0508     .globl  fpd_trap_handler
0509 fpd_trap_handler:
0510     SAVE_ALL
0511 
0512     wr  %l0, PSR_ET, %psr       ! re-enable traps
0513     WRITE_PAUSE
0514 
0515     add %sp, STACKFRAME_SZ, %o0
0516     mov %l1, %o1
0517     mov %l2, %o2
0518     call    do_fpd_trap
0519      mov    %l0, %o3
0520 
0521     RESTORE_ALL
0522 
0523     /* This routine handles Floating Point Exceptions. */
/* If the FP exception hit inside fpsave (at fpsave_magic or at fpsave's
 * first instruction, both of which store %fsr and can trap — see the
 * deadlock note near fpsave below), redirect execution to the matching
 * fpsave_catch/fpsave_catch2 recovery stub instead of treating it as a
 * user FP exception.  Otherwise fall through to the C handler at 2:.
 */
0524     .align  4
0525     .globl  fpe_trap_handler
0526 fpe_trap_handler:
0527     set fpsave_magic, %l5
0528     cmp %l1, %l5
0529     be  1f
0530      sethi  %hi(fpsave), %l5
0531     or  %l5, %lo(fpsave), %l5
0532     cmp %l1, %l5
0533     bne 2f
0534      sethi  %hi(fpsave_catch2), %l5
0535     or  %l5, %lo(fpsave_catch2), %l5
0536     wr  %l0, 0x0, %psr
0537     WRITE_PAUSE
0538     jmp %l5
0539      rett   %l5 + 4
0540 1:  
0541     sethi   %hi(fpsave_catch), %l5
0542     or  %l5, %lo(fpsave_catch), %l5
0543     wr  %l0, 0x0, %psr
0544     WRITE_PAUSE
0545     jmp %l5
0546      rett   %l5 + 4
0547 
0548 2:
0549     SAVE_ALL
0550 
0551     wr  %l0, PSR_ET, %psr       ! re-enable traps
0552     WRITE_PAUSE
0553 
0554     add %sp, STACKFRAME_SZ, %o0
0555     mov %l1, %o1
0556     mov %l2, %o2
0557     call    do_fpe_trap
0558      mov    %l0, %o3
0559 
0560     RESTORE_ALL
0561 
/* The following six entries all follow the identical C-thunk pattern:
 * SAVE_ALL, re-enable traps, then call handler(regs, pc, npc, psr) where
 * %l1/%l2/%l0 are the trap-time pc/npc/psr, and return via RESTORE_ALL.
 */
0562     /* This routine handles Tag Overflow Exceptions. */
0563     .align  4
0564     .globl  do_tag_overflow
0565 do_tag_overflow:
0566     SAVE_ALL
0567 
0568     wr  %l0, PSR_ET, %psr       ! re-enable traps
0569     WRITE_PAUSE
0570 
0571     add %sp, STACKFRAME_SZ, %o0
0572     mov %l1, %o1
0573     mov %l2, %o2
0574     call    handle_tag_overflow
0575      mov    %l0, %o3
0576 
0577     RESTORE_ALL
0578 
0579     /* This routine handles Watchpoint Exceptions. */
0580     .align  4
0581     .globl  do_watchpoint
0582 do_watchpoint:
0583     SAVE_ALL
0584 
0585     wr  %l0, PSR_ET, %psr       ! re-enable traps
0586     WRITE_PAUSE
0587 
0588     add %sp, STACKFRAME_SZ, %o0
0589     mov %l1, %o1
0590     mov %l2, %o2
0591     call    handle_watchpoint
0592      mov    %l0, %o3
0593 
0594     RESTORE_ALL
0595 
0596     /* This routine handles Register Access Exceptions. */
0597     .align  4
0598     .globl  do_reg_access
0599 do_reg_access:
0600     SAVE_ALL
0601 
0602     wr  %l0, PSR_ET, %psr       ! re-enable traps
0603     WRITE_PAUSE
0604 
0605     add %sp, STACKFRAME_SZ, %o0
0606     mov %l1, %o1
0607     mov %l2, %o2
0608     call    handle_reg_access
0609      mov    %l0, %o3
0610 
0611     RESTORE_ALL
0612 
0613     /* This routine handles Co-Processor Disabled Exceptions. */
0614     .align  4
0615     .globl  do_cp_disabled
0616 do_cp_disabled:
0617     SAVE_ALL
0618 
0619     wr  %l0, PSR_ET, %psr       ! re-enable traps
0620     WRITE_PAUSE
0621 
0622     add %sp, STACKFRAME_SZ, %o0
0623     mov %l1, %o1
0624     mov %l2, %o2
0625     call    handle_cp_disabled
0626      mov    %l0, %o3
0627 
0628     RESTORE_ALL
0629 
0630     /* This routine handles Co-Processor Exceptions. */
0631     .align  4
0632     .globl  do_cp_exception
0633 do_cp_exception:
0634     SAVE_ALL
0635 
0636     wr  %l0, PSR_ET, %psr       ! re-enable traps
0637     WRITE_PAUSE
0638 
0639     add %sp, STACKFRAME_SZ, %o0
0640     mov %l1, %o1
0641     mov %l2, %o2
0642     call    handle_cp_exception
0643      mov    %l0, %o3
0644 
0645     RESTORE_ALL
0646 
0647     /* This routine handles Hardware Divide By Zero Exceptions. */
0648     .align  4
0649     .globl  do_hw_divzero
0650 do_hw_divzero:
0651     SAVE_ALL
0652 
0653     wr  %l0, PSR_ET, %psr       ! re-enable traps
0654     WRITE_PAUSE
0655 
0656     add %sp, STACKFRAME_SZ, %o0
0657     mov %l1, %o1
0658     mov %l2, %o2
0659     call    handle_hw_divzero
0660      mov    %l0, %o3
0661 
0662     RESTORE_ALL
0663 
/* Window-flush software trap: spill register windows to the stack (user
 * windows via flush_user_windows(), kernel windows via the inline
 * FLUSH_ALL_KERNEL_WINDOWS macro), then advance pc/npc past the trap
 * instruction so it is not re-executed.
 */
0664     .align  4
0665     .globl  do_flush_windows
0666 do_flush_windows:
0667     SAVE_ALL
0668 
0669     wr  %l0, PSR_ET, %psr
0670     WRITE_PAUSE
0671 
0672     andcc   %l0, PSR_PS, %g0
0673     bne dfw_kernel
0674      nop
0675 
0676     call    flush_user_windows
0677      nop
0678 
0679     /* Advance over the trap instruction. */
0680     ld  [%sp + STACKFRAME_SZ + PT_NPC], %l1
0681     add %l1, 0x4, %l2
0682     st  %l1, [%sp + STACKFRAME_SZ + PT_PC]
0683     st  %l2, [%sp + STACKFRAME_SZ + PT_NPC]
0684 
0685     RESTORE_ALL
0686 
0687     .globl  flush_patch_one
0688 
0689     /* We get these for debugging routines using __builtin_return_address() */
0690 dfw_kernel:
0691 flush_patch_one:
0692     FLUSH_ALL_KERNEL_WINDOWS
0693 
0694     /* Advance over the trap instruction. */
0695     ld  [%sp + STACKFRAME_SZ + PT_NPC], %l1
0696     add %l1, 0x4, %l2
0697     st  %l1, [%sp + STACKFRAME_SZ + PT_PC]
0698     st  %l2, [%sp + STACKFRAME_SZ + PT_NPC]
0699 
0700     RESTORE_ALL
0701 
0702     /* The getcc software trap.  The user wants the condition codes from
0703      * the %psr in register %g1.
0704      */
0705 
/* Lightweight trap: no frame is built; ICC bits (psr[23:20]) are shifted
 * down into %g1 and we return directly past the trap instruction.
 */
0706     .align  4
0707     .globl  getcc_trap_handler
0708 getcc_trap_handler:
0709     srl %l0, 20, %g1    ! give user
0710     and %g1, 0xf, %g1   ! only ICC bits in %psr
0711     jmp %l2     ! advance over trap instruction
0712     rett    %l2 + 0x4   ! like this...
0713 
0714     /* The setcc software trap.  The user has condition codes in %g1
0715      * that it would like placed in the %psr.  Be careful not to flip
0716      * any unintentional bits!
0717      */
0718 
/* Inverse of getcc: only the ICC field of %psr is replaced with the
 * user-supplied bits from %g1; all other %psr bits are preserved.
 */
0719     .align  4
0720     .globl  setcc_trap_handler
0721 setcc_trap_handler:
0722     sll %g1, 0x14, %l4
0723     set PSR_ICC, %l5
0724     andn    %l0, %l5, %l0   ! clear ICC bits in %psr
0725     and %l4, %l5, %l4   ! clear non-ICC bits in user value
0726     or  %l4, %l0, %l4   ! or them in... mix mix mix
0727 
0728     wr  %l4, 0x0, %psr  ! set new %psr
0729     WRITE_PAUSE     ! TI scumbags...
0730 
0731     jmp %l2     ! advance over trap instruction
0732     rett    %l2 + 0x4   ! like this...
0733 
/* NMI async memory error: mask the NMI source in the global interrupt
 * controller, call the C handler sun4m_nmi(), then unmask and return.
 * The pending re-reads after each mask write force the stores to post.
 */
0734 sun4m_nmi_error:
0735     /* NMI async memory error handling. */
0736     sethi   %hi(0x80000000), %l4
0737     sethi   %hi(sun4m_irq_global), %o5
0738     ld  [%o5 + %lo(sun4m_irq_global)], %l5
0739     st  %l4, [%l5 + 0x0c]   ! sun4m_irq_global->mask_set=0x80000000
0740     WRITE_PAUSE
0741     ld  [%l5 + 0x00], %g0   ! sun4m_irq_global->pending
0742     WRITE_PAUSE
0743     or  %l0, PSR_PIL, %l4
0744     wr  %l4, 0x0, %psr
0745     WRITE_PAUSE
0746     wr  %l4, PSR_ET, %psr
0747     WRITE_PAUSE
0748     call    sun4m_nmi
0749      nop
0750     st  %l4, [%l5 + 0x08]   ! sun4m_irq_global->mask_clear=0x80000000
0751     WRITE_PAUSE
0752     ld  [%l5 + 0x00], %g0   ! sun4m_irq_global->pending
0753     WRITE_PAUSE
0754     RESTORE_ALL
0755 
0756 #ifndef CONFIG_SMP
/* On UP there are no cross-call IPIs, so level-15 can only be the NMI
 * memory-error case; jump straight to it after building the frame.
 */
0757     .align  4
0758     .globl  linux_trap_ipi15_sun4m
0759 linux_trap_ipi15_sun4m:
0760     SAVE_ALL
0761 
0762     ba  sun4m_nmi_error
0763      nop
0764 #endif /* CONFIG_SMP */
0765 
/* SRMMU fault entry: read the fault address (SFAR, MMU reg 0x400) and
 * fault status (SFSR, MMU reg 0x300) — the LEON_PI/SUN_PI_ pairs are
 * runtime-patched so only the variant for the running MMU executes —
 * pack address/write/text-fault into %l7, then call
 * do_sparc_fault(regs, text_fault, write, address).
 */
0766     .align  4
0767     .globl  srmmu_fault
0768 srmmu_fault:
0769     mov 0x400, %l5
0770     mov 0x300, %l4
0771 
0772 LEON_PI(lda [%l5] ASI_LEON_MMUREGS, %l6)    ! read sfar first
0773 SUN_PI_(lda [%l5] ASI_M_MMUREGS, %l6)   ! read sfar first
0774 
0775 LEON_PI(lda [%l4] ASI_LEON_MMUREGS, %l5)    ! read sfsr last
0776 SUN_PI_(lda [%l4] ASI_M_MMUREGS, %l5)   ! read sfsr last
0777 
0778     andn    %l6, 0xfff, %l6
0779     srl %l5, 6, %l5         ! and encode all info into l7
0780 
0781     and %l5, 2, %l5
0782     or  %l5, %l6, %l6
0783 
0784     or  %l6, %l7, %l7           ! l7 = [addr,write,txtfault]
0785 
0786     SAVE_ALL
0787 
0788     mov %l7, %o1
0789     mov %l7, %o2
0790     and %o1, 1, %o1     ! arg2 = text_faultp
0791     mov %l7, %o3
0792     and %o2, 2, %o2     ! arg3 = writep
0793     andn    %o3, 0xfff, %o3     ! arg4 = faulting address
0794 
0795     wr  %l0, PSR_ET, %psr
0796     WRITE_PAUSE
0797 
0798     call    do_sparc_fault
0799      add    %sp, STACKFRAME_SZ, %o0 ! arg1 = pt_regs ptr
0800 
0801     RESTORE_ALL
0802 
/* SunOS-compat execv: tail-branch into sys_execve with the third argument
 * cleared in the delay slot.
 */
0803     .align  4
0804 sunos_execv:
0805     .globl  sunos_execv
0806     b   sys_execve
0807      clr    %i2
0808 
/* sigstack: pass the caller's frame pointer as an extra argument while
 * preserving the return address (%o7) across the call setup; the tail
 * call returns directly to our caller.
 */
0809     .align  4
0810     .globl  sys_sigstack
0811 sys_sigstack:
0812     mov %o7, %l5
0813     mov %fp, %o2
0814     call    do_sys_sigstack
0815      mov    %l5, %o7
0816 
/* sigreturn: restore the signal context in C, notify the tracer if
 * syscall tracing is on, and return without touching the user registers
 * (they were rewritten by do_sigreturn via the pt_regs frame).
 */
0817     .align  4
0818     .globl  sys_sigreturn
0819 sys_sigreturn:
0820     call    do_sigreturn
0821      add    %sp, STACKFRAME_SZ, %o0
0822 
0823     ld  [%curptr + TI_FLAGS], %l5
0824     andcc   %l5, _TIF_SYSCALL_TRACE, %g0
0825     be  1f
0826      nop
0827 
0828     call    syscall_trace
0829      mov    1, %o1
0830 
0831 1:
0832     /* We don't want to muck with user registers like a
0833      * normal syscall, just return.
0834      */
0835     RESTORE_ALL
0836 
/* rt_sigreturn: same shape as sys_sigreturn but for RT signal frames. */
0837     .align  4
0838     .globl  sys_rt_sigreturn
0839 sys_rt_sigreturn:
0840     call    do_rt_sigreturn
0841      add    %sp, STACKFRAME_SZ, %o0
0842 
0843     ld  [%curptr + TI_FLAGS], %l5
0844     andcc   %l5, _TIF_SYSCALL_TRACE, %g0
0845     be  1f
0846      nop
0847 
0848     add %sp, STACKFRAME_SZ, %o0
0849     call    syscall_trace
0850      mov    1, %o1
0851 
0852 1:
0853     /* We are returning to a signal handler. */
0854     RESTORE_ALL
0855 
0856     /* Now that we have a real sys_clone, sys_fork() is
0857      * implemented in terms of it.  Our _real_ implementation
0858      * of SunOS vfork() will use sys_vfork().
0859      *
0860      * XXX These three should be consolidated into mostly shared
0861      * XXX code just like on sparc64... -DaveM
0862      */
/* All three fork-family entries flush the kernel windows (at runtime-
 * patchable sites, hence the flush_patch_* labels), then snapshot the
 * trap-time %psr/%wim pair into the task's thread struct so the child can
 * be set up correctly, and call the C implementation with a pt_regs ptr.
 */
0863     .align  4
0864     .globl  sys_fork, flush_patch_two
0865 sys_fork:
0866     mov %o7, %l5
0867 flush_patch_two:
0868     FLUSH_ALL_KERNEL_WINDOWS;
0869     ld  [%curptr + TI_TASK], %o4
0870     rd  %psr, %g4
0871     WRITE_PAUSE
0872     rd  %wim, %g5
0873     WRITE_PAUSE
0874     std %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
0875     add %sp, STACKFRAME_SZ, %o0
0876     call    sparc_fork
0877      mov    %l5, %o7
0878 
0879     /* Whee, kernel threads! */
0880     .globl  sys_clone, flush_patch_three
0881 sys_clone:
0882     mov %o7, %l5
0883 flush_patch_three:
0884     FLUSH_ALL_KERNEL_WINDOWS;
0885     ld  [%curptr + TI_TASK], %o4
0886     rd  %psr, %g4
0887     WRITE_PAUSE
0888     rd  %wim, %g5
0889     WRITE_PAUSE
0890     std %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
0891     add %sp, STACKFRAME_SZ, %o0
0892     call    sparc_clone
0893      mov    %l5, %o7
0894 
0895     /* Whee, real vfork! */
/* vfork tail-jumps into sparc_vfork (no saved %o7 juggling needed since
 * sparc_vfork returns directly to our caller).
 */
0896     .globl  sys_vfork, flush_patch_four
0897 sys_vfork:
0898 flush_patch_four:
0899     FLUSH_ALL_KERNEL_WINDOWS;
0900     ld  [%curptr + TI_TASK], %o4
0901     rd  %psr, %g4
0902     WRITE_PAUSE
0903     rd  %wim, %g5
0904     WRITE_PAUSE
0905     std %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
0906     sethi   %hi(sparc_vfork), %l1
0907     jmpl    %l1 + %lo(sparc_vfork), %g0
0908      add    %sp, STACKFRAME_SZ, %o0
0909 
/* Unimplemented syscall: route through do_syscall with %l7 pointing at
 * sys_ni_syscall so the normal return path still runs.
 */
0910         .align  4
0911 linux_sparc_ni_syscall:
0912     sethi   %hi(sys_ni_syscall), %l7
0913     b       do_syscall
0914      or     %l7, %lo(sys_ni_syscall), %l7
0915 
/* Syscall-entry tracing hook (branched to from do_syscall).  If
 * syscall_trace() returns non-zero the syscall is denied with -ENOSYS
 * (branch to 3: with the errno preloaded in the delay slot); otherwise
 * reload %g1 and the argument registers from pt_regs — the tracer may
 * have rewritten them — revalidate the syscall number, and re-dispatch
 * through the table at 2:.
 */
0916 linux_syscall_trace:
0917     add %sp, STACKFRAME_SZ, %o0
0918     call    syscall_trace
0919      mov    0, %o1
0920     cmp %o0, 0
0921     bne 3f
0922      mov    -ENOSYS, %o0
0923 
0924     /* Syscall tracing can modify the registers.  */
0925     ld  [%sp + STACKFRAME_SZ + PT_G1], %g1
0926     sethi   %hi(sys_call_table), %l7
0927     ld  [%sp + STACKFRAME_SZ + PT_I0], %i0
0928     or  %l7, %lo(sys_call_table), %l7
0929     ld  [%sp + STACKFRAME_SZ + PT_I1], %i1
0930     ld  [%sp + STACKFRAME_SZ + PT_I2], %i2
0931     ld  [%sp + STACKFRAME_SZ + PT_I3], %i3
0932     ld  [%sp + STACKFRAME_SZ + PT_I4], %i4
0933     ld  [%sp + STACKFRAME_SZ + PT_I5], %i5
0934     cmp %g1, NR_syscalls
0935     bgeu    3f
0936      mov    -ENOSYS, %o0
0937 
0938     sll %g1, 2, %l4
0939     mov %i0, %o0
0940     ld  [%l7 + %l4], %l7
0941     mov %i1, %o1
0942     mov %i2, %o2
0943     mov %i3, %o3
0944     b   2f
0945      mov    %i4, %o4
0946 
/* First code run by a newly created user process: finish the scheduler
 * handoff, then join the normal syscall-return path with the child's
 * return value taken from pt_regs.  %g3 holds the new thread_info here.
 */
0947     .globl  ret_from_fork
0948 ret_from_fork:
0949     call    schedule_tail
0950      ld [%g3 + TI_TASK], %o0
0951     b   ret_sys_call
0952      ld [%sp + STACKFRAME_SZ + PT_I0], %o0
0953 
/* First code run by a new kernel thread: call the thread function whose
 * address/argument were stashed in PT_G1/PT_G2, then splice the current
 * window pointer (CWP) into the saved %psr and return 0 through
 * ret_sys_call.
 */
0954     .globl  ret_from_kernel_thread
0955 ret_from_kernel_thread:
0956     call    schedule_tail
0957      ld [%g3 + TI_TASK], %o0
0958     ld  [%sp + STACKFRAME_SZ + PT_G1], %l0
0959     call    %l0
0960      ld [%sp + STACKFRAME_SZ + PT_G2], %o0
0961     rd  %psr, %l1
0962     ld  [%sp + STACKFRAME_SZ + PT_PSR], %l0
0963     andn    %l0, PSR_CWP, %l0
0964     nop
0965     and %l1, PSR_CWP, %l1
0966     or  %l0, %l1, %l0
0967     st  %l0, [%sp + STACKFRAME_SZ + PT_PSR]
0968     b   ret_sys_call
0969      mov    0, %o0
0970 
0971     /* Linux native system calls enter here... */
/* Syscall entry: %g1 holds the syscall number, %i0-%i5 the arguments.
 * The handler address is looked up in the table (%l7 — presumably set to
 * sys_call_table by the trap-table stub; TODO confirm against the trap
 * table) before the frame is built, for speed.
 */
0972     .align  4
0973     .globl  linux_sparc_syscall
0974 linux_sparc_syscall:
0975     sethi   %hi(PSR_SYSCALL), %l4
0976     or  %l0, %l4, %l0
0977     /* Direct access to user regs, must faster. */
0978     cmp %g1, NR_syscalls
0979     bgeu    linux_sparc_ni_syscall
0980      sll    %g1, 2, %l4
0981     ld  [%l7 + %l4], %l7
0982 
/* Common dispatch: build the frame, enable traps, marshal args into
 * %o0-%o5 and call the handler in %l7, diverting through
 * linux_syscall_trace first if _TIF_SYSCALL_TRACE is set.
 */
0983 do_syscall:
0984     SAVE_ALL_HEAD
0985      rd %wim, %l3
0986 
0987     wr  %l0, PSR_ET, %psr
0988     mov %i0, %o0
0989     mov %i1, %o1
0990     mov %i2, %o2
0991 
0992     ld  [%curptr + TI_FLAGS], %l5
0993     mov %i3, %o3
0994     andcc   %l5, _TIF_SYSCALL_TRACE, %g0
0995     mov %i4, %o4
0996     bne linux_syscall_trace
0997      mov    %i0, %l6
0998 2:
0999     call    %l7
1000      mov    %i5, %o5
1001 
1002 3:
1003     st  %o0, [%sp + STACKFRAME_SZ + PT_I0]
1004 
/* Syscall return: values in [-ERESTART_RESTARTBLOCK, -1] (unsigned bgeu
 * on the comparison) are errors — set the PSR carry bit and negate the
 * errno; otherwise clear carry.  Either way advance pc/npc past the trap
 * instruction, calling the exit-tracing hook first when tracing is on.
 */
1005 ret_sys_call:
1006     ld  [%curptr + TI_FLAGS], %l5
1007     cmp %o0, -ERESTART_RESTARTBLOCK
1008     ld  [%sp + STACKFRAME_SZ + PT_PSR], %g3
1009     set PSR_C, %g2
1010     bgeu    1f
1011      andcc  %l5, _TIF_SYSCALL_TRACE, %g0
1012 
1013     /* System call success, clear Carry condition code. */
1014     andn    %g3, %g2, %g3
1015     st  %g3, [%sp + STACKFRAME_SZ + PT_PSR] 
1016     bne linux_syscall_trace2
1017      ld [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */
1018     add %l1, 0x4, %l2           /* npc = npc+4 */
1019     st  %l1, [%sp + STACKFRAME_SZ + PT_PC]
1020     b   ret_trap_entry
1021      st %l2, [%sp + STACKFRAME_SZ + PT_NPC]
1022 1:
1023     /* System call failure, set Carry condition code.
1024      * Also, get abs(errno) to return to the process.
1025      */
1026     sub %g0, %o0, %o0
1027     or  %g3, %g2, %g3
1028     st  %o0, [%sp + STACKFRAME_SZ + PT_I0]
1029     st  %g3, [%sp + STACKFRAME_SZ + PT_PSR]
1030     bne linux_syscall_trace2
1031      ld [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */
1032     add %l1, 0x4, %l2           /* npc = npc+4 */
1033     st  %l1, [%sp + STACKFRAME_SZ + PT_PC]
1034     b   ret_trap_entry
1035      st %l2, [%sp + STACKFRAME_SZ + PT_NPC]
1036 
/* Exit-side tracing: notify the tracer (second arg 1 = exit), then finish
 * the pc/npc advance exactly as the non-traced path does.
 */
1037 linux_syscall_trace2:
1038     add %sp, STACKFRAME_SZ, %o0
1039     mov 1, %o1
1040     call    syscall_trace
1041      add    %l1, 0x4, %l2           /* npc = npc+4 */
1042     st  %l1, [%sp + STACKFRAME_SZ + PT_PC]
1043     b   ret_trap_entry
1044      st %l2, [%sp + STACKFRAME_SZ + PT_NPC]
1045 
1046 
1047 /* Saving and restoring the FPU state is best done from lowlevel code.
1048  *
1049  * void fpsave(unsigned long *fpregs, unsigned long *fsr,
1050  *             void *fpqueue, unsigned long *fpqdepth)
1051  */
1052 
1053     .globl  fpsave
1054 fpsave:
/* Bit 0x2000 of %fsr is tested to decide whether a floating-point queue
 * needs draining; %g2 counts queue entries and ends up in *fpqdepth.
 */
1055     st  %fsr, [%o1] ! this can trap on us if fpu is in bogon state
1056     ld  [%o1], %g1
1057     set 0x2000, %g4
1058     andcc   %g1, %g4, %g0
1059     be  2f
1060      mov    0, %g2
1061 
1062     /* We have an fpqueue to save. */
1063 1:
1064     std %fq, [%o2]
1065 fpsave_magic:
1066     st  %fsr, [%o1]
1067     ld  [%o1], %g3
1068     andcc   %g3, %g4, %g0
1069     add %g2, 1, %g2
1070     bne 1b
1071      add    %o2, 8, %o2
1072 
1073 2:
1074     st  %g2, [%o3]
1075 
/* Dump all 32 single-precision registers as 16 doubles into *fpregs. */
1076     std %f0, [%o0 + 0x00]
1077     std %f2, [%o0 + 0x08]
1078     std %f4, [%o0 + 0x10]
1079     std %f6, [%o0 + 0x18]
1080     std %f8, [%o0 + 0x20]
1081     std %f10, [%o0 + 0x28]
1082     std %f12, [%o0 + 0x30]
1083     std %f14, [%o0 + 0x38]
1084     std %f16, [%o0 + 0x40]
1085     std %f18, [%o0 + 0x48]
1086     std %f20, [%o0 + 0x50]
1087     std %f22, [%o0 + 0x58]
1088     std %f24, [%o0 + 0x60]
1089     std %f26, [%o0 + 0x68]
1090     std %f28, [%o0 + 0x70]
1091     retl
1092      std    %f30, [%o0 + 0x78]
1093 
1094     /* Thanks for Theo Deraadt and the authors of the Sprite/netbsd/openbsd
1095      * code for pointing out this possible deadlock, while we save state
1096      * above we could trap on the fsr store so our low level fpu trap
1097      * code has to know how to deal with this.
1098      */
/* Recovery stubs entered from fpe_trap_handler when the %fsr store inside
 * fpsave itself trapped: redo the store, then resume fpsave just past the
 * trapping instruction.
 */
1099 fpsave_catch:
1100     b   fpsave_magic + 4
1101      st %fsr, [%o1]
1102 
1103 fpsave_catch2:
1104     b   fpsave + 4
1105      st %fsr, [%o1]
1106 
1107     /* void fpload(unsigned long *fpregs, unsigned long *fsr); */
1108 
/* Inverse of fpsave: reload all 16 double registers from *fpregs, then
 * restore %fsr from *fsr.
 */
1109     .globl  fpload
1110 fpload:
1111     ldd [%o0 + 0x00], %f0
1112     ldd [%o0 + 0x08], %f2
1113     ldd [%o0 + 0x10], %f4
1114     ldd [%o0 + 0x18], %f6
1115     ldd [%o0 + 0x20], %f8
1116     ldd [%o0 + 0x28], %f10
1117     ldd [%o0 + 0x30], %f12
1118     ldd [%o0 + 0x38], %f14
1119     ldd [%o0 + 0x40], %f16
1120     ldd [%o0 + 0x48], %f18
1121     ldd [%o0 + 0x50], %f20
1122     ldd [%o0 + 0x58], %f22
1123     ldd [%o0 + 0x60], %f24
1124     ldd [%o0 + 0x68], %f26
1125     ldd [%o0 + 0x70], %f28
1126     ldd [%o0 + 0x78], %f30
1127     ld  [%o1], %fsr
1128     retl
1129      nop
1130 
1131     /* __ndelay and __udelay take two arguments:
1132      * 0 - nsecs or usecs to delay
1133      * 1 - per_cpu udelay_val (loops per jiffy)
1134      *
1135      * Note that ndelay gives HZ times higher resolution but has a 10ms
1136      * limit.  udelay can handle up to 1s.
1137      *
     * Converts the requested time into a loop count via two 32x32
     * multiplies, then falls through into the shared spin loop.
1138      */
1139     .globl  __ndelay
1140 __ndelay:
1141     save    %sp, -STACKFRAME_SZ, %sp
1142     mov %i0, %o0        ! round multiplier up so large ns ok
1143     mov 0x1ae, %o1      ! 2**32 / (1 000 000 000 / HZ)
1144     umul    %o0, %o1, %o0
1145     rd  %y, %o1     ! high word of product discarded; low 32 bits
                    ! suffice given the 10ms limit above
1146     mov %i1, %o1        ! udelay_val
1147     umul    %o0, %o1, %o0
1148     rd  %y, %o1     ! this time keep the high 32 bits (the >>32)
1149     ba  delay_continue
1150      mov    %o1, %o0        ! >>32 later for better resolution
1150 
1151     .globl  __udelay
     ! __udelay(usecs, udelay_val): spin for 'usecs' microseconds.
     ! In: %i0 = usecs, %i1 = per-cpu loops-per-jiffy. Converts usecs to
     ! a loop count with two 32x32 multiplies (high word kept via %y),
     ! adds a ~0.01 * 2**32 rounding constant, scales by HZ, then falls
     ! through into delay_continue.
1152 __udelay:
1153     save    %sp, -STACKFRAME_SZ, %sp
1154     mov %i0, %o0
1155     sethi   %hi(0x10c7), %o1    ! round multiplier up so large us ok
1156     or  %o1, %lo(0x10c7), %o1   ! 2**32 / 1 000 000
1157     umul    %o0, %o1, %o0
1158     rd  %y, %o1     ! high word discarded; low 32 bits kept here
1159     mov %i1, %o1        ! udelay_val
1160     umul    %o0, %o1, %o0
1161     rd  %y, %o1     ! keep high 32 bits of the product this time
1162     sethi   %hi(0x028f4b62), %l0    ! Add in rounding constant * 2**32,
1163     or  %l0, %lo(0x028f4b62), %l0  ! (fix: was 'or %g0, ...', which threw
                    ! away the sethi and added only the low 10
                    ! bits of the rounding constant)
1164     addcc   %o0, %l0, %o0       ! 2**32 * 0.009 999
1165     bcs,a   3f          ! annulled: carry into the high word
1166      add    %o1, 0x01, %o1
1167 3:
1168     mov HZ, %o0         ! >>32 earlier for wider range
1169     umul    %o0, %o1, %o0
1170     rd  %y, %o1
1171 
    ! Shared spin loop for __ndelay/__udelay: burn %o0 iterations.
    ! First test comes from the cmp, subsequent ones from subcc.
1172 delay_continue:
1173     cmp %o0, 0x0
1174 1:
1175     bne 1b          ! spin until the count hits zero
1176      subcc  %o0, 1, %o0 ! (delay slot) decrement and set condition codes
1177     
1178     ret         ! return to caller's window
1179     restore
1180 EXPORT_SYMBOL(__udelay)
1181 EXPORT_SYMBOL(__ndelay)
1182 
1183     /* Handle a software breakpoint */
1184     /* We have to inform parent that child has stopped */
1185     .align 4
1186     .globl breakpoint_trap
1187 breakpoint_trap:
    ! Trap entry: %l0 holds the trap-time %psr (set by the trap vector,
    ! cf. pcic_nmi_trap_patch below); %l3 gets %wim for SAVE_ALL.
1188     rd  %wim,%l3
1189     SAVE_ALL
1190     wr  %l0, PSR_ET, %psr   ! re-enable traps before calling C code
1191     WRITE_PAUSE
1192 
1193     st  %i0, [%sp + STACKFRAME_SZ + PT_G0] ! for restarting syscalls
1194     call    sparc_breakpoint    ! C handler; delivers the stop/signal
1195      add    %sp, STACKFRAME_SZ, %o0 ! (delay slot) %o0 = struct pt_regs *
1196 
1197     RESTORE_ALL
1198 
1199 #ifdef CONFIG_KGDB
    /* Low-level KGDB trap entry: save full state, re-enable traps,
     * then hand off to the C kgdb_trap() handler.
     */
1200     ENTRY(kgdb_trap_low)
1201     rd  %wim,%l3        ! %wim for SAVE_ALL; %l0 = trap-time %psr
1202     SAVE_ALL
1203     wr  %l0, PSR_ET, %psr   ! re-enable traps before calling C code
1204     WRITE_PAUSE
1205 
1206     mov %l7, %o0        ! trap_level
1207     call    kgdb_trap
1208      add    %sp, STACKFRAME_SZ, %o1 ! struct pt_regs *regs
1209 
1210     RESTORE_ALL
1211     ENDPROC(kgdb_trap_low)
1212 #endif
1213 
1214     .align  4
1215     .globl  flush_patch_exception
    ! Fault fixup stub: flush all kernel windows, reload %o6/%o7 from
    ! the saved pair at [%o0], then resume past the faulting site with
    ! %g1 = 1 to signal EFAULT to the caller.
1216 flush_patch_exception:
1217     FLUSH_ALL_KERNEL_WINDOWS;
1218     ldd [%o0], %o6
1219     jmpl    %o7 + 0xc, %g0          ! see asm-sparc/processor.h
1220      mov    1, %g1              ! signal EFAULT condition
1221 
1222     .align  4
1223     .globl  kill_user_windows, kuw_patch1_7win
1224     .globl  kuw_patch1
    ! Alternate encoding of the kuw_patch1 instruction below for CPUs
    ! with 7 register windows (shift by 6 instead of 7) — presumably
    ! patched over kuw_patch1 at startup; TODO confirm patch site.
1225 kuw_patch1_7win:    sll %o3, 6, %o3
1226 
1227     /* No matter how much overhead this routine has in the worst
1228      * case scenario, it is several times better than taking the
1229      * traps with the old method of just doing flush_user_windows().
     *
     * Discard all user windows: simulate successive 'save's against
     * the uwinmask without actually spilling them, then write the
     * resulting %wim and clear the mask.
1230      */
1231 kill_user_windows:
1232     ld  [%g6 + TI_UWINMASK], %o0    ! get current umask
1233     orcc    %g0, %o0, %g0           ! if no bits set, we are done
1234     be  3f              ! nothing to do
1235      rd %psr, %o5           ! must clear interrupts
1236     or  %o5, PSR_PIL, %o4       ! or else that could change
1237     wr  %o4, 0x0, %psr          ! the uwinmask state
1238     WRITE_PAUSE             ! burn them cycles
1239 1:
1240     ld  [%g6 + TI_UWINMASK], %o0    ! get consistent state
1241     orcc    %g0, %o0, %g0           ! did an interrupt come in?
1242     be  4f              ! yep, we are done
1243      rd %wim, %o3           ! get current wim
1244     srl %o3, 1, %o4         ! simulate a save
    ! Rotate: the shift amount below equals the number of windows and
    ! is runtime-patched (see kuw_patch1_7win above).
1245 kuw_patch1:
1246     sll %o3, 7, %o3         ! compute next wim
1247     or  %o4, %o3, %o3           ! result
1248     andncc  %o0, %o3, %o0           ! clean this bit in umask
1249     bne kuw_patch1          ! not done yet
1250      srl    %o3, 1, %o4         ! begin another save simulation
1251     wr  %o3, 0x0, %wim          ! set the new wim
1252     st  %g0, [%g6 + TI_UWINMASK]    ! clear uwinmask
1253 4:
1254     wr  %o5, 0x0, %psr          ! re-enable interrupts
1255     WRITE_PAUSE             ! burn baby burn
1256 3:
1257     retl                    ! return
1258      st %g0, [%g6 + TI_W_SAVED]     ! no windows saved
1259 
1260     .align  4
1261     .globl  restore_current
    ! Reload the current thread_info pointer into %g6 (curptr),
    ! clobbering %o0 as scratch.
1262 restore_current:
1263     LOAD_CURRENT(g6, o0)
1264     retl
1265      nop
1266 
1267 #ifdef CONFIG_PCIC_PCI
1268 #include <asm/pcic.h>
1269 
1270     .align  4
1271     .globl  linux_trap_ipi15_pcic
    ! Level-15 (NMI) trap entry for PCIC-based machines. Entered via the
    ! patched trap vector below, so %l0 already holds the trap-time %psr.
1272 linux_trap_ipi15_pcic:
1273     rd  %wim, %l3
1274     SAVE_ALL
1275 
1276     /*
1277      * First deactivate NMI
1278      * or we cannot drop ET, cannot get window spill traps.
1279      * The busy loop is necessary because the PIO error
1280      * sometimes does not go away quickly and we trap again.
1281      */
1282     sethi   %hi(pcic_regs), %o1
1283     ld  [%o1 + %lo(pcic_regs)], %o2 ! %o2 = PCIC register base
1284 
1285     ! Get pending status for printouts later.
1286     ld  [%o2 + PCI_SYS_INT_PENDING], %o0
1287 
1288     mov PCI_SYS_INT_PENDING_CLEAR_ALL, %o1
1289     stb %o1, [%o2 + PCI_SYS_INT_PENDING_CLEAR]
1290 1:
    ! Spin until the PIO/PCI pending bits really clear (see note above).
1291     ld  [%o2 + PCI_SYS_INT_PENDING], %o1
1292     andcc   %o1, ((PCI_SYS_INT_PENDING_PIO|PCI_SYS_INT_PENDING_PCI)>>24), %g0
1293     bne 1b
1294      nop
1295 
1296     or  %l0, PSR_PIL, %l4   ! keep interrupts masked at max PIL...
1297     wr  %l4, 0x0, %psr
1298     WRITE_PAUSE
1299     wr  %l4, PSR_ET, %psr   ! ...but re-enable traps for window spills
1300     WRITE_PAUSE
1301 
1302     call    pcic_nmi        ! C handler; %o0 still holds pending status
1303      add    %sp, STACKFRAME_SZ, %o1 ! struct pt_regs *regs
1304     RESTORE_ALL
1305 
    ! Four-word template copied over the level-15 trap vector entry;
    ! the final .word 0 pads the 4-instruction slot.
1306     .globl  pcic_nmi_trap_patch
1307 pcic_nmi_trap_patch:
1308     sethi   %hi(linux_trap_ipi15_pcic), %l3
1309     jmpl    %l3 + %lo(linux_trap_ipi15_pcic), %g0
1310      rd %psr, %l0       ! (delay slot) hand trap-time %psr to the handler
1311     .word   0
1312 
1313 #endif /* CONFIG_PCIC_PCI */
1314 
1315     .globl  flushw_all
    ! Flush all register windows to the stack by nesting seven saves
    ! (forcing window-overflow traps to spill them) and unwinding.
1316 flushw_all:
1317     save    %sp, -0x40, %sp
1318     save    %sp, -0x40, %sp
1319     save    %sp, -0x40, %sp
1320     save    %sp, -0x40, %sp
1321     save    %sp, -0x40, %sp
1322     save    %sp, -0x40, %sp
1323     save    %sp, -0x40, %sp
1324     restore
1325     restore
1326     restore
1327     restore
1328     restore
1329     restore
1330     ret
1331      restore            ! (delay slot) final restore back to caller
1332 
1333 #ifdef CONFIG_SMP
    /* Return the hardware CPU id in %o0. The default (sun4m) sequence
     * extracts it from %tbr; the .cpuid_patch section supplies
     * replacement instructions that boot code patches in at the
     * location recorded by '.word 661b' on SUN4D and LEON.
     */
1334 ENTRY(hard_smp_processor_id)
1335 661:    rd      %tbr, %g1
1336     srl     %g1, 12, %o0    ! sun4m: cpu id lives in %tbr bits 13:12
1337     and     %o0, 3, %o0
1338     .section    .cpuid_patch, "ax"
1339     /* Instruction location. */
1340     .word       661b
1341     /* SUN4D implementation. */
1342     lda     [%g0] ASI_M_VIKING_TMP1, %o0
1343     nop
1344     nop
1345     /* LEON implementation. */
1346     rd      %asr17, %o0
1347     srl     %o0, 0x1c, %o0  ! LEON: cpu index in %asr17 bits 31:28
1348     nop
1349     .previous
1350     retl
1351      nop
1352 ENDPROC(hard_smp_processor_id)
1353 #endif
1354 
1355 /* End of entry.S */