Back to home page

OSCL-LXR

 
 

    


0001 /*
0002  * This file is subject to the terms and conditions of the GNU General Public
0003  * License.  See the file "COPYING" in the main directory of this archive
0004  * for more details.
0005  *
0006  * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
0007  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
0008  * Copyright (C) 2002, 2007  Maciej W. Rozycki
0009  * Copyright (C) 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
0010  */
0011 #include <linux/init.h>
0012 
0013 #include <asm/asm.h>
0014 #include <asm/asmmacro.h>
0015 #include <asm/cacheops.h>
0016 #include <asm/irqflags.h>
0017 #include <asm/regdef.h>
0018 #include <asm/fpregdef.h>
0019 #include <asm/mipsregs.h>
0020 #include <asm/stackframe.h>
0021 #include <asm/sync.h>
0022 #include <asm/thread_info.h>
0023 
0024     __INIT
0025 
0026 /*
0027  * General exception vector for all other CPUs.
0028  *
0029  * Be careful when changing this, it has to be at most 128 bytes
0030  * to fit into space reserved for the exception handler.
0031  */
0032 NESTED(except_vec3_generic, 0, sp)
0033     .set    push
0034     .set    noat                        # only k0/k1 may be clobbered; $at must survive
0035     mfc0    k1, CP0_CAUSE               # k1 = CP0 Cause
0036     andi    k1, k1, 0x7c                # isolate ExcCode, already scaled by 4
0037 #ifdef CONFIG_64BIT
0038     dsll    k1, k1, 1                   # rescale: table entries are 8 bytes on 64-bit
0039 #endif
0040     PTR_L   k0, exception_handlers(k1)  # fetch handler from the dispatch table
0041     jr  k0                              # tail-jump into it
0042     .set    pop
0043     END(except_vec3_generic)
0044 
0045 /*
0046  * General exception handler for CPUs with virtual coherency exception.
0047  *
0048  * Be careful when changing this, it has to be at most 256 (as a special
0049  * exception) bytes to fit into space reserved for the exception handler.
0050  */
0051 NESTED(except_vec3_r4000, 0, sp)
0052     .set    push
0053     .set    arch=r4000
0054     .set    noat                        # only k0/k1 may be clobbered here
0055     mfc0    k1, CP0_CAUSE
0056     li  k0, 31<<2                       # ExcCode 31 = VCED (virtual coherency, data)
0057     andi    k1, k1, 0x7c                # k1 = ExcCode << 2
0058     .set    push
0059     .set    noreorder
0060     .set    nomacro
0061     beq k1, k0, handle_vced
0062      li k0, 14<<2                       # (delay slot) ExcCode 14 = VCEI (instruction)
0063     beq k1, k0, handle_vcei
0064 #ifdef CONFIG_64BIT
0065      dsll   k1, k1, 1                   # (delay slot) rescale for 8-byte table entries
0066 #endif
0067     .set    pop
0068     PTR_L   k0, exception_handlers(k1)  # anything else: normal table dispatch
0069     jr  k0
0070 
0071     /*
0072      * Big shit, we now may have two dirty primary cache lines for the same
0073      * physical address.  We can safely invalidate the line pointed to by
0074      * c0_badvaddr because after return from this exception handler the
0075      * load / store will be re-executed.
0076      */
0077 handle_vced:
0078     MFC0    k0, CP0_BADVADDR
0079     li  k1, -4                  # Is this ...
0080     and k0, k1                  # ... really needed?
0081     mtc0    zero, CP0_TAGLO             # zero tag = invalid line
0082     cache   Index_Store_Tag_D, (k0)     # invalidate the primary D-cache line
0083     cache   Hit_Writeback_Inv_SD, (k0)  # write back + invalidate the S-cache line
0084 #ifdef CONFIG_PROC_FS
0085     PTR_LA  k0, vced_count              # bump the /proc VCED statistics counter
0086     lw  k1, (k0)
0087     addiu   k1, 1
0088     sw  k1, (k0)
0089 #endif
0090     eret
0091 
0092 handle_vcei:
0093     MFC0    k0, CP0_BADVADDR
0094     cache   Hit_Writeback_Inv_SD, (k0)      # also cleans pi
0095 #ifdef CONFIG_PROC_FS
0096     PTR_LA  k0, vcei_count              # bump the /proc VCEI statistics counter
0097     lw  k1, (k0)
0098     addiu   k1, 1
0099     sw  k1, (k0)
0100 #endif
0101     eret
0102     .set    pop
0103     END(except_vec3_r4000)
0104 
0105     __FINIT
0106 
0107     .align  5   /* 32 byte rollback region */
0108 LEAF(__r4k_wait)
    /*
     * CPU idle: re-check TIF_NEED_RESCHED, then issue WAIT.  An
     * interrupt taken anywhere inside the 32-byte region below has its
     * EPC rewound to the region start (see BUILD_ROLLBACK_PROLOGUE),
     * making the flag test and the WAIT atomic w.r.t. interrupts.
     */
0109     .set    push
0110     .set    noreorder
0111     /* start of rollback region */
0112     LONG_L  t0, TI_FLAGS($28)           # $28 = current thread_info
0113     nop
0114     andi    t0, _TIF_NEED_RESCHED
0115     bnez    t0, 1f                      # reschedule pending: skip the WAIT
0116      nop
0117     nop
0118     nop
0119 #ifdef CONFIG_CPU_MICROMIPS
0120     nop
0121     nop
0122     nop
0123     nop                                 # extra padding: microMIPS insns are shorter
0124 #endif
0125     .set    MIPS_ISA_ARCH_LEVEL_RAW
0126     wait
0127     /* end of rollback region (the region size must be power of two) */
0128 1:
0129     jr  ra
0130      nop
0131     .set    pop
0132     END(__r4k_wait)
0133 
0134     .macro  BUILD_ROLLBACK_PROLOGUE handler
    /*
     * Emit rollback_\handler: if the exception was taken inside
     * __r4k_wait's 32-byte rollback region, rewind EPC to the start of
     * the region before dispatching to \handler.  Uses only k0/k1.
     */
0135     FEXPORT(rollback_\handler)
0136     .set    push
0137     .set    noat
0138     MFC0    k0, CP0_EPC
0139     PTR_LA  k1, __r4k_wait
0140     ori k0, 0x1f    /* 32 byte rollback region */
0141     xori    k0, 0x1f                    # k0 = EPC rounded down to 32 bytes
0142     bne k0, k1, \handler                # not inside __r4k_wait: plain dispatch
0143     MTC0    k0, CP0_EPC                 # only reached when k0 == k1: rewind EPC
0144     .set pop
0145     .endm
0146 
0147     .align  5
0148 BUILD_ROLLBACK_PROLOGUE handle_int
0149 NESTED(handle_int, PT_SIZE, sp)
0150     .cfi_signal_frame
0151 #ifdef CONFIG_TRACE_IRQFLAGS
0152     /*
0153      * Check to see if the interrupted code has just disabled
0154      * interrupts and ignore this interrupt for now if so.
0155      *
0156      * local_irq_disable() disables interrupts and then calls
0157      * trace_hardirqs_off() to track the state. If an interrupt is taken
0158      * after interrupts are disabled but before the state is updated
0159      * it will appear to restore_all that it is incorrectly returning with
0160      * interrupts disabled
0161      */
0162     .set    push
0163     .set    noat
0164     mfc0    k0, CP0_STATUS
0165 #if defined(CONFIG_CPU_R3000)
0166     and k0, ST0_IEP                     # R3000: "previous" interrupt-enable bit
0167     bnez    k0, 1f
0168 
0169     mfc0    k0, CP0_EPC                 # IRQs already off: return to EPC untraced
0170     .set    noreorder
0171     j   k0
0172      rfe
0173 #else
0174     and k0, ST0_IE
0175     bnez    k0, 1f
0176 
0177     eret                                # IRQs already off: simply resume
0178 #endif
0179 1:
0180     .set pop
0181 #endif
0182     SAVE_ALL docfi=1
0183     CLI                                 # dispatch runs with interrupts masked
0184     TRACE_IRQS_OFF
0185 
0186     LONG_L  s0, TI_REGS($28)            # $28 = thread_info; remember old regs ptr
0187     LONG_S  sp, TI_REGS($28)            # publish this frame as the current pt_regs
0188 
0189     /*
0190      * SAVE_ALL ensures we are using a valid kernel stack for the thread.
0191      * Check if we are already using the IRQ stack.
0192      */
0193     move    s1, sp # Preserve the sp
0194 
0195     /* Get IRQ stack for this CPU */
0196     ASM_CPUID_MFC0  k0, ASM_SMP_CPUID_REG
0197 #if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
0198     lui k1, %hi(irq_stack)
0199 #else
0200     lui k1, %highest(irq_stack)         # build the 64-bit address piecewise
0201     daddiu  k1, %higher(irq_stack)
0202     dsll    k1, 16
0203     daddiu  k1, %hi(irq_stack)
0204     dsll    k1, 16
0205 #endif
0206     LONG_SRL    k0, SMP_CPUID_PTRSHIFT  # cpu id -> pointer-sized table offset
0207     LONG_ADDU   k1, k0
0208     LONG_L  t0, %lo(irq_stack)(k1)      # t0 = base of this CPU's IRQ stack
0209 
0210     # Check if already on IRQ stack
0211     PTR_LI  t1, ~(_THREAD_SIZE-1)
0212     and t1, t1, sp                      # t1 = base of the stack sp currently lives on
0213     beq t0, t1, 2f
0214 
0215     /* Switch to IRQ stack */
0216     li  t1, _IRQ_STACK_START
0217     PTR_ADD sp, t0, t1
0218 
0219     /* Save task's sp on IRQ stack so that unwinding can follow it */
0220     LONG_S  s1, 0(sp)
0221 2:
0222     jal plat_irq_dispatch
0223 
0224     /* Restore sp */
0225     move    sp, s1
0226 
0227     j   ret_from_irq
0228 #ifdef CONFIG_CPU_MICROMIPS
0229     nop
0230 #endif
0231     END(handle_int)
0232 
0233     __INIT
0234 
0235 /*
0236  * Special interrupt vector for MIPS64 ISA & embedded MIPS processors.
0237  * This is a dedicated interrupt exception vector which reduces the
0238  * interrupt processing overhead.  The jump instruction will be replaced
0239  * at the initialization time.
0240  *
0241  * Be careful when changing this, it has to be at most 128 bytes
0242  * to fit into space reserved for the exception handler.
0243  */
0244 NESTED(except_vec4, 0, sp)
    /* Placeholder: the jump target is patched in at boot time. */
0245 1:  j   1b          /* Dummy, will be replaced */
0246     END(except_vec4)
0247 
0248 /*
0249  * EJTAG debug exception handler.
0250  * The EJTAG debug exception entry point is 0xbfc00480, which
0251  * normally is in the boot PROM, so the boot PROM must do an
0252  * unconditional jump to this vector.
0253  */
0254 NESTED(except_vec_ejtag_debug, 0, sp)
0255     j   ejtag_debug_handler             # trampoline from the 0xbfc00480 debug vector
0256 #ifdef CONFIG_CPU_MICROMIPS
0257      nop
0258 #endif
0259     END(except_vec_ejtag_debug)
0260 
0261     __FINIT
0262 
0263 /*
0264  * Vectored interrupt handler.
0265  * This prototype is copied to ebase + n*IntCtl.VS and patched
0266  * to invoke the handler
0267  */
0268 BUILD_ROLLBACK_PROLOGUE except_vec_vi
0269 NESTED(except_vec_vi, 0, sp)
0270     SAVE_SOME docfi=1
0271     SAVE_AT docfi=1
0272     .set    push
0273     .set    noreorder
0274     PTR_LA  v1, except_vec_vi_handler
0275 FEXPORT(except_vec_vi_lui)
0276     lui v0, 0       /* Patched */       # boot patches in %hi of this vector's handler
0277     jr  v1
0278 FEXPORT(except_vec_vi_ori)
0279      ori    v0, 0       /* Patched */   # (delay slot) %lo of the handler -> v0
0280     .set    pop
0281     END(except_vec_vi)
0282 EXPORT(except_vec_vi_end)
0283 
0284 /*
0285  * Common Vectored Interrupt code
0286  * Complete the register saves and invoke the handler which is passed in $v0
0287  */
0288 NESTED(except_vec_vi_handler, 0, sp)
0289     SAVE_TEMP                           # finish the save begun in except_vec_vi
0290     SAVE_STATIC
0291     CLI
0292 #ifdef CONFIG_TRACE_IRQFLAGS
0293     move    s0, v0                      # v0 holds the handler ptr; protect it across
0294     TRACE_IRQS_OFF
0295     move    v0, s0                      # ... the tracing call
0296 #endif
0297 
0298     LONG_L  s0, TI_REGS($28)            # $28 = thread_info; remember old regs ptr
0299     LONG_S  sp, TI_REGS($28)            # publish this frame as the current pt_regs
0300 
0301     /*
0302      * SAVE_ALL ensures we are using a valid kernel stack for the thread.
0303      * Check if we are already using the IRQ stack.
0304      */
0305     move    s1, sp # Preserve the sp
0306 
0307     /* Get IRQ stack for this CPU */
0308     ASM_CPUID_MFC0  k0, ASM_SMP_CPUID_REG
0309 #if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
0310     lui k1, %hi(irq_stack)
0311 #else
0312     lui k1, %highest(irq_stack)         # build the 64-bit address piecewise
0313     daddiu  k1, %higher(irq_stack)
0314     dsll    k1, 16
0315     daddiu  k1, %hi(irq_stack)
0316     dsll    k1, 16
0317 #endif
0318     LONG_SRL    k0, SMP_CPUID_PTRSHIFT  # cpu id -> pointer-sized table offset
0319     LONG_ADDU   k1, k0
0320     LONG_L  t0, %lo(irq_stack)(k1)      # t0 = base of this CPU's IRQ stack
0321 
0322     # Check if already on IRQ stack
0323     PTR_LI  t1, ~(_THREAD_SIZE-1)
0324     and t1, t1, sp
0325     beq t0, t1, 2f
0326 
0327     /* Switch to IRQ stack */
0328     li  t1, _IRQ_STACK_START
0329     PTR_ADD sp, t0, t1
0330 
0331     /* Save task's sp on IRQ stack so that unwinding can follow it */
0332     LONG_S  s1, 0(sp)
0333 2:
0334     jalr    v0                          # invoke the handler patched into v0
0335 
0336     /* Restore sp */
0337     move    sp, s1
0338 
0339     j   ret_from_irq
0340     END(except_vec_vi_handler)
0341 
0342 /*
0343  * EJTAG debug exception handler.
0344  */
0345 NESTED(ejtag_debug_handler, PT_SIZE, sp)
0346     .set    push
0347     .set    noat
0348     MTC0    k0, CP0_DESAVE              # stash k0 in the EJTAG scratch register
0349     mfc0    k0, CP0_DEBUG
0350 
0351     andi    k0, k0, MIPS_DEBUG_DBP  # Check for SDBBP.
0352     beqz    k0, ejtag_return            # not a software breakpoint: just resume
0353 
0354 #ifdef CONFIG_SMP
    /*
     * Take the debug-buffer spinlock.  Note the trick: sc stores k0,
     * which holds the spinlock's own address, so a *held* lock word
     * contains that address -- letting the contention loop at 2: reuse
     * the value just loaded by ll as the lock address.
     */
0355 1:  PTR_LA  k0, ejtag_debug_buffer_spinlock
0356     __SYNC(full, loongson3_war)
0357 2:  ll  k0, 0(k0)
0358     bnez    k0, 2b
0359     PTR_LA  k0, ejtag_debug_buffer_spinlock
0360     sc  k0, 0(k0)
0361     beqz    k0, 1b                      # sc failed: retry the whole acquire
0362 # ifdef CONFIG_WEAK_REORDERING_BEYOND_LLSC
0363     sync
0364 # endif
0365 
0366     PTR_LA  k0, ejtag_debug_buffer
0367     LONG_S  k1, 0(k0)                   # park k1 in the shared buffer
0368 
0369     ASM_CPUID_MFC0 k1, ASM_SMP_CPUID_REG
0370     PTR_SRL k1, SMP_CPUID_PTRSHIFT
0371     PTR_SLL k1, LONGLOG                 # cpu id -> byte offset of this CPU's slot
0372     PTR_LA  k0, ejtag_debug_buffer_per_cpu
0373     PTR_ADDU k0, k1
0374 
0375     PTR_LA  k1, ejtag_debug_buffer
0376     LONG_L  k1, 0(k1)
0377     LONG_S  k1, 0(k0)                   # move saved k1 into the per-cpu slot
0378 
0379     PTR_LA  k0, ejtag_debug_buffer_spinlock
0380     sw  zero, 0(k0)                     # release the spinlock
0381 #else
0382     PTR_LA  k0, ejtag_debug_buffer
0383     LONG_S  k1, 0(k0)                   # save k1 (k0 is already in DESAVE)
0384 #endif
0385 
0386     SAVE_ALL
0387     move    a0, sp                      # a0 = saved register frame
0388     jal ejtag_exception_handler
0389     RESTORE_ALL
0390 
0391 #ifdef CONFIG_SMP
0392     ASM_CPUID_MFC0 k1, ASM_SMP_CPUID_REG
0393     PTR_SRL k1, SMP_CPUID_PTRSHIFT
0394     PTR_SLL k1, LONGLOG
0395     PTR_LA  k0, ejtag_debug_buffer_per_cpu
0396     PTR_ADDU k0, k1
0397     LONG_L  k1, 0(k0)                   # restore k1 from the per-cpu slot
0398 #else
0399     PTR_LA  k0, ejtag_debug_buffer
0400     LONG_L  k1, 0(k0)
0401 #endif
0402 
0403 ejtag_return:
0404     back_to_back_c0_hazard
0405     MFC0    k0, CP0_DESAVE              # recover the original k0
0406     .set    mips32
0407     deret                               # return from EJTAG debug mode
0408     .set    pop
0409     END(ejtag_debug_handler)
0410 
0411 /*
0412  * This buffer is reserved for the use of the EJTAG debug
0413  * handler.
0414  */
0415     .data
0416 EXPORT(ejtag_debug_buffer)
0417     .fill   LONGSIZE                    # one long: transient k1 save slot
0418 #ifdef CONFIG_SMP
0419 EXPORT(ejtag_debug_buffer_spinlock)
0420     .fill   LONGSIZE                    # lock word guarding the buffer above
0421 EXPORT(ejtag_debug_buffer_per_cpu)
0422     .fill   LONGSIZE * NR_CPUS          # per-cpu k1 save slots
0423 #endif
0424     .previous
0425 
0426     __INIT
0427 
0428 /*
0429  * NMI debug exception handler for MIPS reference boards.
0430  * The NMI debug exception entry point is 0xbfc00000, which
0431  * normally is in the boot PROM, so the boot PROM must do a
0432  * unconditional jump to this vector.
0433  */
0434 NESTED(except_vec_nmi, 0, sp)
0435     j   nmi_handler                     # trampoline from the 0xbfc00000 NMI vector
0436 #ifdef CONFIG_CPU_MICROMIPS
0437      nop
0438 #endif
0439     END(except_vec_nmi)
0440 
0441     __FINIT
0442 
0443 NESTED(nmi_handler, PT_SIZE, sp)
0444     .cfi_signal_frame
0445     .set    push
0446     .set    noat
0447     /*
0448      * Clear ERL - restore segment mapping
0449      * Clear BEV - required for page fault exception handler to work
0450      */
0451     mfc0    k0, CP0_STATUS
0452     ori k0, k0, ST0_EXL                 # set EXL: remain at exception level
0453     li  k1, ~(ST0_BEV | ST0_ERL)
0454     and k0, k0, k1                      # drop BEV and ERL
0455     mtc0    k0, CP0_STATUS
0456     _ehb                                # hazard barrier: let the Status write settle
0457     SAVE_ALL
0458     move    a0, sp                      # a0 = saved register frame
0459     jal nmi_exception_handler
0460     /* nmi_exception_handler never returns */
0461     .set    pop
0462     END(nmi_handler)
0463 
    /* Per-exception "clear" prologues, selected by __BUILD_HANDLER. */
0464     .macro  __build_clear_none          # nothing to fetch; IRQ state untouched
0465     .endm
0466 
0467     .macro  __build_clear_sti           # handler runs with interrupts enabled
0468     TRACE_IRQS_ON
0469     STI
0470     .endm
0471 
0472     .macro  __build_clear_cli           # handler runs with interrupts disabled
0473     CLI
0474     TRACE_IRQS_OFF
0475     .endm
0476 
0477     .macro  __build_clear_fpe           # FPU exception: pass FCSR (fcr31) in a1
0478     CLI
0479     TRACE_IRQS_OFF
0480     .set    push
0481     /* gas fails to assemble cfc1 for some archs (octeon).*/ \
0482     .set    mips1
0483     SET_HARDFLOAT
0484     cfc1    a1, fcr31
0485     .set    pop
0486     .endm
0487 
0488     .macro  __build_clear_msa_fpe       # MSA FP exception: pass MSACSR in a1
0489     CLI
0490     TRACE_IRQS_OFF
0491     _cfcmsa a1, MSA_CSR
0492     .endm
0493 
0494     .macro  __build_clear_ade           # address error: record the faulting address
0495     MFC0    t0, CP0_BADVADDR
0496     PTR_S   t0, PT_BVADDR(sp)           # save BadVAddr into the register frame
0497     KMODE
0498     .endm
0499 
0500     .macro __build_clear_gsexc          # Loongson GSExc: pass GSCause in a1
0501     .set    push
0502     /*
0503      * We need to specify a selector to access the CP0.Diag1 (GSCause)
0504      * register. All GSExc-equipped processors have MIPS32.
0505      */
0506     .set    mips32
0507     mfc0    a1, CP0_DIAGNOSTIC1
0508     .set    pop
0509     TRACE_IRQS_ON
0510     STI
0511     .endm
0512 
0513     .macro  __BUILD_silent exception    # "verbose" hook: silent handlers emit nothing
0514     .endm
0515 
0516     /* Gas tries to parse the ASM_PRINT argument as a string containing
0517        string escapes and emits bogus warnings if it believes to
0518        recognize an unknown escape code.  So make the arguments
0519        start with an n and gas will believe \n is ok ...  */
0520     .macro  __BUILD_verbose nexception
    /* Print the exception name and the EPC it was taken at. */
0521     LONG_L  a1, PT_EPC(sp)              # a1 = saved EPC for the printout
0522 #ifdef CONFIG_32BIT
0523     ASM_PRINT("Got \nexception at %08lx\012")
0524 #endif
0525 #ifdef CONFIG_64BIT
0526     ASM_PRINT("Got \nexception at %016lx\012")
0527 #endif
0528     .endm
0529 
0530     .macro  __BUILD_count exception
    /*
     * Bump the 64-bit exception_count_\exception statistics counter.
     * The .comm must reserve the same symbol the LONG_L/LONG_S lines
     * reference (exception_count_\exception, with the underscore);
     * a mismatched name would leave those references unresolved.
     */
0531     LONG_L  t0,exception_count_\exception
0532     LONG_ADDIU  t0, 1
0533     LONG_S  t0,exception_count_\exception
0534     .comm   exception_count_\exception, 8, 8
0535     .endm
0536 
0537     .macro  __BUILD_HANDLER exception handler clear verbose ext
    /*
     * Emit handle_\exception: save full context, run the selected
     * __build_clear_\clear prologue, optionally print (verbose), then
     * call do_\handler(regs) and leave via ret_from_exception.
     */
0538     .align  5
0539     NESTED(handle_\exception, PT_SIZE, sp)
0540     .cfi_signal_frame
0541     .set    noat
0542     SAVE_ALL
0543     FEXPORT(handle_\exception\ext)      # alternate entry that skips the SAVE_ALL
0544     __build_clear_\clear
0545     .set    at
0546     __BUILD_\verbose \exception
0547     move    a0, sp                      # a0 = saved register frame
0548     jal do_\handler
0549     j   ret_from_exception
0550     END(handle_\exception)
0551     .endm
0552 
0553     .macro  BUILD_HANDLER exception handler clear verbose
0554     __BUILD_HANDLER \exception \handler \clear \verbose _int
0555     .endm
0556 
    /*
     * Instantiate the standard exception handlers; the number in each
     * trailing comment is the CP0 Cause ExcCode the handler serves.
     */
0557     BUILD_HANDLER adel ade ade silent       /* #4  */
0558     BUILD_HANDLER ades ade ade silent       /* #5  */
0559     BUILD_HANDLER ibe be cli silent         /* #6  */
0560     BUILD_HANDLER dbe be cli silent         /* #7  */
0561     BUILD_HANDLER bp bp sti silent          /* #9  */
0562     BUILD_HANDLER ri ri sti silent          /* #10 */
0563     BUILD_HANDLER cpu cpu sti silent        /* #11 */
0564     BUILD_HANDLER ov ov sti silent          /* #12 */
0565     BUILD_HANDLER tr tr sti silent          /* #13 */
0566     BUILD_HANDLER msa_fpe msa_fpe msa_fpe silent    /* #14 */
0567 #ifdef CONFIG_MIPS_FP_SUPPORT
0568     BUILD_HANDLER fpe fpe fpe silent        /* #15 */
0569 #endif
0570     BUILD_HANDLER ftlb ftlb none silent     /* #16 */
0571     BUILD_HANDLER gsexc gsexc gsexc silent      /* #16 */
0572     BUILD_HANDLER msa msa sti silent        /* #21 */
0573     BUILD_HANDLER mdmx mdmx sti silent      /* #22 */
0574 #ifdef  CONFIG_HARDWARE_WATCHPOINTS
0575     /*
0576      * For watch, interrupts will be enabled after the watch
0577      * registers are read.
0578      */
0579     BUILD_HANDLER watch watch cli silent        /* #23 */
0580 #else
0581     BUILD_HANDLER watch watch sti verbose       /* #23 */
0582 #endif
0583     BUILD_HANDLER mcheck mcheck cli verbose     /* #24 */
0584     BUILD_HANDLER mt mt sti silent          /* #25 */
0585     BUILD_HANDLER dsp dsp sti silent        /* #26 */
0586     BUILD_HANDLER reserved reserved sti verbose /* others */
0587 
0588     .align  5
0589     LEAF(handle_ri_rdhwr_tlbp)
    /*
     * RI entry that first probes the TLB for a mapping of EPC before
     * the fall-through (handle_ri_rdhwr) loads the faulting opcode.
     * Presumably guards against the opcode load itself faulting when
     * no TLB entry covers EPC -- TODO confirm against the trap setup.
     */
0590     .set    push
0591     .set    noat
0592     .set    noreorder
0593     /* check if TLB contains a entry for EPC */
0594     MFC0    k1, CP0_ENTRYHI
0595     andi    k1, MIPS_ENTRYHI_ASID | MIPS_ENTRYHI_ASIDX  # keep only the current ASID
0596     MFC0    k0, CP0_EPC
0597     PTR_SRL k0, _PAGE_SHIFT + 1
0598     PTR_SLL k0, _PAGE_SHIFT + 1         # VPN2: EPC rounded to an even page pair
0599     or  k1, k0
0600     MTC0    k1, CP0_ENTRYHI             # EntryHi = VPN2 | ASID for the probe
0601     mtc0_tlbw_hazard
0602     tlbp
0603     tlb_probe_hazard
0604     mfc0    k1, CP0_INDEX               # Index < 0 means no matching entry
0605     .set    pop
0606     bltz    k1, handle_ri   /* slow path */
0607     /* fall thru */
0608     END(handle_ri_rdhwr_tlbp)
0609 
0610     LEAF(handle_ri_rdhwr)
    /*
     * Fast-path RI handler that emulates "rdhwr v1, $29" (read of the
     * userlocal/TLS register) by loading the saved TP value from
     * thread_info into v1.  Any other opcode is punted to handle_ri.
     */
0611     .set    push
0612     .set    noat
0613     .set    noreorder
0614     /* MIPS32:    0x7c03e83b: rdhwr v1,$29 */
0615     /* microMIPS: 0x007d6b3c: rdhwr v1,$29 */
0616     MFC0    k1, CP0_EPC
0617 #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2)
0618     and k0, k1, 1                       # EPC bit 0 set => microMIPS mode
0619     beqz    k0, 1f
0620      xor    k1, k0                      # (delay slot) clear ISA bit: real insn address
0621     lhu k0, (k1)                        # read the 32-bit microMIPS opcode as two halves
0622     lhu k1, 2(k1)
0623     ins k1, k0, 16, 16
0624     lui k0, 0x007d                      # expected microMIPS encoding ...
0625     b   docheck
0626      ori    k0, 0x6b3c                  # ... 0x007d6b3c
0627 1:
0628     lui k0, 0x7c03                      # expected MIPS32 encoding ...
0629     lw  k1, (k1)                        # fetch the faulting opcode
0630     ori k0, 0xe83b                      # ... 0x7c03e83b
0631 #else
0632     andi    k0, k1, 1
0633     bnez    k0, handle_ri               # odd EPC without microMIPS support: punt
0634      lui    k0, 0x7c03
0635     lw  k1, (k1)
0636     ori k0, 0xe83b
0637 #endif
0638     .set    reorder
0639 docheck:
0640     bne k0, k1, handle_ri   /* if not ours */
0641 
0642 isrdhwr:
0643     /* The insn is rdhwr.  No need to check CAUSE.BD here. */
0644     get_saved_sp    /* k1 := current_thread_info */
0645     .set    noreorder
0646     MFC0    k0, CP0_EPC
0647 #if defined(CONFIG_CPU_R3000)
0648     ori k1, _THREAD_MASK
0649     xori    k1, _THREAD_MASK            # round k1 down to the thread_info base
0650     LONG_L  v1, TI_TP_VALUE(k1)         # v1 = saved TP value (the rdhwr result)
0651     LONG_ADDIU  k0, 4                   # step EPC past the emulated insn
0652     jr  k0
0653      rfe
0654 #else
0655 #ifndef CONFIG_CPU_DADDI_WORKAROUNDS
0656     LONG_ADDIU  k0, 4       /* stall on $k0 */
0657 #else
0658     .set    at=v1
0659     LONG_ADDIU  k0, 4
0660     .set    noat
0661 #endif
0662     MTC0    k0, CP0_EPC                 # resume after the emulated insn
0663     /* I hope three instructions between MTC0 and ERET are enough... */
0664     ori k1, _THREAD_MASK
0665     xori    k1, _THREAD_MASK            # round k1 down to the thread_info base
0666     LONG_L  v1, TI_TP_VALUE(k1)         # v1 = saved TP value (the rdhwr result)
0667     .set    push
0668     .set    arch=r4000
0669     eret
0670     .set    pop
0671 #endif
0672     .set    pop
0673     END(handle_ri_rdhwr)
0674 
0675 #ifdef CONFIG_CPU_R4X00_BUGS64
0676 /* A temporary overflow handler used by check_daddi(). */
0677 
0678     __INIT                              # boot-time only; section discarded after init
0679 
0680     BUILD_HANDLER  daddi_ov daddi_ov none silent    /* #12 */
0681 #endif