/* SPDX-License-Identifier: GPL-2.0 */
    /* We need to carefully read the error status, ACK the errors,
     * prevent recursive traps, and pass the information on to C
     * code for logging.
     *
     * We pass the AFAR in as-is, and we encode the status
     * information as described in asm-sparc64/sfafsr.h
     */
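    /* A sketch of how the status word handed to C gets packed below.
     * The exact bit positions live in asm-sparc64/sfafsr.h and are
     * not repeated here; only the fields this code touches are shown:
     *
     *   raw AFSR contents            (or'd in unshifted)
     *   trap type                    << SFSTAT_TRAP_TYPE_SHIFT
     *   "trapped at TL > 1" flag     << SFSTAT_TL_GT_ONE_SHIFT
     *   UDB high error state         << SFSTAT_UDBH_SHIFT
     *   UDB low error state          << SFSTAT_UDBL_SHIFT
     */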
    .type       __spitfire_access_error,#function
__spitfire_access_error:
    /* Disable ESTATE error reporting so that we do not take
     * recursive traps and RED state the processor.
     */
    stxa        %g0, [%g0] ASI_ESTATE_ERROR_EN
    membar      #Sync

    mov     UDBE_UE, %g1
    ldxa        [%g0] ASI_AFSR, %g4 ! Get AFSR

    /* __spitfire_cee_trap branches here with AFSR in %g4 and
     * UDBE_CE in %g1.  It only clears ESTATE_ERR_CE in the ESTATE
     * Error Enable register.
     */
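    /* From this point on the UE and CE paths share code: gather the
     * AFAR and UDB error state into the status word, clear the
     * sticky AFSR bits, then trap into C for logging.  %g1 selects
     * which UDB error bit (UDBE_UE or UDBE_CE) we care about.
     */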
__spitfire_cee_trap_continue:
    ldxa        [%g0] ASI_AFAR, %g5 ! Get AFAR

    rdpr        %tt, %g3
    and     %g3, 0x1ff, %g3     ! Paranoia
    sllx        %g3, SFSTAT_TRAP_TYPE_SHIFT, %g3
    or      %g4, %g3, %g4
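    /* Record whether we trapped at TL > 1.  The sllx sits in the
     * branch delay slot and runs on both paths; the bleu merely
     * skips the following "or" when TL <= 1, so the flag is only
     * merged in for TL > 1.
     */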
    rdpr        %tl, %g3
    cmp     %g3, 1
    mov     1, %g3
    bleu        %xcc, 1f
     sllx       %g3, SFSTAT_TL_GT_ONE_SHIFT, %g3

    or      %g4, %g3, %g4

    /* Read in the UDB error register state, clearing the sticky
     * error bits as-needed.  We only clear them if the UE bit is
     * set.  Likewise, __spitfire_cee_trap below will only do so
     * if the CE bit is set.
     *
     * NOTE: UltraSparc-I/II have high and low UDB error
     *       registers, corresponding to the two UDB units
     *       present on those chips.  UltraSparc-IIi only
     *       has a single UDB, called "SDB" in the manual.
     *       For IIi the upper UDB register always reads
     *       as zero so for our purposes things will just
     *       work with the checks below.
     */
1:  ldxa        [%g0] ASI_UDBH_ERROR_R, %g3
    and     %g3, 0x3ff, %g7     ! Paranoia
    sllx        %g7, SFSTAT_UDBH_SHIFT, %g7
    or      %g4, %g7, %g4
    andcc       %g3, %g1, %g3       ! UDBE_UE or UDBE_CE
    be,pn       %xcc, 1f
     nop
    stxa        %g3, [%g0] ASI_UDB_ERROR_W
    membar      #Sync

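    /* Now the low UDB.  Its error register sits at offset 0x18 in
     * the UDB error ASIs, hence the explicit offset for both the
     * read here and the write-back of the sticky bits below.
     */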
1:  mov     0x18, %g3
    ldxa        [%g3] ASI_UDBL_ERROR_R, %g3
    and     %g3, 0x3ff, %g7     ! Paranoia
    sllx        %g7, SFSTAT_UDBL_SHIFT, %g7
    or      %g4, %g7, %g4
    andcc       %g3, %g1, %g3       ! UDBE_UE or UDBE_CE
    be,pn       %xcc, 1f
     nop
    mov     0x18, %g7
    stxa        %g3, [%g7] ASI_UDB_ERROR_W
    membar      #Sync

1:  /* Ok, now that we've latched the error state, clear the
     * sticky bits in the AFSR.
     */
    stxa        %g4, [%g0] ASI_AFSR
    membar      #Sync

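    /* Build a kernel trap frame and call into C.  If we trapped at
     * TL > 1 we must enter via etraptl1, otherwise via etrap_irq;
     * either way the wrpr in the delay slot first raises PIL to
     * PIL_NORMAL_MAX.  The rd %pc, %g7 gives etrap* its return
     * point, so both paths converge at the 2: label below.
     */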
    rdpr        %tl, %g2
    cmp     %g2, 1
    rdpr        %pil, %g2
    bleu,pt     %xcc, 1f
     wrpr       %g0, PIL_NORMAL_MAX, %pil

    ba,pt       %xcc, etraptl1
     rd     %pc, %g7

    ba,a,pt     %xcc, 2f
     nop

1:  ba,pt       %xcc, etrap_irq
     rd     %pc, %g7

2:
#ifdef CONFIG_TRACE_IRQFLAGS
    call    trace_hardirqs_off
     nop
#endif
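    /* Hand the latched state to the C logger.  etrap* preserves our
     * %g4/%g5 in %l4/%l5 of the trap frame, which is why they
     * reappear here as the second and third arguments.  The C-side
     * prototype is assumed to be roughly:
     *
     *   void spitfire_access_error(struct pt_regs *regs,
     *                              unsigned long status_encoded,
     *                              unsigned long afar);
     */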
    mov     %l4, %o1
    mov     %l5, %o2
    call        spitfire_access_error
     add        %sp, PTREGS_OFF, %o0
    ba,a,pt     %xcc, rtrap
    .size       __spitfire_access_error,.-__spitfire_access_error

    /* This is the trap handler entry point for ECC correctable
     * errors.  They are corrected, but we listen for the trap so
     * that the event can be logged.
     *
     * Disrupting errors are either:
     * 1) single-bit ECC errors during UDB reads to system
     *    memory
     * 2) data parity errors during write-back events
     *
     * As far as I can make out from the manual, the CEE trap is
     * only for correctable errors during memory read accesses by
     * the front-end of the processor.
     *
     * The code below is only for trap level 1 CEE events, as it
     * is the only situation where we can safely record and log.
     * For trap level >1 we just clear the CE bit in the AFSR and
     * return.
     *
     * This is just like __spitfire_access_error above, but it
     * specifically handles correctable errors.  If an
     * uncorrectable error is indicated in the AFSR we will branch
     * directly above to __spitfire_access_error to handle it
     * instead.  Uncorrectable therefore takes priority over
     * correctable, and the error logging C code will notice this
     * case by inspecting the trap type.
     */
    .type       __spitfire_cee_trap,#function
__spitfire_cee_trap:
    ldxa        [%g0] ASI_AFSR, %g4 ! Get AFSR
    mov     1, %g3
    sllx        %g3, SFAFSR_UE_SHIFT, %g3
    andcc       %g4, %g3, %g0       ! Check for UE
    bne,pn      %xcc, __spitfire_access_error
     nop

    /* Ok, in this case we only have a correctable error.
     * Indicate we only wish to capture that state in register
     * %g1, and we only disable CE error reporting unlike UE
     * handling which disables all errors.
     */
    ldxa        [%g0] ASI_ESTATE_ERROR_EN, %g3
    andn        %g3, ESTATE_ERR_CE, %g3
    stxa        %g3, [%g0] ASI_ESTATE_ERROR_EN
    membar      #Sync

    /* Preserve AFSR in %g4, indicate UDB state to capture in %g1 */
    ba,pt       %xcc, __spitfire_cee_trap_continue
     mov        UDBE_CE, %g1
    .size       __spitfire_cee_trap,.-__spitfire_cee_trap

    .type       __spitfire_data_access_exception_tl1,#function
__spitfire_data_access_exception_tl1:
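    /* The wrpr below XORs %pstate with PSTATE_MG|PSTATE_AG, taking
     * us off the MMU globals this trap is expected to arrive on and
     * onto the alternate globals that the etrap code wants, while
     * leaving the rest of %pstate alone.
     */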
    rdpr        %pstate, %g4
    wrpr        %g4, PSTATE_MG|PSTATE_AG, %pstate
    mov     TLB_SFSR, %g3
    mov     DMMU_SFAR, %g5
    ldxa        [%g3] ASI_DMMU, %g4 ! Get SFSR
    ldxa        [%g5] ASI_DMMU, %g5 ! Get SFAR
    stxa        %g0, [%g3] ASI_DMMU ! Clear SFSR.FaultValid bit
    membar      #Sync
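    /* If %tt falls in the register window spill/fill range
     * (0x80 - 0xff) we hand off to winfix_dax with the trap PC in
     * %g3 rather than taking the normal path, so the window fixup
     * code can sort things out first.
     */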
    rdpr        %tt, %g3
    cmp     %g3, 0x80       ! first win spill/fill trap
    blu,pn      %xcc, 1f
     cmp        %g3, 0xff       ! last win spill/fill trap
    bgu,pn      %xcc, 1f
     nop
    ba,pt       %xcc, winfix_dax
     rdpr       %tpc, %g3
1:  sethi       %hi(109f), %g7
    ba,pt       %xcc, etraptl1
109:     or     %g7, %lo(109b), %g7
    mov     %l4, %o1
    mov     %l5, %o2
    call        spitfire_data_access_exception_tl1
     add        %sp, PTREGS_OFF, %o0
    ba,a,pt     %xcc, rtrap
    .size       __spitfire_data_access_exception_tl1,.-__spitfire_data_access_exception_tl1

    .type       __spitfire_data_access_exception,#function
__spitfire_data_access_exception:
    rdpr        %pstate, %g4
    wrpr        %g4, PSTATE_MG|PSTATE_AG, %pstate
    mov     TLB_SFSR, %g3
    mov     DMMU_SFAR, %g5
    ldxa        [%g3] ASI_DMMU, %g4 ! Get SFSR
    ldxa        [%g5] ASI_DMMU, %g5 ! Get SFAR
    stxa        %g0, [%g3] ASI_DMMU ! Clear SFSR.FaultValid bit
    membar      #Sync
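    /* Same as the TL1 variant above, minus the window fixup check.
     * etrap preserves %g4/%g5 (SFSR/SFAR here) in %l4/%l5, and the
     * C handlers for both variants are assumed to take roughly:
     *
     *   void spitfire_data_access_exception(struct pt_regs *regs,
     *                                       unsigned long sfsr,
     *                                       unsigned long sfar);
     */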
    sethi       %hi(109f), %g7
    ba,pt       %xcc, etrap
109:     or     %g7, %lo(109b), %g7
    mov     %l4, %o1
    mov     %l5, %o2
    call        spitfire_data_access_exception
     add        %sp, PTREGS_OFF, %o0
    ba,a,pt     %xcc, rtrap
    .size       __spitfire_data_access_exception,.-__spitfire_data_access_exception

    .type       __spitfire_insn_access_exception_tl1,#function
__spitfire_insn_access_exception_tl1:
    rdpr        %pstate, %g4
    wrpr        %g4, PSTATE_MG|PSTATE_AG, %pstate
    mov     TLB_SFSR, %g3
    ldxa        [%g3] ASI_IMMU, %g4 ! Get SFSR
    rdpr        %tpc, %g5       ! IMMU has no SFAR, use TPC
    stxa        %g0, [%g3] ASI_IMMU ! Clear FaultValid bit
    membar      #Sync
    sethi       %hi(109f), %g7
    ba,pt       %xcc, etraptl1
109:     or     %g7, %lo(109b), %g7
    mov     %l4, %o1
    mov     %l5, %o2
    call        spitfire_insn_access_exception_tl1
     add        %sp, PTREGS_OFF, %o0
    ba,a,pt     %xcc, rtrap
    .size       __spitfire_insn_access_exception_tl1,.-__spitfire_insn_access_exception_tl1

    .type       __spitfire_insn_access_exception,#function
__spitfire_insn_access_exception:
    rdpr        %pstate, %g4
    wrpr        %g4, PSTATE_MG|PSTATE_AG, %pstate
    mov     TLB_SFSR, %g3
    ldxa        [%g3] ASI_IMMU, %g4 ! Get SFSR
    rdpr        %tpc, %g5       ! IMMU has no SFAR, use TPC
    stxa        %g0, [%g3] ASI_IMMU ! Clear FaultValid bit
    membar      #Sync
    sethi       %hi(109f), %g7
    ba,pt       %xcc, etrap
109:     or     %g7, %lo(109b), %g7
    mov     %l4, %o1
    mov     %l5, %o2
    call        spitfire_insn_access_exception
     add        %sp, PTREGS_OFF, %o0
    ba,a,pt     %xcc, rtrap
    .size       __spitfire_insn_access_exception,.-__spitfire_insn_access_exception