/* SPDX-License-Identifier: GPL-2.0 */
/*
 * rtrap.S: Preparing for return from trap on Sparc V9.
 *
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */


#include <asm/asi.h>
#include <asm/pstate.h>
#include <asm/ptrace.h>
#include <asm/spitfire.h>
#include <asm/head.h>
#include <asm/visasm.h>
#include <asm/processor.h>

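/* When user context tracking is enabled, use the context-tracking
 * aware schedule_user() wrapper here instead of plain schedule().
 */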
#ifdef CONFIG_CONTEXT_TRACKING_USER
# define SCHEDULE_USER schedule_user
#else
# define SCHEDULE_USER schedule
#endif

        .text
        .align          32
__handle_preemption:
        call            SCHEDULE_USER
661:         wrpr           %g0, RTRAP_PSTATE, %pstate
        /* If userspace is using ADI, it could potentially pass
         * a pointer with version tag embedded in it. To maintain
         * the ADI security, we must re-enable PSTATE.mcde before
         * we continue execution in the kernel for another thread.
         */
        .section .sun_m7_1insn_patch, "ax"
        .word   661b
         wrpr           %g0, RTRAP_PSTATE|PSTATE_MCDE, %pstate
        .previous
        ba,pt           %xcc, __handle_preemption_continue
         wrpr           %g0, RTRAP_PSTATE_IRQOFF, %pstate

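        /* Spill the user register windows still saved in thread_info
         * (TI_WSAVED != 0) out onto the user stack, faulting the stack
         * pages in if necessary, then redo the work checks with IRQs
         * disabled.
         */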
__handle_user_windows:
        add         %sp, PTREGS_OFF, %o0
        call            fault_in_user_windows
661:         wrpr           %g0, RTRAP_PSTATE, %pstate
        /* If userspace is using ADI, it could potentially pass
         * a pointer with version tag embedded in it. To maintain
         * the ADI security, we must re-enable PSTATE.mcde before
         * we continue execution in the kernel for another thread.
         */
        .section .sun_m7_1insn_patch, "ax"
        .word   661b
         wrpr           %g0, RTRAP_PSTATE|PSTATE_MCDE, %pstate
        .previous
        ba,pt           %xcc, __handle_preemption_continue
         wrpr           %g0, RTRAP_PSTATE_IRQOFF, %pstate

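        /* If the FPU is not enabled (FPRS.FEF clear), drop TSTATE_PEF
         * from the saved %tstate in %l1 so the FPU stays disabled on
         * the return to user space.
         */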
__handle_userfpu:
        rd          %fprs, %l5
        andcc           %l5, FPRS_FEF, %g0
        sethi           %hi(TSTATE_PEF), %o0
        be,a,pn         %icc, __handle_userfpu_continue
         andn           %l1, %o0, %l1
        ba,a,pt         %xcc, __handle_userfpu_continue

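        /* Handle _TIF_DO_NOTIFY_RESUME_MASK work (signal delivery and
         * friends) by calling do_notify_resume() with the pt_regs
         * pointer and the thread flags that were read into %l0.
         */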
__handle_signal:
        mov         %l5, %o1
        add         %sp, PTREGS_OFF, %o0
        mov         %l0, %o2
        call            do_notify_resume
661:         wrpr           %g0, RTRAP_PSTATE, %pstate
        /* If userspace is using ADI, it could potentially pass
         * a pointer with version tag embedded in it. To maintain
         * the ADI security, we must re-enable PSTATE.mcde before
         * we continue execution in the kernel for another thread.
         */
        .section .sun_m7_1insn_patch, "ax"
        .word   661b
         wrpr           %g0, RTRAP_PSTATE|PSTATE_MCDE, %pstate
        .previous
        wrpr            %g0, RTRAP_PSTATE_IRQOFF, %pstate

        /* Signal delivery can modify pt_regs tstate, so we must
         * reload it.
         */
        ldx         [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
        sethi           %hi(0xf << 20), %l4
        and         %l1, %l4, %l4
        andn            %l1, %l4, %l1
        ba,pt           %xcc, __handle_preemption_continue
         srl            %l4, 20, %l4

        /* When returning from an NMI (%pil==15) interrupt we want to
         * avoid running softirqs, doing IRQ tracing, preempting, etc.
         */
        .globl          rtrap_nmi
rtrap_nmi:  ldx         [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
        sethi           %hi(0xf << 20), %l4
        and         %l1, %l4, %l4
        andn            %l1, %l4, %l1
        srl         %l4, 20, %l4
        ba,pt           %xcc, rtrap_no_irq_enable
        nop
        /* Do not actually set the %pil here.  We will do that
         * below after we clear PSTATE_IE in the %pstate register.
         * If we re-enable interrupts here, we can recurse down
         * the hardirq stack potentially endlessly, causing a
         * stack overflow.
         */

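        /* Main return-from-trap entry points.  %l1 ends up holding the
         * saved %tstate from pt_regs with bits 20-23 cleared, and %l4
         * holds the value extracted from those bits, which is written
         * back to %pil just before the return.
         */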
        .align          64
        .globl          rtrap_irq, rtrap, irqsz_patchme, rtrap_xcall
rtrap_irq:
rtrap:
        /* mm/ultra.S:xcall_report_regs KNOWS about this load. */
        ldx         [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
rtrap_xcall:
        sethi           %hi(0xf << 20), %l4
        and         %l1, %l4, %l4
        andn            %l1, %l4, %l1
        srl         %l4, 20, %l4
#ifdef CONFIG_TRACE_IRQFLAGS
        brnz,pn         %l4, rtrap_no_irq_enable
         nop
        call            trace_hardirqs_on
         nop
        /* Do not actually set the %pil here.  We will do that
         * below after we clear PSTATE_IE in the %pstate register.
         * If we re-enable interrupts here, we can recurse down
         * the hardirq stack potentially endlessly, causing a
         * stack overflow.
         *
         * It is tempting to put this test and trace_hardirqs_on
         * call at the 'rt_continue' label, but that will not work
         * as that path hits unconditionally and we do not want to
         * execute this in NMI return paths, for example.
         */
#endif
rtrap_no_irq_enable:
        andcc           %l1, TSTATE_PRIV, %l3
        bne,pn          %icc, to_kernel
         nop

        /* We must hold IRQs off and atomically test schedule+signal
         * state, then hold them off all the way back to userspace.
         * If we are returning to kernel, none of this matters.  Note
         * that we are disabling interrupts via PSTATE_IE, not using
         * %pil.
         *
         * If we do not do this, there is a window where we would do
         * the tests and then a signal/resched event arrives, but we
         * do not process it since we are still in kernel mode.  It
         * would take until the next local IRQ before the
         * signal/resched event would be handled.
         *
         * This also means that if we have to deal with user
         * windows, we have to redo all of these sched+signal checks
         * with IRQs disabled.
         */
to_user:    wrpr            %g0, RTRAP_PSTATE_IRQOFF, %pstate
        wrpr            0, %pil
__handle_preemption_continue:
        ldx         [%g6 + TI_FLAGS], %l0
        sethi           %hi(_TIF_USER_WORK_MASK), %o0
        or          %o0, %lo(_TIF_USER_WORK_MASK), %o0
        andcc           %l0, %o0, %g0
        sethi           %hi(TSTATE_PEF), %o0
        be,pt           %xcc, user_nowork
         andcc          %l1, %o0, %g0
        andcc           %l0, _TIF_NEED_RESCHED, %g0
        bne,pn          %xcc, __handle_preemption
         andcc          %l0, _TIF_DO_NOTIFY_RESUME_MASK, %g0
        bne,pn          %xcc, __handle_signal
         ldub           [%g6 + TI_WSAVED], %o2
        brnz,pn         %o2, __handle_user_windows
         nop
        sethi           %hi(TSTATE_PEF), %o0
        andcc           %l1, %o0, %g0

        /* This fpdepth clear is necessary for non-syscall rtraps only */
user_nowork:
        bne,pn          %xcc, __handle_userfpu
         stb            %g0, [%g6 + TI_FPDEPTH]
__handle_userfpu_continue:

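        /* Common return tail: reload the global and input registers
         * from pt_regs, switch back to the trap-time globals, restore
         * the trap state and leave via retry.
         */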
rt_continue:    ldx         [%sp + PTREGS_OFF + PT_V9_G1], %g1
        ldx         [%sp + PTREGS_OFF + PT_V9_G2], %g2

        ldx         [%sp + PTREGS_OFF + PT_V9_G3], %g3
        ldx         [%sp + PTREGS_OFF + PT_V9_G4], %g4
        ldx         [%sp + PTREGS_OFF + PT_V9_G5], %g5
        brz,pt          %l3, 1f
        mov         %g6, %l2

        /* Must do this before thread reg is clobbered below.  */
        LOAD_PER_CPU_BASE(%g5, %g6, %i0, %i1, %i2)
1:
        ldx         [%sp + PTREGS_OFF + PT_V9_G6], %g6
        ldx         [%sp + PTREGS_OFF + PT_V9_G7], %g7

        /* Normal globals are restored, go to trap globals.  */
661:        wrpr            %g0, RTRAP_PSTATE_AG_IRQOFF, %pstate
        nop
        .section        .sun4v_2insn_patch, "ax"
        .word           661b
        wrpr            %g0, RTRAP_PSTATE_IRQOFF, %pstate
        SET_GL(1)
        .previous

        mov         %l2, %g6

        ldx         [%sp + PTREGS_OFF + PT_V9_I0], %i0
        ldx         [%sp + PTREGS_OFF + PT_V9_I1], %i1

        ldx         [%sp + PTREGS_OFF + PT_V9_I2], %i2
        ldx         [%sp + PTREGS_OFF + PT_V9_I3], %i3
        ldx         [%sp + PTREGS_OFF + PT_V9_I4], %i4
        ldx         [%sp + PTREGS_OFF + PT_V9_I5], %i5
        ldx         [%sp + PTREGS_OFF + PT_V9_I6], %i6
        ldx         [%sp + PTREGS_OFF + PT_V9_I7], %i7
        ldx         [%sp + PTREGS_OFF + PT_V9_TPC], %l2
        ldx         [%sp + PTREGS_OFF + PT_V9_TNPC], %o2

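        /* Restore %y and the saved %pil, then move to trap level 1 and
         * reload the trap state registers: %tstate (with TSTATE_SYSCALL
         * cleared out of the saved value), %tpc and %tnpc.
         */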
        ld          [%sp + PTREGS_OFF + PT_V9_Y], %o3
        wr          %o3, %g0, %y
        wrpr            %l4, 0x0, %pil
        wrpr            %g0, 0x1, %tl
        andn            %l1, TSTATE_SYSCALL, %l1
        wrpr            %l1, %g0, %tstate
        wrpr            %l2, %g0, %tpc
        wrpr            %o2, %g0, %tnpc

        brnz,pn         %l3, kern_rtt
         mov            PRIMARY_CONTEXT, %l7

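        /* Returning to user space: reread the PRIMARY_CONTEXT MMU
         * register, OR in the kernel nucleus page size bits
         * (sparc64_kern_pri_nuc_bits) and write it back.  The
         * sun4v_1insn_patch sections replace the sun4u ASI_DMMU
         * accesses with ASI_MMU on sun4v.
         */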
661:        ldxa            [%l7 + %l7] ASI_DMMU, %l0
        .section        .sun4v_1insn_patch, "ax"
        .word           661b
        ldxa            [%l7 + %l7] ASI_MMU, %l0
        .previous

        sethi           %hi(sparc64_kern_pri_nuc_bits), %l1
        ldx         [%l1 + %lo(sparc64_kern_pri_nuc_bits)], %l1
        or          %l0, %l1, %l0

661:        stxa            %l0, [%l7] ASI_DMMU
        .section        .sun4v_1insn_patch, "ax"
        .word           661b
        stxa            %l0, [%l7] ASI_MMU
        .previous

        sethi           %hi(KERNBASE), %l7
        flush           %l7
        rdpr            %wstate, %l1
        rdpr            %otherwin, %l2
        srl         %l1, 3, %l1

661:        wrpr            %l2, %g0, %canrestore
        .section        .fast_win_ctrl_1insn_patch, "ax"
        .word           661b
        .word           0x89880000  ! normalw
        .previous

        wrpr            %l1, %g0, %wstate
        brnz,pt         %l2, user_rtt_restore
661:         wrpr           %g0, %g0, %otherwin
        .section        .fast_win_ctrl_1insn_patch, "ax"
        .word           661b
         nop
        .previous

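        /* No windows could be moved over from %otherwin, so the window
         * we are about to return into must be refilled from the user
         * stack: select ASI_AIUP for user-privileged accesses, step
         * %cwp back by one, and take the 32-bit or 64-bit fill path
         * depending on _TIF_32BIT.
         */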
        ldx         [%g6 + TI_FLAGS], %g3
        wr          %g0, ASI_AIUP, %asi
        rdpr            %cwp, %g1
        andcc           %g3, _TIF_32BIT, %g0
        sub         %g1, 1, %g1
        bne,pt          %xcc, user_rtt_fill_32bit
         wrpr           %g1, %cwp
        ba,a,pt         %xcc, user_rtt_fill_64bit
         nop

user_rtt_fill_fixup_dax:
        ba,pt   %xcc, user_rtt_fill_fixup_common
         mov    1, %g3

user_rtt_fill_fixup_mna:
        ba,pt   %xcc, user_rtt_fill_fixup_common
         mov    2, %g3

user_rtt_fill_fixup:
        ba,pt   %xcc, user_rtt_fill_fixup_common
         clr    %g3

user_rtt_pre_restore:
        add         %g1, 1, %g1
        wrpr            %g1, 0x0, %cwp

user_rtt_restore:
        restore
        rdpr            %canrestore, %g1
        wrpr            %g1, 0x0, %cleanwin
        retry
        nop

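        /* Return from a trap taken in kernel mode.  If %canrestore is
         * zero the register window must be refilled first
         * (kern_rtt_fill); otherwise clear the pt_regs magic word and
         * leave via restore and retry.
         */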
kern_rtt:   rdpr            %canrestore, %g1
        brz,pn          %g1, kern_rtt_fill
         nop
kern_rtt_restore:
        stw         %g0, [%sp + PTREGS_OFF + PT_V9_MAGIC]
        restore
        retry

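        /* Trap came from kernel mode.  With CONFIG_PREEMPTION, preempt
         * via preempt_schedule_irq() only when the preempt count is
         * zero, a reschedule is pending and the saved %pil in %l4 is
         * zero; otherwise fall through to the FPU state check.
         */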
to_kernel:
#ifdef CONFIG_PREEMPTION
        ldsw            [%g6 + TI_PRE_COUNT], %l5
        brnz            %l5, kern_fpucheck
         ldx            [%g6 + TI_FLAGS], %l5
        andcc           %l5, _TIF_NEED_RESCHED, %g0
        be,pt           %xcc, kern_fpucheck
         nop
        cmp         %l4, 0
        bne,pn          %xcc, kern_fpucheck
         nop
        call            preempt_schedule_irq
         nop
        ba,pt           %xcc, rtrap
#endif
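        /* If FPU state was saved at this trap depth (TI_FPDEPTH != 0),
         * reload %gsr, %fsr and the live floating point register
         * blocks recorded in TI_FPSAVED before continuing at
         * rt_continue.
         */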
kern_fpucheck:  ldub            [%g6 + TI_FPDEPTH], %l5
        brz,pt          %l5, rt_continue
         srl            %l5, 1, %o0
        add         %g6, TI_FPSAVED, %l6
        ldub            [%l6 + %o0], %l2
        sub         %l5, 2, %l5

        add         %g6, TI_GSR, %o1
        andcc           %l2, (FPRS_FEF|FPRS_DU), %g0
        be,pt           %icc, 2f
         and            %l2, FPRS_DL, %l6
        andcc           %l2, FPRS_FEF, %g0
        be,pn           %icc, 5f
         sll            %o0, 3, %o5
        rd          %fprs, %g1

        wr          %g1, FPRS_FEF, %fprs
        ldx         [%o1 + %o5], %g1
        add         %g6, TI_XFSR, %o1
        sll         %o0, 8, %o2
        add         %g6, TI_FPREGS, %o3
        brz,pn          %l6, 1f
         add            %g6, TI_FPREGS+0x40, %o4

        membar          #Sync
        ldda            [%o3 + %o2] ASI_BLK_P, %f0
        ldda            [%o4 + %o2] ASI_BLK_P, %f16
        membar          #Sync
1:      andcc           %l2, FPRS_DU, %g0
        be,pn           %icc, 1f
         wr         %g1, 0, %gsr
        add         %o2, 0x80, %o2
        membar          #Sync
        ldda            [%o3 + %o2] ASI_BLK_P, %f32
        ldda            [%o4 + %o2] ASI_BLK_P, %f48
1:      membar          #Sync
        ldx         [%o1 + %o5], %fsr
2:      stb         %l5, [%g6 + TI_FPDEPTH]
        ba,pt           %xcc, rt_continue
         nop
5:      wr          %g0, FPRS_FEF, %fprs
        sll         %o0, 8, %o2

        add         %g6, TI_FPREGS+0x80, %o3
        add         %g6, TI_FPREGS+0xc0, %o4
        membar          #Sync
        ldda            [%o3 + %o2] ASI_BLK_P, %f32
        ldda            [%o4 + %o2] ASI_BLK_P, %f48
        membar          #Sync
        wr          %g0, FPRS_DU, %fprs
        ba,pt           %xcc, rt_continue
         stb            %l5, [%g6 + TI_FPDEPTH]