/*
 * PowerPC AltiVec (VMX) and VSX assembly support routines.
 * (Web-viewer navigation header removed from scraped copy.)
 */
/* SPDX-License-Identifier: GPL-2.0 */
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/reg.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/export.h>
#include <asm/asm-compat.h>
0012 
/*
 * load_vr_state(vrstate)
 *
 * Load state from memory into VMX registers including VSCR.
 * Assumes the caller has enabled VMX in the MSR.
 *
 * In:    r3 = pointer to the vr_state image to load
 * Clobb: r4, v0
 */
_GLOBAL(load_vr_state)
    li  r4,VRSTATE_VSCR
    lvx v0,r4,r3            /* fetch the saved VSCR image ... */
    mtvscr  v0              /* ... and install it */
    REST_32VRS(0,r4,r3)     /* reload v0-v31 from the save area */
    blr
EXPORT_SYMBOL(load_vr_state)
_ASM_NOKPROBE_SYMBOL(load_vr_state); /* used by restore_math */
0025 
/*
 * store_vr_state(vrstate)
 *
 * Store VMX state into memory, including VSCR.
 * Assumes the caller has enabled VMX in the MSR.
 *
 * In:    r3 = pointer to the vr_state image to fill
 * Clobb: r4, v0 (v0 is saved first, then reused for the VSCR image)
 */
_GLOBAL(store_vr_state)
    SAVE_32VRS(0, r4, r3)   /* dump v0-v31 into the save area */
    mfvscr  v0              /* v0 is free now; read VSCR into it */
    li  r4, VRSTATE_VSCR
    stvx    v0, r4, r3      /* store the VSCR image */
    blr
EXPORT_SYMBOL(store_vr_state)
0037 
/*
 * Disable VMX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 * On SMP we know the VMX is free, since we give it up every
 * switch (ie, no lazy save of the vector registers).
 *
 * Note that on 32-bit this can only use registers that will be
 * restored by fast_exception_return, i.e. r3 - r6, r10 and r11.
 */
_GLOBAL(load_up_altivec)
    mfmsr   r5          /* grab the current MSR */
#ifdef CONFIG_PPC_BOOK3S_64
    /* interrupt doesn't set MSR[RI] and HPT can fault on current access */
    ori r5,r5,MSR_RI
#endif
    oris    r5,r5,MSR_VEC@h
    MTMSRD(r5)          /* enable use of AltiVec now */
    isync

    /*
     * While userspace in general ignores VRSAVE, glibc uses it as a boolean
     * to optimise userspace context save/restore. Whenever we take an
     * altivec unavailable exception we must set VRSAVE to something non
     * zero. Set it to all 1s. See also the programming note in the ISA.
     */
    mfspr   r4,SPRN_VRSAVE
    cmpwi   0,r4,0
    bne+    1f          /* already non-zero; leave it alone */
    li  r4,-1
    mtspr   SPRN_VRSAVE,r4
1:
    /* enable use of VMX after return */
#ifdef CONFIG_PPC32
    addi    r5,r2,THREAD        /* r2 = current on 32-bit; r5 = &thread */
    oris    r9,r9,MSR_VEC@h     /* r9 holds the return MSR image */
#else
    ld  r4,PACACURRENT(r13)
    addi    r5,r4,THREAD        /* Get THREAD */
    oris    r12,r12,MSR_VEC@h   /* r12 holds the interrupted MSR image */
    std r12,_MSR(r1)            /* set MSR_VEC in the saved regs too */
#ifdef CONFIG_PPC_BOOK3S_64
    li  r4,0
    stb r4,PACASRR_VALID(r13)   /* SRR0/1 no longer match the regs */
#endif
#endif
    li  r4,1
    stb r4,THREAD_LOAD_VEC(r5)  /* mark VMX state as loaded */
    addi    r6,r5,THREAD_VRSTATE
    li  r10,VRSTATE_VSCR
    stw r4,THREAD_USED_VR(r5)   /* task has now used the VR regs */
    lvx v0,r10,r6               /* restore VSCR ... */
    mtvscr  v0
    REST_32VRS(0,r4,r6)         /* ... then v0-v31 */
    /* restore registers and return */
    blr
_ASM_NOKPROBE_SYMBOL(load_up_altivec)
0095 
/*
 * save_altivec(tsk)
 * Save the vector registers to its thread_struct
 *
 * In:    r3 = task_struct pointer
 * Clobb: r4, r5, r7, v0
 */
_GLOBAL(save_altivec)
    addi    r3,r3,THREAD        /* want THREAD of task */
    PPC_LL  r7,THREAD_VRSAVEAREA(r3)    /* alternate save area, if any */
    PPC_LL  r5,PT_REGS(r3)
    PPC_LCMPI   0,r7,0
    bne 2f                      /* use the designated save area ... */
    addi    r7,r3,THREAD_VRSTATE    /* ... else default to thread.vr_state */
2:  SAVE_32VRS(0,r4,r7)         /* dump v0-v31 */
    mfvscr  v0                  /* v0 is saved; reuse it for VSCR */
    li  r4,VRSTATE_VSCR
    stvx    v0,r4,r7
    blr
0112 
#ifdef CONFIG_VSX

#ifdef CONFIG_PPC32
#error This asm code isn't ready for 32-bit kernels
#endif

/*
 * load_up_vsx(unused, unused, tsk)
 * Disable VSX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Reuse the fp and vsx saves, but first check to see if they have
 * been saved already.
 *
 * On entry r12 holds the interrupted MSR image and r13 the PACA.
 */
_GLOBAL(load_up_vsx)
/* Load FP and VSX registers if they haven't been done yet */
    andi.   r5,r12,MSR_FP
    beql+   load_up_fpu     /* call only if FP not already loaded */
    andis.  r5,r12,MSR_VEC@h
    beql+   load_up_altivec     /* call only if VMX not already loaded */

#ifdef CONFIG_PPC_BOOK3S_64
    /* interrupt doesn't set MSR[RI] and HPT can fault on current access */
    li  r5,MSR_RI
    mtmsrd  r5,1
#endif

    ld  r4,PACACURRENT(r13)
    addi    r4,r4,THREAD        /* Get THREAD */
    li  r6,1
    stw r6,THREAD_USED_VSR(r4) /* ... also set thread used vsr */
    /* enable use of VSX after return */
    oris    r12,r12,MSR_VSX@h
    std r12,_MSR(r1)            /* returned MSR has MSR_VSX set */
    li  r4,0
    stb r4,PACASRR_VALID(r13)   /* SRR0/1 no longer match the regs */
    b   fast_interrupt_return_srr

#endif /* CONFIG_VSX */
0151 
/*
 * The routines below are in assembler so we can closely control the
 * usage of floating-point registers.  These routines must be called
 * with preempt disabled.
 *
 * LDCONST(fr, name) loads one of the constants below into FPR 'fr':
 * on 32-bit via an absolute hi/lo address pair (clobbers r11), on
 * 64-bit via a TOC reference off r2.
 */
#ifdef CONFIG_PPC32
    .data
fpzero:
    .long   0
fpone:
    .long   0x3f800000  /* 1.0 in single-precision FP */
fphalf:
    .long   0x3f000000  /* 0.5 in single-precision FP */

#define LDCONST(fr, name)   \
    lis r11,name@ha;    \
    lfs fr,name@l(r11)
#else

    .section ".toc","aw"
fpzero:
    .tc FD_0_0[TC],0
fpone:
    .tc FD_3ff00000_0[TC],0x3ff0000000000000    /* 1.0 */
fphalf:
    .tc FD_3fe00000_0[TC],0x3fe0000000000000    /* 0.5 */

#define LDCONST(fr, name)   \
    lfd fr,name@toc(r2)
#endif
0183 
0184     .text
0185 /*
0186  * Internal routine to enable floating point and set FPSCR to 0.
0187  * Don't call it from C; it doesn't use the normal calling convention.
0188  */
0189 fpenable:
0190 #ifdef CONFIG_PPC32
0191     stwu    r1,-64(r1)
0192 #else
0193     stdu    r1,-64(r1)
0194 #endif
0195     mfmsr   r10
0196     ori r11,r10,MSR_FP
0197     mtmsr   r11
0198     isync
0199     stfd    fr0,24(r1)
0200     stfd    fr1,16(r1)
0201     stfd    fr31,8(r1)
0202     LDCONST(fr1, fpzero)
0203     mffs    fr31
0204     MTFSF_L(fr1)
0205     blr
0206 
0207 fpdisable:
0208     mtlr    r12
0209     MTFSF_L(fr31)
0210     lfd fr31,8(r1)
0211     lfd fr1,16(r1)
0212     lfd fr0,24(r1)
0213     mtmsr   r10
0214     isync
0215     addi    r1,r1,64
0216     blr
0217 
/*
 * Vector add, floating point.
 * r3 -> destination, r4/r5 -> sources; each is 4 single-precision values.
 * LR is preserved via r12 (restored in fpdisable).
 */
_GLOBAL(vaddfp)
    mflr    r12
    bl  fpenable
    li  r0,4            /* 4 lanes */
    mtctr   r0
    li  r6,0            /* byte offset into the vectors */
1:  lfsx    fr0,r4,r6
    lfsx    fr1,r5,r6
    fadds   fr0,fr0,fr1
    stfsx   fr0,r3,r6
    addi    r6,r6,4
    bdnz    1b
    b   fpdisable
0234 
/*
 * Vector subtract, floating point.
 * r3 -> destination, r4/r5 -> sources; result lane = r4[i] - r5[i].
 * LR is preserved via r12 (restored in fpdisable).
 */
_GLOBAL(vsubfp)
    mflr    r12
    bl  fpenable
    li  r0,4            /* 4 lanes */
    mtctr   r0
    li  r6,0            /* byte offset into the vectors */
1:  lfsx    fr0,r4,r6
    lfsx    fr1,r5,r6
    fsubs   fr0,fr0,fr1
    stfsx   fr0,r3,r6
    addi    r6,r6,4
    bdnz    1b
    b   fpdisable
0251 
/*
 * Vector multiply and add, floating point.
 * r3 -> destination; r4, r5, r6 -> sources.
 * Result lane = r4[i] * r6[i] + r5[i].
 * fr2 is used beyond fpenable's saved set, so save/restore it here.
 */
_GLOBAL(vmaddfp)
    mflr    r12
    bl  fpenable
    stfd    fr2,32(r1)      /* extra scratch FPR, in fpenable's frame */
    li  r0,4            /* 4 lanes */
    mtctr   r0
    li  r7,0            /* byte offset into the vectors */
1:  lfsx    fr0,r4,r7
    lfsx    fr1,r5,r7
    lfsx    fr2,r6,r7
    fmadds  fr0,fr0,fr2,fr1
    stfsx   fr0,r3,r7
    addi    r7,r7,4
    bdnz    1b
    lfd fr2,32(r1)
    b   fpdisable
0271 
/*
 * Vector negative multiply and subtract, floating point.
 * r3 -> destination; r4, r5, r6 -> sources.
 * Result lane = -(r4[i] * r6[i] - r5[i]).
 * fr2 is used beyond fpenable's saved set, so save/restore it here.
 */
_GLOBAL(vnmsubfp)
    mflr    r12
    bl  fpenable
    stfd    fr2,32(r1)      /* extra scratch FPR, in fpenable's frame */
    li  r0,4            /* 4 lanes */
    mtctr   r0
    li  r7,0            /* byte offset into the vectors */
1:  lfsx    fr0,r4,r7
    lfsx    fr1,r5,r7
    lfsx    fr2,r6,r7
    fnmsubs fr0,fr0,fr2,fr1
    stfsx   fr0,r3,r7
    addi    r7,r7,4
    bdnz    1b
    lfd fr2,32(r1)
    b   fpdisable
0291 
/*
 * Vector reciprocal estimate.  We just compute 1.0/x.
 * r3 -> destination, r4 -> source.
 */
_GLOBAL(vrefp)
    mflr    r12
    bl  fpenable
    li  r0,4            /* 4 lanes */
    LDCONST(fr1, fpone)
    mtctr   r0
    li  r6,0            /* byte offset into the vectors */
1:  lfsx    fr0,r4,r6
    fdivs   fr0,fr1,fr0     /* 1.0 / x */
    stfsx   fr0,r3,r6
    addi    r6,r6,4
    bdnz    1b
    b   fpdisable
0309 
/*
 * Vector reciprocal square-root estimate, floating point.
 * We use the frsqrte instruction for the initial estimate followed
 * by 2 iterations of Newton-Raphson to get sufficient accuracy.
 * r3 -> destination, r4 -> source.
 * Needs fr2-fr5 beyond fpenable's saved set, so save/restore them here.
 */
_GLOBAL(vrsqrtefp)
    mflr    r12
    bl  fpenable
    stfd    fr2,32(r1)      /* extra scratch FPRs, in fpenable's frame */
    stfd    fr3,40(r1)
    stfd    fr4,48(r1)
    stfd    fr5,56(r1)
    li  r0,4            /* 4 lanes */
    LDCONST(fr4, fpone)
    LDCONST(fr5, fphalf)
    mtctr   r0
    li  r6,0            /* byte offset into the vectors */
1:  lfsx    fr0,r4,r6
    frsqrte fr1,fr0     /* r = frsqrte(s) */
    fmuls   fr3,fr1,fr0 /* r * s */
    fmuls   fr2,fr1,fr5 /* r * 0.5 */
    fnmsubs fr3,fr1,fr3,fr4 /* 1 - s * r * r */
    fmadds  fr1,fr2,fr3,fr1 /* r = r + 0.5 * r * (1 - s * r * r) */
    fmuls   fr3,fr1,fr0 /* r * s */
    fmuls   fr2,fr1,fr5 /* r * 0.5 */
    fnmsubs fr3,fr1,fr3,fr4 /* 1 - s * r * r */
    fmadds  fr1,fr2,fr3,fr1 /* r = r + 0.5 * r * (1 - s * r * r) */
    stfsx   fr1,r3,r6
    addi    r6,r6,4
    bdnz    1b
    lfd fr5,56(r1)
    lfd fr4,48(r1)
    lfd fr3,40(r1)
    lfd fr2,32(r1)
    b   fpdisable