/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/vfp/vfphw.S
 *
 *  Copyright (C) 2004 ARM Limited.
 *  Written by Deep Blue Solutions Limited.
 *
 * This code is called from the kernel's undefined instruction trap.
 * r9 holds the return address for successful handling.
 * lr holds the return address for unrecognised instructions.
 * r10 points at the start of the private FP workspace in the thread structure
 * sp points to a struct pt_regs (as defined in include/asm/proc/ptrace.h)
 */
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/vfpmacros.h>
#include <linux/kern_levels.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>

    .macro  DBGSTR, str
#ifdef DEBUG
    stmfd   sp!, {r0-r3, ip, lr}
    ldr r0, =1f
    bl  _printk
    ldmfd   sp!, {r0-r3, ip, lr}

    .pushsection .rodata, "a"
1:  .ascii  KERN_DEBUG "VFP: \str\n"
    .byte   0
    .previous
#endif
    .endm

    .macro  DBGSTR1, str, arg
#ifdef DEBUG
    stmfd   sp!, {r0-r3, ip, lr}
    mov r1, \arg
    ldr r0, =1f
    bl  _printk
    ldmfd   sp!, {r0-r3, ip, lr}

    .pushsection .rodata, "a"
1:  .ascii  KERN_DEBUG "VFP: \str\n"
    .byte   0
    .previous
#endif
    .endm

    .macro  DBGSTR3, str, arg1, arg2, arg3
#ifdef DEBUG
    stmfd   sp!, {r0-r3, ip, lr}
    mov r3, \arg3
    mov r2, \arg2
    mov r1, \arg1
    ldr r0, =1f
    bl  _printk
    ldmfd   sp!, {r0-r3, ip, lr}

    .pushsection .rodata, "a"
1:  .ascii  KERN_DEBUG "VFP: \str\n"
    .byte   0
    .previous
#endif
    .endm
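
@ The DBGSTR* macros above only generate code when DEBUG is defined.
@ They preserve r0-r3, ip and lr, load the address of the embedded
@ KERN_DEBUG format string into r0, move up to three arguments into
@ r1-r3 and call _printk, e.g. DBGSTR1 "fpexc %08x", r1 prints
@ "VFP: fpexc <value>".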


@ VFP hardware support entry point.
@
@  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
@  r2  = PC value to resume execution after successful emulation
@  r9  = normal "successful" return address
@  r10 = vfp_state union
@  r11 = CPU number
@  lr  = unrecognised instruction return address
@  IRQs enabled.
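@
@ Overall flow: if the VFP is disabled, enable it, make sure the
@ hardware holds this thread's context (saving and/or reloading state
@ lazily as required) and retry the trapping instruction.  If the VFP
@ is already enabled, a pending exception is handed to the C support
@ code via VFP_bounce; if no exception is pending the instruction is
@ not ours and we return via lr to the next undefined-instruction
@ handler.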
ENTRY(vfp_support_entry)
    DBGSTR3 "instr %08x pc %08x state %p", r0, r2, r10

    .fpu    vfpv2
    VFPFMRX r1, FPEXC       @ Is the VFP enabled?
    DBGSTR1 "fpexc %08x", r1
    tst r1, #FPEXC_EN
    bne look_for_VFP_exceptions @ VFP is already enabled

    DBGSTR1 "enable %x", r10
    ldr r3, vfp_current_hw_state_address
    orr r1, r1, #FPEXC_EN   @ user FPEXC has the enable bit set
    ldr r4, [r3, r11, lsl #2]   @ vfp_current_hw_state pointer
    bic r5, r1, #FPEXC_EX   @ make sure exceptions are disabled
    cmp r4, r10         @ this thread owns the hw context?
#ifndef CONFIG_SMP
    @ For UP, checking that this thread owns the hw context is
    @ sufficient to determine that the hardware state is valid.
    beq vfp_hw_state_valid

    @ On UP, we lazily save the VFP context.  As a different
    @ thread wants ownership of the VFP hardware, save the old
    @ state if there was a previous (valid) owner.

    VFPFMXR FPEXC, r5       @ enable VFP, disable any pending
                    @ exceptions, so we can get at the
                    @ rest of it

    DBGSTR1 "save old state %p", r4
    cmp r4, #0          @ if the vfp_current_hw_state is NULL
    beq vfp_reload_hw       @ then the hw state needs reloading
    VFPFSTMIA r4, r5        @ save the working registers
    VFPFMRX r5, FPSCR       @ current status
#ifndef CONFIG_CPU_FEROCEON
    tst r1, #FPEXC_EX       @ is there additional state to save?
    beq 1f
    VFPFMRX r6, FPINST      @ FPINST (only if FPEXC.EX is set)
    tst r1, #FPEXC_FP2V     @ is there an FPINST2 to read?
    beq 1f
    VFPFMRX r8, FPINST2     @ FPINST2 if needed (and present)
1:
#endif
    stmia   r4, {r1, r5, r6, r8}    @ save FPEXC, FPSCR, FPINST, FPINST2
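                    @ (VFPFSTMIA advanced r4 past the d registers, so
                    @ these four control words sit immediately after
                    @ them; the reload path below and vfp_save_state
                    @ use the same layout.)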
vfp_reload_hw:

#else
    @ For SMP, if this thread does not own the hw context, then we
    @ need to reload it.  No need to save the old state as on SMP,
    @ we always save the state when we switch away from a thread.
    bne vfp_reload_hw

    @ This thread has ownership of the current hardware context.
    @ However, it may have been migrated to another CPU, in which
    @ case the saved state is newer than the hardware context.
    @ Check this by looking at the CPU number which the state was
    @ last loaded onto.
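    @ (VFP_CPU holds the number of the CPU the state was last loaded
    @ onto; r11 is the current CPU.)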
    ldr ip, [r10, #VFP_CPU]
    teq ip, r11
    beq vfp_hw_state_valid

vfp_reload_hw:
    @ We're loading this thread's state into the VFP hardware. Record
    @ which CPU now holds the most up-to-date VFP context.
    str r11, [r10, #VFP_CPU]

    VFPFMXR FPEXC, r5       @ enable VFP, disable any pending
                    @ exceptions, so we can get at the
                    @ rest of it
#endif

    DBGSTR1 "load state %p", r10
    str r10, [r3, r11, lsl #2]  @ update the vfp_current_hw_state pointer
                    @ Load the saved state back into the VFP
    VFPFLDMIA r10, r5       @ reload the working registers while
                    @ FPEXC is in a safe state
    ldmia   r10, {r1, r5, r6, r8}   @ load FPEXC, FPSCR, FPINST, FPINST2
#ifndef CONFIG_CPU_FEROCEON
    tst r1, #FPEXC_EX       @ is there additional state to restore?
    beq 1f
    VFPFMXR FPINST, r6      @ restore FPINST (only if FPEXC.EX is set)
    tst r1, #FPEXC_FP2V     @ is there an FPINST2 to write?
    beq 1f
    VFPFMXR FPINST2, r8     @ FPINST2 if needed (and present)
1:
#endif
    VFPFMXR FPSCR, r5       @ restore status

@ The context stored in the VFP hardware is up to date with this thread
vfp_hw_state_valid:
    tst r1, #FPEXC_EX
    bne process_exception   @ might as well handle the pending
                    @ exception before retrying; branch
                    @ out before setting an FPEXC that
                    @ stops us reading the VFP registers
    VFPFMXR FPEXC, r1       @ Restore FPEXC last
    sub r2, r2, #4      @ Retry current instruction - if Thumb
    str r2, [sp, #S_PC]     @ mode it's two 16-bit instructions,
                    @ else it's one 32-bit instruction, so
                    @ always subtract 4 from the following
                    @ instruction address.
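                    @ (r2 arrived as the resume address, i.e. the next
                    @ instruction, so storing r2 - 4 into the saved PC
                    @ on the pt_regs frame makes the return from the
                    @ trap re-execute the trapping VFP instruction.)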
    dec_preempt_count_ti r10, r4
    ret r9          @ we think we have handled things


look_for_VFP_exceptions:
    @ Check for synchronous or asynchronous exception
    tst r1, #FPEXC_EX | FPEXC_DEX
    bne process_exception
    @ On some implementations of the VFP subarch 1, setting FPSCR.IXE
    @ causes all the CDP instructions to be bounced synchronously without
    @ setting the FPEXC.EX bit
    VFPFMRX r5, FPSCR
    tst r5, #FPSCR_IXE
    bne process_exception

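    @ A non-zero vector length field means a short-vector instruction
    @ was bounced without FPEXC.EX/DEX set: mark it as a synchronous
    @ bounce (FPEXC_DEX) and hand it to the support code; otherwise
    @ this is not a VFP exception we recognise.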
    tst r5, #FPSCR_LENGTH_MASK
    beq skip
    orr r1, r1, #FPEXC_DEX
    b   process_exception
skip:

    @ Fall through and hand on to the next handler - the coprocessor
    @ instruction was not recognised by VFP

    DBGSTR  "not VFP"
    dec_preempt_count_ti r10, r4
    ret lr

process_exception:
    DBGSTR  "bounce"
    mov r2, sp          @ nothing stacked - regdump is at TOS
    mov lr, r9          @ set up for a return to the user code.

    @ Now call the C code to package up the bounce to the support code
    @   r0 holds the trigger instruction
    @   r1 holds the FPEXC value
    @   r2 pointer to register dump
    b   VFP_bounce      @ we have handled this - the support
                    @ code will raise an exception if
                    @ required. If not, the user code will
                    @ retry the faulted instruction
ENDPROC(vfp_support_entry)

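@ Save the current VFP hardware state to the memory pointed to by r0.
@ Used by the rest of the VFP code (with the VFP enabled) when the
@ hardware state has to be written out, e.g. when switching away from
@ a thread on SMP as noted above.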
ENTRY(vfp_save_state)
    @ Save the current VFP state
    @ r0 - save location
    @ r1 - FPEXC
    DBGSTR1 "save VFP state %p", r0
    VFPFSTMIA r0, r2        @ save the working registers
    VFPFMRX r2, FPSCR       @ current status
    tst r1, #FPEXC_EX       @ is there additional state to save?
    beq 1f
    VFPFMRX r3, FPINST      @ FPINST (only if FPEXC.EX is set)
    tst r1, #FPEXC_FP2V     @ is there an FPINST2 to read?
    beq 1f
    VFPFMRX r12, FPINST2        @ FPINST2 if needed (and present)
1:
    stmia   r0, {r1, r2, r3, r12}   @ save FPEXC, FPSCR, FPINST, FPINST2
    ret lr
ENDPROC(vfp_save_state)

    .align
vfp_current_hw_state_address:
    .word   vfp_current_hw_state

    .macro  tbl_branch, base, tmp, shift
#ifdef CONFIG_THUMB2_KERNEL
    adr \tmp, 1f
    add \tmp, \tmp, \base, lsl \shift
    ret \tmp
#else
    add pc, pc, \base, lsl \shift
    mov r0, r0
#endif
1:
    .endm
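
@ tbl_branch dispatches into a table of fixed-size entries: each entry
@ below is padded to 8 bytes with ".org 1b + 8", so an index scaled by
@ "lsl #3" selects one entry.  The Thumb-2 variant computes the entry
@ address from label 1; the ARM variant relies on pc reading as the
@ current instruction's address + 8, with "mov r0, r0" as a 4-byte
@ filler so that entry 0 starts exactly 8 bytes past the add.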

ENTRY(vfp_get_float)
    tbl_branch r0, r3, #3
    .fpu    vfpv2
    .irp    dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
1:  vmov    r0, s\dr
    ret lr
    .org    1b + 8
    .endr
    .irp    dr,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
1:  vmov    r0, s\dr
    ret lr
    .org    1b + 8
    .endr
ENDPROC(vfp_get_float)

ENTRY(vfp_put_float)
    tbl_branch r1, r3, #3
    .fpu    vfpv2
    .irp    dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
1:  vmov    s\dr, r0
    ret lr
    .org    1b + 8
    .endr
    .irp    dr,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
1:  vmov    s\dr, r0
    ret lr
    .org    1b + 8
    .endr
ENDPROC(vfp_put_float)

ENTRY(vfp_get_double)
    tbl_branch r0, r3, #3
    .fpu    vfpv2
    .irp    dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
1:  vmov    r0, r1, d\dr
    ret lr
    .org    1b + 8
    .endr
#ifdef CONFIG_VFPv3
    @ d16 - d31 registers
    .fpu    vfpv3
    .irp    dr,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
1:  vmov    r0, r1, d\dr
    ret lr
    .org    1b + 8
    .endr
#endif

    @ virtual register 16 (or 32 if VFPv3) for compare with zero
    mov r0, #0
    mov r1, #0
    ret lr
ENDPROC(vfp_get_double)

ENTRY(vfp_put_double)
    tbl_branch r2, r3, #3
    .fpu    vfpv2
    .irp    dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
1:  vmov    d\dr, r0, r1
    ret lr
    .org    1b + 8
    .endr
#ifdef CONFIG_VFPv3
    .fpu    vfpv3
    @ d16 - d31 registers
    .irp    dr,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
1:  vmov    d\dr, r0, r1
    ret lr
    .org    1b + 8
    .endr
#endif
ENDPROC(vfp_put_double)