/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/lib/memset.S
 *
 *  Copyright (C) 1995-2000 Russell King
 *
 *  ASM optimised string functions
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>

    .text
    .align  5

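/*
 * void *memset(void *p, int c, size_t n)
 *   Entry: r0 = p (destination), r1 = c (fill byte), r2 = n (byte count).
 *   Exit:  r0 = p, per the C memset() contract.
 */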
ENTRY(__memset)
ENTRY(mmioset)
WEAK(memset)
UNWIND( .fnstart         )
    ands    r3, r0, #3      @ 1 unaligned?
    mov ip, r0          @ preserve r0 as return value
    bne 6f          @ 1
/*
 * we know that the pointer in ip is aligned to a word boundary.
 */
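/*
 * Replicate the low byte of r1 across the whole word, and mirror it
 * into r3 so two registers can be stored per instruction.
 */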
1:  orr r1, r1, r1, lsl #8
    orr r1, r1, r1, lsl #16
    mov r3, r1
7:  cmp r2, #16
    blt 4f
UNWIND( .fnend              )

#if ! CALGN(1)+0
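/*
 * CALGN() (asm/assembler.h) expands to its argument only on CPUs that
 * benefit from cacheline-aligning bulk stores; elsewhere it expands to
 * nothing and this condition selects the simpler loop below.
 */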

/*
 * We need 2 extra registers for this loop - use r8 and the LR
 */
UNWIND( .fnstart            )
UNWIND( .save {r8, lr}      )
    stmfd   sp!, {r8, lr}
    mov r8, r1
    mov lr, r3

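/*
 * All four of r1, r3, r8 and lr now hold the fill pattern, so each
 * stmia stores 16 bytes and one iteration clears 64.  The 'ge'
 * condition skips the stores once the count has gone negative.
 */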
2:  subs    r2, r2, #64
    stmiage ip!, {r1, r3, r8, lr}   @ 64 bytes at a time.
    stmiage ip!, {r1, r3, r8, lr}
    stmiage ip!, {r1, r3, r8, lr}
    stmiage ip!, {r1, r3, r8, lr}
    bgt 2b
    ldmfdeq sp!, {r8, pc}       @ Now <64 bytes to go.
/*
 * No need to correct the count: we only test bits from now on.  r2 is
 * now (remaining - 64) with remaining < 64, so its low six bits still
 * equal the remaining byte count modulo 64.
 */
    tst r2, #32
    stmiane ip!, {r1, r3, r8, lr}
    stmiane ip!, {r1, r3, r8, lr}
    tst r2, #16
    stmiane ip!, {r1, r3, r8, lr}
    ldmfd   sp!, {r8, lr}
UNWIND( .fnend              )

#else

/*
 * This version aligns the destination pointer in order to write
 * whole cache lines at once.
 */

UNWIND( .fnstart               )
UNWIND( .save {r4-r8, lr}      )
    stmfd   sp!, {r4-r8, lr}
    mov r4, r1
    mov r5, r3
    mov r6, r1
    mov r7, r3
    mov r8, r1
    mov lr, r3

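/*
 * Only bother aligning when there is enough work to justify it
 * (more than 96 bytes) and ip is not already 32-byte aligned.
 */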
    cmp r2, #96
    tstgt   ip, #31
    ble 3f

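/*
 * Store 4-28 bytes to bring ip up to a 32-byte boundary.  Shifting
 * the byte count left by 28 puts bit 4 into C and bit 3 into N, so
 * the conditional stmia instructions store 16 and/or 8 bytes; bit 2,
 * now sitting at bit 30, covers the final word.
 */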
    and r8, ip, #31
    rsb r8, r8, #32
    sub r2, r2, r8
    movs    r8, r8, lsl #(32 - 4)
    stmiacs ip!, {r4, r5, r6, r7}
    stmiami ip!, {r4, r5}
    tst r8, #(1 << 30)
    mov r8, r1
    strne   r1, [ip], #4

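/*
 * Eight registers now hold the pattern: each stmia fills an entire
 * 32-byte cacheline, 64 bytes per iteration.
 */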
3:  subs    r2, r2, #64
    stmiage ip!, {r1, r3-r8, lr}
    stmiage ip!, {r1, r3-r8, lr}
    bgt 3b
    ldmfdeq sp!, {r4-r8, pc}

    tst r2, #32
    stmiane ip!, {r1, r3-r8, lr}
    tst r2, #16
    stmiane ip!, {r4-r7}
    ldmfd   sp!, {r4-r8, lr}
UNWIND( .fnend                 )

#endif

UNWIND( .fnstart            )
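/* Fewer than 16 bytes remain: store 8, then 4, then the byte tail. */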
4:  tst r2, #8
    stmiane ip!, {r1, r3}
    tst r2, #4
    strne   r1, [ip], #4
/*
 * When we get here, we've got less than 4 bytes to set.  We
 * may have an unaligned pointer as well.
 */
5:  tst r2, #2
    strbne  r1, [ip], #1
    strbne  r1, [ip], #1
    tst r2, #1
    strbne  r1, [ip], #1
    ret lr

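/*
 * Unaligned destination: r3 = r0 & 3 (1-3).  Store 4 - r3 single
 * bytes to reach a word boundary, then rejoin the aligned path at 1b.
 * With fewer than 4 bytes in total, branch back to the byte tail at 5b.
 */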
6:  subs    r2, r2, #4      @ 1 do we have enough
    blt 5b          @ 1 bytes to align with?
    cmp r3, #2          @ 1
    strblt  r1, [ip], #1        @ 1
    strble  r1, [ip], #1        @ 1
    strb    r1, [ip], #1        @ 1
    add r2, r2, r3      @ 1 (r2 = r2 - (4 - r3))
    b   1b
UNWIND( .fnend   )
ENDPROC(memset)
ENDPROC(mmioset)
ENDPROC(__memset)

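/*
 * __memset32 and __memset64 fill memory with 32-bit and 64-bit
 * patterns.  The fill words already arrive in whole registers (r1,
 * plus r3 for the high word of __memset64), so no byte replication
 * is needed: enter memset at label 7 with r1 and r3 set up.
 */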
ENTRY(__memset32)
UNWIND( .fnstart         )
    mov r3, r1          @ copy r1 to r3 and fall into memset64
UNWIND( .fnend   )
ENDPROC(__memset32)
ENTRY(__memset64)
UNWIND( .fnstart         )
    mov ip, r0          @ preserve r0 as return value
    b   7b          @ jump into the middle of memset
UNWIND( .fnend   )
ENDPROC(__memset64)