#!/usr/bin/env perl
# SPDX-License-Identifier: GPL-1.0+ OR BSD-3-Clause
#
# ====================================================================
# Written by Andy Polyakov, @dot-asm, initially for the OpenSSL
# project.
# ====================================================================
#
#			IALU(*)/gcc-4.4		NEON
#
# ARM11xx(ARMv6)	7.78/+100%		-
# Cortex-A5		6.35/+130%		3.00
# Cortex-A8		6.25/+115%		2.36
# Cortex-A9		5.10/+95%		2.55
# Cortex-A15		3.85/+85%		1.25(**)
# Snapdragon S4		5.70/+100%		1.48(**)
#
# (*)	this is for -march=armv6, i.e. with a bunch of ldrb instructions
#	loading the data;
# (**)	these are trade-off results, they can be improved by ~8%, but at
#	the cost of a 15/12% regression on Cortex-A5/A7; it's even possible
#	to improve the Cortex-A9 result, but then A5/A7 lose more than 20%;

$flavour = shift;
if ($flavour=~/\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; }
else { while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {} }

if ($flavour && $flavour ne "void") {
    $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
    ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
    ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
    die "can't locate arm-xlate.pl";

    open STDOUT,"| \"$^X\" $xlate $flavour $output";
} else {
    open STDOUT,">$output";
}
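
# The flavour argument selects an arm-xlate.pl translation target
# (e.g. linux32, ios32), while "void" emits the raw perlasm output.
# A typical invocation, assuming this file is saved as
# poly1305-armv4.pl next to the CRYPTOGAMS perlasm helpers, would be:
#
#	perl poly1305-armv4.pl linux32 poly1305-core.S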

($ctx,$inp,$len,$padbit)=map("r$_",(0..3));

$code.=<<___;
#ifndef __KERNEL__
# include "arm_arch.h"
#else
# define __ARM_ARCH__ __LINUX_ARM_ARCH__
# define __ARM_MAX_ARCH__ __LINUX_ARM_ARCH__
# define poly1305_init   poly1305_init_arm
# define poly1305_blocks poly1305_blocks_arm
# define poly1305_emit   poly1305_emit_arm
.globl	poly1305_blocks_neon
#endif

#if defined(__thumb2__)
.syntax	unified
.thumb
#else
.code	32
#endif

.text

.globl	poly1305_emit
.globl	poly1305_blocks
.globl	poly1305_init
.type	poly1305_init,%function
.align	5
poly1305_init:
.Lpoly1305_init:
	stmdb	sp!,{r4-r11}

	eor	r3,r3,r3
	cmp	$inp,#0
	str	r3,[$ctx,#0]		@ zero hash value
	str	r3,[$ctx,#4]
	str	r3,[$ctx,#8]
	str	r3,[$ctx,#12]
	str	r3,[$ctx,#16]
	str	r3,[$ctx,#36]		@ clear is_base2_26
	add	$ctx,$ctx,#20

#ifdef	__thumb2__
	it	eq
#endif
	moveq	r0,#0
	beq	.Lno_key

#if	__ARM_MAX_ARCH__>=7
	mov	r3,#-1
	str	r3,[$ctx,#28]		@ impossible key power value
# ifndef __KERNEL__
	adr	r11,.Lpoly1305_init
	ldr	r12,.LOPENSSL_armcap
# endif
#endif
	ldrb	r4,[$inp,#0]
	mov	r10,#0x0fffffff
	ldrb	r5,[$inp,#1]
	and	r3,r10,#-4		@ 0x0ffffffc
	ldrb	r6,[$inp,#2]
	ldrb	r7,[$inp,#3]
	orr	r4,r4,r5,lsl#8
	ldrb	r5,[$inp,#4]
	orr	r4,r4,r6,lsl#16
	ldrb	r6,[$inp,#5]
	orr	r4,r4,r7,lsl#24
	ldrb	r7,[$inp,#6]
	and	r4,r4,r10

#if	__ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
# if !defined(_WIN32)
	ldr	r12,[r11,r12]		@ OPENSSL_armcap_P
# endif
# if defined(__APPLE__) || defined(_WIN32)
	ldr	r12,[r12]
# endif
#endif
	ldrb	r8,[$inp,#7]
	orr	r5,r5,r6,lsl#8
	ldrb	r6,[$inp,#8]
	orr	r5,r5,r7,lsl#16
	ldrb	r7,[$inp,#9]
	orr	r5,r5,r8,lsl#24
	ldrb	r8,[$inp,#10]
	and	r5,r5,r3

#if	__ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
	tst	r12,#ARMV7_NEON		@ check for NEON
# ifdef	__thumb2__
	adr	r9,.Lpoly1305_blocks_neon
	adr	r11,.Lpoly1305_blocks
	it	ne
	movne	r11,r9
	adr	r12,.Lpoly1305_emit
	orr	r11,r11,#1		@ thumb-ify addresses
	orr	r12,r12,#1
# else
	add	r12,r11,#(.Lpoly1305_emit-.Lpoly1305_init)
	ite	eq
	addeq	r11,r11,#(.Lpoly1305_blocks-.Lpoly1305_init)
	addne	r11,r11,#(.Lpoly1305_blocks_neon-.Lpoly1305_init)
# endif
#endif
	ldrb	r9,[$inp,#11]
	orr	r6,r6,r7,lsl#8
	ldrb	r7,[$inp,#12]
	orr	r6,r6,r8,lsl#16
	ldrb	r8,[$inp,#13]
	orr	r6,r6,r9,lsl#24
	ldrb	r9,[$inp,#14]
	and	r6,r6,r3

	ldrb	r10,[$inp,#15]
	orr	r7,r7,r8,lsl#8
	str	r4,[$ctx,#0]
	orr	r7,r7,r9,lsl#16
	str	r5,[$ctx,#4]
	orr	r7,r7,r10,lsl#24
	str	r6,[$ctx,#8]
	and	r7,r7,r3
	str	r7,[$ctx,#12]
#if	__ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
	stmia	r2,{r11,r12}		@ fill functions table
	mov	r0,#1
#else
	mov	r0,#0
#endif
.Lno_key:
	ldmia	sp!,{r4-r11}
#if	__ARM_ARCH__>=5
	ret				@ bx	lr
#else
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.size	poly1305_init,.-poly1305_init
___
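
# For reference, the ldrb/orr/and sequence above implements the
# standard Poly1305 key clamp, r &= 0x0ffffffc0ffffffc0ffffffc0fffffff.
# A plain-Perl model of that step (an illustrative sketch, not used by
# the generator itself):
sub poly1305_clamp_ref {
    my @r = unpack("V4",shift);		# 16 key bytes -> 4 LE words
    $r[0] &= 0x0fffffff;		# clear top 4 bits of word 0
    $r[$_] &= 0x0ffffffc for (1..3);	# clear top 4 and low 2 bits
    return @r;
}
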
{
my ($h0,$h1,$h2,$h3,$h4,$r0,$r1,$r2,$r3)=map("r$_",(4..12));
my ($s1,$s2,$s3)=($r1,$r2,$r3);

$code.=<<___;
.type	poly1305_blocks,%function
.align	5
poly1305_blocks:
.Lpoly1305_blocks:
	stmdb	sp!,{r3-r11,lr}

	ands	$len,$len,#-16
	beq	.Lno_data

	add	$len,$len,$inp		@ end pointer
	sub	sp,sp,#32

#if __ARM_ARCH__<7
	ldmia	$ctx,{$h0-$r3}		@ load context
	add	$ctx,$ctx,#20
	str	$len,[sp,#16]		@ offload stuff
	str	$ctx,[sp,#12]
#else
	ldr	lr,[$ctx,#36]		@ is_base2_26
	ldmia	$ctx!,{$h0-$h4}		@ load hash value
	str	$len,[sp,#16]		@ offload stuff
	str	$ctx,[sp,#12]

	adds	$r0,$h0,$h1,lsl#26	@ base 2^26 -> base 2^32
	mov	$r1,$h1,lsr#6
	adcs	$r1,$r1,$h2,lsl#20
	mov	$r2,$h2,lsr#12
	adcs	$r2,$r2,$h3,lsl#14
	mov	$r3,$h3,lsr#18
	adcs	$r3,$r3,$h4,lsl#8
	mov	$len,#0
	teq	lr,#0
	str	$len,[$ctx,#16]		@ clear is_base2_26
	adc	$len,$len,$h4,lsr#24
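	@ ($r0:$r1:$r2:$r3 plus the top bits gathered in $len now hold
	@ h0 + h1*2^26 + h2*2^52 + h3*2^78 + h4*2^104 recoded in
	@ radix 2^32)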

	itttt	ne
	movne	$h0,$r0			@ choose between radixes
	movne	$h1,$r1
	movne	$h2,$r2
	movne	$h3,$r3
	ldmia	$ctx,{$r0-$r3}		@ load key
	it	ne
	movne	$h4,$len
#endif

	mov	lr,$inp
	cmp	$padbit,#0
	str	$r1,[sp,#20]
	str	$r2,[sp,#24]
	str	$r3,[sp,#28]
	b	.Loop

.align	4
.Loop:
#if __ARM_ARCH__<7
	ldrb	r0,[lr],#16		@ load input
# ifdef	__thumb2__
	it	hi
# endif
	addhi	$h4,$h4,#1		@ 1<<128
	ldrb	r1,[lr,#-15]
	ldrb	r2,[lr,#-14]
	ldrb	r3,[lr,#-13]
	orr	r1,r0,r1,lsl#8
	ldrb	r0,[lr,#-12]
	orr	r2,r1,r2,lsl#16
	ldrb	r1,[lr,#-11]
	orr	r3,r2,r3,lsl#24
	ldrb	r2,[lr,#-10]
	adds	$h0,$h0,r3		@ accumulate input

	ldrb	r3,[lr,#-9]
	orr	r1,r0,r1,lsl#8
	ldrb	r0,[lr,#-8]
	orr	r2,r1,r2,lsl#16
	ldrb	r1,[lr,#-7]
	orr	r3,r2,r3,lsl#24
	ldrb	r2,[lr,#-6]
	adcs	$h1,$h1,r3

	ldrb	r3,[lr,#-5]
	orr	r1,r0,r1,lsl#8
	ldrb	r0,[lr,#-4]
	orr	r2,r1,r2,lsl#16
	ldrb	r1,[lr,#-3]
	orr	r3,r2,r3,lsl#24
	ldrb	r2,[lr,#-2]
	adcs	$h2,$h2,r3

	ldrb	r3,[lr,#-1]
	orr	r1,r0,r1,lsl#8
	str	lr,[sp,#8]		@ offload input pointer
	orr	r2,r1,r2,lsl#16
	add	$s1,$r1,$r1,lsr#2
	orr	r3,r2,r3,lsl#24
#else
	ldr	r0,[lr],#16		@ load input
	it	hi
	addhi	$h4,$h4,#1		@ padbit
	ldr	r1,[lr,#-12]
	ldr	r2,[lr,#-8]
	ldr	r3,[lr,#-4]
# ifdef	__ARMEB__
	rev	r0,r0
	rev	r1,r1
	rev	r2,r2
	rev	r3,r3
# endif
	adds	$h0,$h0,r0		@ accumulate input
	str	lr,[sp,#8]		@ offload input pointer
	adcs	$h1,$h1,r1
	add	$s1,$r1,$r1,lsr#2
	adcs	$h2,$h2,r2
#endif
	add	$s2,$r2,$r2,lsr#2
	adcs	$h3,$h3,r3
	add	$s3,$r3,$r3,lsr#2

	umull	r2,r3,$h1,$r0
	 adc	$h4,$h4,#0
	umull	r0,r1,$h0,$r0
	umlal	r2,r3,$h4,$s1
	umlal	r0,r1,$h3,$s1
	ldr	$r1,[sp,#20]		@ reload $r1
	umlal	r2,r3,$h2,$s3
	umlal	r0,r1,$h1,$s3
	umlal	r2,r3,$h3,$s2
	umlal	r0,r1,$h2,$s2
	umlal	r2,r3,$h0,$r1
	str	r0,[sp,#0]		@ future $h0
	 mul	r0,$s2,$h4
	ldr	$r2,[sp,#24]		@ reload $r2
	adds	r2,r2,r1		@ d1+=d0>>32
	 eor	r1,r1,r1
	adc	lr,r3,#0		@ future $h2
	str	r2,[sp,#4]		@ future $h1

	mul	r2,$s3,$h4
	eor	r3,r3,r3
	umlal	r0,r1,$h3,$s3
	ldr	$r3,[sp,#28]		@ reload $r3
	umlal	r2,r3,$h3,$r0
	umlal	r0,r1,$h2,$r0
	umlal	r2,r3,$h2,$r1
	umlal	r0,r1,$h1,$r1
	umlal	r2,r3,$h1,$r2
	umlal	r0,r1,$h0,$r2
	umlal	r2,r3,$h0,$r3
	ldr	$h0,[sp,#0]
	mul	$h4,$r0,$h4
	ldr	$h1,[sp,#4]

	adds	$h2,lr,r0		@ d2+=d1>>32
	ldr	lr,[sp,#8]		@ reload input pointer
	adc	r1,r1,#0
	adds	$h3,r2,r1		@ d3+=d2>>32
	ldr	r0,[sp,#16]		@ reload end pointer
	adc	r3,r3,#0
	add	$h4,$h4,r3		@ h4+=d3>>32

	and	r1,$h4,#-4
	and	$h4,$h4,#3
	add	r1,r1,r1,lsr#2		@ *=5
	adds	$h0,$h0,r1
	adcs	$h1,$h1,#0
	adcs	$h2,$h2,#0
	adcs	$h3,$h3,#0
	adc	$h4,$h4,#0

	cmp	r0,lr			@ done yet?
	bhi	.Loop

	ldr	$ctx,[sp,#12]
	add	sp,sp,#32
	stmdb	$ctx,{$h0-$h4}		@ store the result

.Lno_data:
#if	__ARM_ARCH__>=5
	ldmia	sp!,{r3-r11,pc}
#else
	ldmia	sp!,{r3-r11,lr}
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.size	poly1305_blocks,.-poly1305_blocks
___
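
# The scalar loop above computes, per 16-byte block,
#	h = ((h + m + padbit*2^128) * r) mod (2^130 - 5),
# spread over 32-bit limbs, with $s1-$s3 holding the rN+rN>>2
# multiples used for reduction. A big-integer model of one block step
# (an unoptimized, illustrative sketch, not used by the generator):
use Math::BigInt;
sub poly1305_block_ref {
    my ($h,$r,$m,$padbit) = @_;		# Math::BigInt inputs
    my $p = Math::BigInt->new(2)->bpow(130)->bsub(5);
    $h = $h->copy()->badd($m)
	    ->badd(Math::BigInt->new($padbit)->blsft(128));
    return $h->bmul($r)->bmod($p);
}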
}
{
my ($ctx,$mac,$nonce)=map("r$_",(0..2));
my ($h0,$h1,$h2,$h3,$h4,$g0,$g1,$g2,$g3)=map("r$_",(3..11));
my $g4=$ctx;

$code.=<<___;
.type	poly1305_emit,%function
.align	5
poly1305_emit:
.Lpoly1305_emit:
	stmdb	sp!,{r4-r11}

	ldmia	$ctx,{$h0-$h4}

#if __ARM_ARCH__>=7
	ldr	ip,[$ctx,#36]		@ is_base2_26

	adds	$g0,$h0,$h1,lsl#26	@ base 2^26 -> base 2^32
	mov	$g1,$h1,lsr#6
	adcs	$g1,$g1,$h2,lsl#20
	mov	$g2,$h2,lsr#12
	adcs	$g2,$g2,$h3,lsl#14
	mov	$g3,$h3,lsr#18
	adcs	$g3,$g3,$h4,lsl#8
	mov	$g4,#0
	adc	$g4,$g4,$h4,lsr#24

	tst	ip,ip
	itttt	ne
	movne	$h0,$g0
	movne	$h1,$g1
	movne	$h2,$g2
	movne	$h3,$g3
	it	ne
	movne	$h4,$g4
#endif

	adds	$g0,$h0,#5		@ compare to modulus
	adcs	$g1,$h1,#0
	adcs	$g2,$h2,#0
	adcs	$g3,$h3,#0
	adc	$g4,$h4,#0
	tst	$g4,#4			@ did it carry/borrow?

#ifdef	__thumb2__
	it	ne
#endif
	movne	$h0,$g0
	ldr	$g0,[$nonce,#0]
#ifdef	__thumb2__
	it	ne
#endif
	movne	$h1,$g1
	ldr	$g1,[$nonce,#4]
#ifdef	__thumb2__
	it	ne
#endif
	movne	$h2,$g2
	ldr	$g2,[$nonce,#8]
#ifdef	__thumb2__
	it	ne
#endif
	movne	$h3,$g3
	ldr	$g3,[$nonce,#12]

	adds	$h0,$h0,$g0
	adcs	$h1,$h1,$g1
	adcs	$h2,$h2,$g2
	adc	$h3,$h3,$g3

#if __ARM_ARCH__>=7
# ifdef __ARMEB__
	rev	$h0,$h0
	rev	$h1,$h1
	rev	$h2,$h2
	rev	$h3,$h3
# endif
	str	$h0,[$mac,#0]
	str	$h1,[$mac,#4]
	str	$h2,[$mac,#8]
	str	$h3,[$mac,#12]
#else
	strb	$h0,[$mac,#0]
	mov	$h0,$h0,lsr#8
	strb	$h1,[$mac,#4]
	mov	$h1,$h1,lsr#8
	strb	$h2,[$mac,#8]
	mov	$h2,$h2,lsr#8
	strb	$h3,[$mac,#12]
	mov	$h3,$h3,lsr#8

	strb	$h0,[$mac,#1]
	mov	$h0,$h0,lsr#8
	strb	$h1,[$mac,#5]
	mov	$h1,$h1,lsr#8
	strb	$h2,[$mac,#9]
	mov	$h2,$h2,lsr#8
	strb	$h3,[$mac,#13]
	mov	$h3,$h3,lsr#8

	strb	$h0,[$mac,#2]
	mov	$h0,$h0,lsr#8
	strb	$h1,[$mac,#6]
	mov	$h1,$h1,lsr#8
	strb	$h2,[$mac,#10]
	mov	$h2,$h2,lsr#8
	strb	$h3,[$mac,#14]
	mov	$h3,$h3,lsr#8

	strb	$h0,[$mac,#3]
	strb	$h1,[$mac,#7]
	strb	$h2,[$mac,#11]
	strb	$h3,[$mac,#15]
#endif
	ldmia	sp!,{r4-r11}
#if	__ARM_ARCH__>=5
	ret				@ bx	lr
#else
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.size	poly1305_emit,.-poly1305_emit
___
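
# poly1305_emit above produces the tag: the accumulator is fully
# reduced mod 2^130-5 (the conditional "compare to modulus" step),
# then the 128-bit nonce is added mod 2^128. As a big-integer sketch
# (illustrative only, not used by the generator):
use Math::BigInt;
sub poly1305_emit_ref {
    my ($h,$nonce) = @_;		# Math::BigInt inputs
    my $p = Math::BigInt->new(2)->bpow(130)->bsub(5);
    my $mask = Math::BigInt->new(2)->bpow(128)->bsub(1);
    return $h->copy()->bmod($p)->badd($nonce)->band($mask);
}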
{
my ($R0,$R1,$S1,$R2,$S2,$R3,$S3,$R4,$S4) = map("d$_",(0..9));
my ($D0,$D1,$D2,$D3,$D4, $H0,$H1,$H2,$H3,$H4) = map("q$_",(5..14));
my ($T0,$T1,$MASK) = map("q$_",(15,4,0));

my ($in2,$zeros,$tbl0,$tbl1) = map("r$_",(4..7));

$code.=<<___;
#if	__ARM_MAX_ARCH__>=7
.fpu	neon

.type	poly1305_init_neon,%function
.align	5
poly1305_init_neon:
.Lpoly1305_init_neon:
	ldr	r3,[$ctx,#48]		@ first table element
	cmp	r3,#-1			@ is value impossible?
	bne	.Lno_init_neon

	ldr	r4,[$ctx,#20]		@ load key base 2^32
	ldr	r5,[$ctx,#24]
	ldr	r6,[$ctx,#28]
	ldr	r7,[$ctx,#32]

	and	r2,r4,#0x03ffffff	@ base 2^32 -> base 2^26
	mov	r3,r4,lsr#26
	mov	r4,r5,lsr#20
	orr	r3,r3,r5,lsl#6
	mov	r5,r6,lsr#14
	orr	r4,r4,r6,lsl#12
	mov	r6,r7,lsr#8
	orr	r5,r5,r7,lsl#18
	and	r3,r3,#0x03ffffff
	and	r4,r4,#0x03ffffff
	and	r5,r5,#0x03ffffff
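	@ (the five limbs in r2-r6 satisfy
	@ r = r2 + r3*2^26 + r4*2^52 + r5*2^78 + r6*2^104;
	@ r6 needs no mask because the clamped key keeps it below 2^26)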

	vdup.32	$R0,r2			@ r^1 in both lanes
	add	r2,r3,r3,lsl#2		@ *5
	vdup.32	$R1,r3
	add	r3,r4,r4,lsl#2
	vdup.32	$S1,r2
	vdup.32	$R2,r4
	add	r4,r5,r5,lsl#2
	vdup.32	$S2,r3
	vdup.32	$R3,r5
	add	r5,r6,r6,lsl#2
	vdup.32	$S3,r4
	vdup.32	$R4,r6
	vdup.32	$S4,r5

	mov	$zeros,#2		@ counter

.Lsquare_neon:
	@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
	@ d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
	@ d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
	@ d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
	@ d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
	@ d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4

	vmull.u32	$D0,$R0,${R0}[1]
	vmull.u32	$D1,$R1,${R0}[1]
	vmull.u32	$D2,$R2,${R0}[1]
	vmull.u32	$D3,$R3,${R0}[1]
	vmull.u32	$D4,$R4,${R0}[1]

	vmlal.u32	$D0,$R4,${S1}[1]
	vmlal.u32	$D1,$R0,${R1}[1]
	vmlal.u32	$D2,$R1,${R1}[1]
	vmlal.u32	$D3,$R2,${R1}[1]
	vmlal.u32	$D4,$R3,${R1}[1]

	vmlal.u32	$D0,$R3,${S2}[1]
	vmlal.u32	$D1,$R4,${S2}[1]
	vmlal.u32	$D3,$R1,${R2}[1]
	vmlal.u32	$D2,$R0,${R2}[1]
	vmlal.u32	$D4,$R2,${R2}[1]

	vmlal.u32	$D0,$R2,${S3}[1]
	vmlal.u32	$D3,$R0,${R3}[1]
	vmlal.u32	$D1,$R3,${S3}[1]
	vmlal.u32	$D2,$R4,${S3}[1]
	vmlal.u32	$D4,$R1,${R3}[1]

	vmlal.u32	$D3,$R4,${S4}[1]
	vmlal.u32	$D0,$R1,${S4}[1]
	vmlal.u32	$D1,$R2,${S4}[1]
	vmlal.u32	$D2,$R3,${S4}[1]
	vmlal.u32	$D4,$R0,${R4}[1]

	@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
	@ lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
	@ and P. Schwabe
	@
	@ H0>>+H1>>+H2>>+H3>>+H4
	@ H3>>+H4>>*5+H0>>+H1
	@
	@ Trivia.
	@
	@ Result of multiplication of an n-bit number by an m-bit number
	@ is n+m bits wide. However! Even though 2^n is an n+1-bit
	@ number, an m-bit number multiplied by 2^n is still n+m bits
	@ wide.
	@
	@ Sum of two n-bit numbers is n+1 bits wide, sum of three is
	@ n+2, and so is sum of four. Sum of 2^m n-m-bit numbers and
	@ one n-bit number is n+1 bits wide.
	@
	@ >>+ denotes Hnext += Hn>>26, Hn &= 0x3ffffff. This means that
	@ H0, H2, H3 are guaranteed to be 26 bits wide, while H1 and H4
	@ can be 27. However! In cases when their width exceeds 26 bits
	@ they are limited by 2^26+2^6. This in turn means that *sum*
	@ of the products with these values can still be viewed as sum
	@ of 52-bit numbers as long as the number of addends is not a
	@ power of 2. For example,
	@
	@ H4 = H4*R0 + H3*R1 + H2*R2 + H1*R3 + H0 * R4,
	@
	@ which can't be larger than 5 * (2^26 + 2^6) * (2^26 + 2^6), or
	@ 5 * (2^52 + 2*2^32 + 2^12), which in turn is smaller than
	@ 8 * (2^52) or 2^55. However, the value is then multiplied
	@ by 5, so we should be looking at 5 * 5 * (2^52 + 2^33 + 2^12),
	@ which is less than 32 * (2^52) or 2^57. And when processing
	@ data we are looking at three times as many addends...
	@
	@ In the key setup procedure the pre-reduced H0 is limited by
	@ 5*4+1 and 5*H4 by 5*5 52-bit addends, or 57 bits. But when
	@ hashing the input H0 is limited by (5*4+1)*3 addends, or 58
	@ bits, while 5*H4 by 5*5*3, or 59[!] bits. How is this
	@ relevant? The vmlal.u32 instruction accepts 2x32-bit input
	@ and writes a 2x64-bit result. This means that the result of
	@ reduction has to be compressed upon loop wrap-around. This
	@ can be done in the process of reduction to minimize the
	@ amount of instructions [as well as the amount of 128-bit
	@ instructions, which benefits low-end processors], but one
	@ has to watch for H2 (which is narrower than H0) and 5*H4
	@ not being wider than 58 bits, so that the result of the
	@ right shift by 26 bits fits in 32 bits. This is also useful
	@ on x86, because it allows one to use paddd in place of
	@ paddq, which benefits Atom, where paddq is ridiculously slow.

	vshr.u64	$T0,$D3,#26
	vmovn.i64	$D3#lo,$D3
	 vshr.u64	$T1,$D0,#26
	 vmovn.i64	$D0#lo,$D0
	vadd.i64	$D4,$D4,$T0		@ h3 -> h4
	vbic.i32	$D3#lo,#0xfc000000	@ &=0x03ffffff
	 vadd.i64	$D1,$D1,$T1		@ h0 -> h1
	 vbic.i32	$D0#lo,#0xfc000000

	vshrn.u64	$T0#lo,$D4,#26
	vmovn.i64	$D4#lo,$D4
	 vshr.u64	$T1,$D1,#26
	 vmovn.i64	$D1#lo,$D1
	 vadd.i64	$D2,$D2,$T1		@ h1 -> h2
	vbic.i32	$D4#lo,#0xfc000000
	 vbic.i32	$D1#lo,#0xfc000000

	vadd.i32	$D0#lo,$D0#lo,$T0#lo
	vshl.u32	$T0#lo,$T0#lo,#2
	 vshrn.u64	$T1#lo,$D2,#26
	 vmovn.i64	$D2#lo,$D2
	vadd.i32	$D0#lo,$D0#lo,$T0#lo	@ h4 -> h0
	 vadd.i32	$D3#lo,$D3#lo,$T1#lo	@ h2 -> h3
	 vbic.i32	$D2#lo,#0xfc000000

	vshr.u32	$T0#lo,$D0#lo,#26
	vbic.i32	$D0#lo,#0xfc000000
	 vshr.u32	$T1#lo,$D3#lo,#26
	 vbic.i32	$D3#lo,#0xfc000000
	vadd.i32	$D1#lo,$D1#lo,$T0#lo	@ h0 -> h1
	 vadd.i32	$D4#lo,$D4#lo,$T1#lo	@ h3 -> h4

	subs		$zeros,$zeros,#1
	beq		.Lsquare_break_neon

	add		$tbl0,$ctx,#(48+0*9*4)
	add		$tbl1,$ctx,#(48+1*9*4)

	vtrn.32		$R0,$D0#lo		@ r^2:r^1
	vtrn.32		$R2,$D2#lo
	vtrn.32		$R3,$D3#lo
	vtrn.32		$R1,$D1#lo
	vtrn.32		$R4,$D4#lo

	vshl.u32	$S2,$R2,#2		@ *5
	vshl.u32	$S3,$R3,#2
	vshl.u32	$S1,$R1,#2
	vshl.u32	$S4,$R4,#2
	vadd.i32	$S2,$S2,$R2
	vadd.i32	$S1,$S1,$R1
	vadd.i32	$S3,$S3,$R3
	vadd.i32	$S4,$S4,$R4

	vst4.32		{${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]!
	vst4.32		{${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]!
	vst4.32		{${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]!
	vst4.32		{${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]!
	vst1.32		{${S4}[0]},[$tbl0,:32]
	vst1.32		{${S4}[1]},[$tbl1,:32]

	b		.Lsquare_neon

.align	4
.Lsquare_break_neon:
	add		$tbl0,$ctx,#(48+2*4*9)
	add		$tbl1,$ctx,#(48+3*4*9)

	vmov		$R0,$D0#lo		@ r^4:r^3
	vshl.u32	$S1,$D1#lo,#2		@ *5
	vmov		$R1,$D1#lo
	vshl.u32	$S2,$D2#lo,#2
	vmov		$R2,$D2#lo
	vshl.u32	$S3,$D3#lo,#2
	vmov		$R3,$D3#lo
	vshl.u32	$S4,$D4#lo,#2
	vmov		$R4,$D4#lo
	vadd.i32	$S1,$S1,$D1#lo
	vadd.i32	$S2,$S2,$D2#lo
	vadd.i32	$S3,$S3,$D3#lo
	vadd.i32	$S4,$S4,$D4#lo

	vst4.32		{${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]!
	vst4.32		{${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]!
	vst4.32		{${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]!
	vst4.32		{${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]!
	vst1.32		{${S4}[0]},[$tbl0]
	vst1.32		{${S4}[1]},[$tbl1]

.Lno_init_neon:
	ret				@ bx	lr
.size	poly1305_init_neon,.-poly1305_init_neon

.type	poly1305_blocks_neon,%function
.align	5
poly1305_blocks_neon:
.Lpoly1305_blocks_neon:
	ldr	ip,[$ctx,#36]		@ is_base2_26

	cmp	$len,#64
	blo	.Lpoly1305_blocks

	stmdb	sp!,{r4-r7}
	vstmdb	sp!,{d8-d15}		@ ABI specification says so

	tst	ip,ip			@ is_base2_26?
	bne	.Lbase2_26_neon

	stmdb	sp!,{r1-r3,lr}
	bl	.Lpoly1305_init_neon

	ldr	r4,[$ctx,#0]		@ load hash value base 2^32
	ldr	r5,[$ctx,#4]
	ldr	r6,[$ctx,#8]
	ldr	r7,[$ctx,#12]
	ldr	ip,[$ctx,#16]

	and	r2,r4,#0x03ffffff	@ base 2^32 -> base 2^26
	mov	r3,r4,lsr#26
	 veor	$D0#lo,$D0#lo,$D0#lo
	mov	r4,r5,lsr#20
	orr	r3,r3,r5,lsl#6
	 veor	$D1#lo,$D1#lo,$D1#lo
	mov	r5,r6,lsr#14
	orr	r4,r4,r6,lsl#12
	 veor	$D2#lo,$D2#lo,$D2#lo
	mov	r6,r7,lsr#8
	orr	r5,r5,r7,lsl#18
	 veor	$D3#lo,$D3#lo,$D3#lo
	and	r3,r3,#0x03ffffff
	orr	r6,r6,ip,lsl#24
	 veor	$D4#lo,$D4#lo,$D4#lo
	and	r4,r4,#0x03ffffff
	mov	r1,#1
	and	r5,r5,#0x03ffffff
	str	r1,[$ctx,#36]		@ set is_base2_26

	vmov.32	$D0#lo[0],r2
	vmov.32	$D1#lo[0],r3
	vmov.32	$D2#lo[0],r4
	vmov.32	$D3#lo[0],r5
	vmov.32	$D4#lo[0],r6
	adr	$zeros,.Lzeros

	ldmia	sp!,{r1-r3,lr}
	b	.Lhash_loaded

.align	4
.Lbase2_26_neon:
	@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
	@ load hash value

	veor		$D0#lo,$D0#lo,$D0#lo
	veor		$D1#lo,$D1#lo,$D1#lo
	veor		$D2#lo,$D2#lo,$D2#lo
	veor		$D3#lo,$D3#lo,$D3#lo
	veor		$D4#lo,$D4#lo,$D4#lo
	vld4.32		{$D0#lo[0],$D1#lo[0],$D2#lo[0],$D3#lo[0]},[$ctx]!
	adr		$zeros,.Lzeros
	vld1.32		{$D4#lo[0]},[$ctx]
	sub		$ctx,$ctx,#16		@ rewind

.Lhash_loaded:
	add		$in2,$inp,#32
	mov		$padbit,$padbit,lsl#24
	tst		$len,#31
	beq		.Leven

	vld4.32		{$H0#lo[0],$H1#lo[0],$H2#lo[0],$H3#lo[0]},[$inp]!
	vmov.32		$H4#lo[0],$padbit
	sub		$len,$len,#16
	add		$in2,$inp,#32

# ifdef	__ARMEB__
	vrev32.8	$H0,$H0
	vrev32.8	$H3,$H3
	vrev32.8	$H1,$H1
	vrev32.8	$H2,$H2
# endif
	vsri.u32	$H4#lo,$H3#lo,#8	@ base 2^32 -> base 2^26
	vshl.u32	$H3#lo,$H3#lo,#18

	vsri.u32	$H3#lo,$H2#lo,#14
	vshl.u32	$H2#lo,$H2#lo,#12
	vadd.i32	$H4#hi,$H4#lo,$D4#lo	@ add hash value and move to #hi

	vbic.i32	$H3#lo,#0xfc000000
	vsri.u32	$H2#lo,$H1#lo,#20
	vshl.u32	$H1#lo,$H1#lo,#6

	vbic.i32	$H2#lo,#0xfc000000
	vsri.u32	$H1#lo,$H0#lo,#26
	vadd.i32	$H3#hi,$H3#lo,$D3#lo

	vbic.i32	$H0#lo,#0xfc000000
	vbic.i32	$H1#lo,#0xfc000000
	vadd.i32	$H2#hi,$H2#lo,$D2#lo

	vadd.i32	$H0#hi,$H0#lo,$D0#lo
	vadd.i32	$H1#hi,$H1#lo,$D1#lo

	mov		$tbl1,$zeros
	add		$tbl0,$ctx,#48

	cmp		$len,$len
	b		.Long_tail

.align	4
.Leven:
	subs		$len,$len,#64
	it		lo
	movlo		$in2,$zeros

	vmov.i32	$H4,#1<<24		@ padbit, yes, always
	vld4.32		{$H0#lo,$H1#lo,$H2#lo,$H3#lo},[$inp]	@ inp[0:1]
	add		$inp,$inp,#64
	vld4.32		{$H0#hi,$H1#hi,$H2#hi,$H3#hi},[$in2]	@ inp[2:3] (or 0)
	add		$in2,$in2,#64
	itt		hi
	addhi		$tbl1,$ctx,#(48+1*9*4)
	addhi		$tbl0,$ctx,#(48+3*9*4)

# ifdef	__ARMEB__
	vrev32.8	$H0,$H0
	vrev32.8	$H3,$H3
	vrev32.8	$H1,$H1
	vrev32.8	$H2,$H2
# endif
	vsri.u32	$H4,$H3,#8		@ base 2^32 -> base 2^26
	vshl.u32	$H3,$H3,#18

	vsri.u32	$H3,$H2,#14
	vshl.u32	$H2,$H2,#12

	vbic.i32	$H3,#0xfc000000
	vsri.u32	$H2,$H1,#20
	vshl.u32	$H1,$H1,#6

	vbic.i32	$H2,#0xfc000000
	vsri.u32	$H1,$H0,#26

	vbic.i32	$H0,#0xfc000000
	vbic.i32	$H1,#0xfc000000

	bls		.Lskip_loop

	vld4.32		{${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]!	@ load r^2
	vld4.32		{${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]!	@ load r^4
	vld4.32		{${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]!
	vld4.32		{${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]!
	b		.Loop_neon

.align	5
.Loop_neon:
	@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
	@ ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
	@ ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
	@   \___________________/
	@ ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
	@ ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
	@   \___________________/ \____________________/
	@
	@ Note that we start with inp[2:3]*r^2. This is because it
	@ doesn't depend on reduction in previous iteration.
	@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
	@ d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
	@ d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
	@ d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
	@ d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
	@ d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4

	@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
	@ inp[2:3]*r^2

	vadd.i32	$H2#lo,$H2#lo,$D2#lo	@ accumulate inp[0:1]
	vmull.u32	$D2,$H2#hi,${R0}[1]
	vadd.i32	$H0#lo,$H0#lo,$D0#lo
	vmull.u32	$D0,$H0#hi,${R0}[1]
	vadd.i32	$H3#lo,$H3#lo,$D3#lo
	vmull.u32	$D3,$H3#hi,${R0}[1]
	vmlal.u32	$D2,$H1#hi,${R1}[1]
	vadd.i32	$H1#lo,$H1#lo,$D1#lo
	vmull.u32	$D1,$H1#hi,${R0}[1]

	vadd.i32	$H4#lo,$H4#lo,$D4#lo
	vmull.u32	$D4,$H4#hi,${R0}[1]
	subs		$len,$len,#64
	vmlal.u32	$D0,$H4#hi,${S1}[1]
	it		lo
	movlo		$in2,$zeros
	vmlal.u32	$D3,$H2#hi,${R1}[1]
	vld1.32		${S4}[1],[$tbl1,:32]
	vmlal.u32	$D1,$H0#hi,${R1}[1]
	vmlal.u32	$D4,$H3#hi,${R1}[1]

	vmlal.u32	$D0,$H3#hi,${S2}[1]
	vmlal.u32	$D3,$H1#hi,${R2}[1]
	vmlal.u32	$D4,$H2#hi,${R2}[1]
	vmlal.u32	$D1,$H4#hi,${S2}[1]
	vmlal.u32	$D2,$H0#hi,${R2}[1]

	vmlal.u32	$D3,$H0#hi,${R3}[1]
	vmlal.u32	$D0,$H2#hi,${S3}[1]
	vmlal.u32	$D4,$H1#hi,${R3}[1]
	vmlal.u32	$D1,$H3#hi,${S3}[1]
	vmlal.u32	$D2,$H4#hi,${S3}[1]

	vmlal.u32	$D3,$H4#hi,${S4}[1]
	vmlal.u32	$D0,$H1#hi,${S4}[1]
	vmlal.u32	$D4,$H0#hi,${R4}[1]
	vmlal.u32	$D1,$H2#hi,${S4}[1]
	vmlal.u32	$D2,$H3#hi,${S4}[1]

	vld4.32		{$H0#hi,$H1#hi,$H2#hi,$H3#hi},[$in2]	@ inp[2:3] (or 0)
	add		$in2,$in2,#64

	@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
	@ (hash+inp[0:1])*r^4 and accumulate

	vmlal.u32	$D3,$H3#lo,${R0}[0]
	vmlal.u32	$D0,$H0#lo,${R0}[0]
	vmlal.u32	$D4,$H4#lo,${R0}[0]
	vmlal.u32	$D1,$H1#lo,${R0}[0]
	vmlal.u32	$D2,$H2#lo,${R0}[0]
	vld1.32		${S4}[0],[$tbl0,:32]

	vmlal.u32	$D3,$H2#lo,${R1}[0]
	vmlal.u32	$D0,$H4#lo,${S1}[0]
	vmlal.u32	$D4,$H3#lo,${R1}[0]
	vmlal.u32	$D1,$H0#lo,${R1}[0]
	vmlal.u32	$D2,$H1#lo,${R1}[0]

	vmlal.u32	$D3,$H1#lo,${R2}[0]
	vmlal.u32	$D0,$H3#lo,${S2}[0]
	vmlal.u32	$D4,$H2#lo,${R2}[0]
	vmlal.u32	$D1,$H4#lo,${S2}[0]
	vmlal.u32	$D2,$H0#lo,${R2}[0]

	vmlal.u32	$D3,$H0#lo,${R3}[0]
	vmlal.u32	$D0,$H2#lo,${S3}[0]
	vmlal.u32	$D4,$H1#lo,${R3}[0]
	vmlal.u32	$D1,$H3#lo,${S3}[0]
	vmlal.u32	$D3,$H4#lo,${S4}[0]

	vmlal.u32	$D2,$H4#lo,${S3}[0]
	vmlal.u32	$D0,$H1#lo,${S4}[0]
	vmlal.u32	$D4,$H0#lo,${R4}[0]
	vmov.i32	$H4,#1<<24		@ padbit, yes, always
	vmlal.u32	$D1,$H2#lo,${S4}[0]
	vmlal.u32	$D2,$H3#lo,${S4}[0]

	vld4.32		{$H0#lo,$H1#lo,$H2#lo,$H3#lo},[$inp]	@ inp[0:1]
	add		$inp,$inp,#64
# ifdef	__ARMEB__
	vrev32.8	$H0,$H0
	vrev32.8	$H1,$H1
	vrev32.8	$H2,$H2
	vrev32.8	$H3,$H3
# endif

	@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
	@ lazy reduction interleaved with base 2^32 -> base 2^26 of
	@ inp[0:3] previously loaded to $H0-$H3 and smashed to $H0-$H4.

	vshr.u64	$T0,$D3,#26
	vmovn.i64	$D3#lo,$D3
	 vshr.u64	$T1,$D0,#26
	 vmovn.i64	$D0#lo,$D0
	vadd.i64	$D4,$D4,$T0		@ h3 -> h4
	vbic.i32	$D3#lo,#0xfc000000
	  vsri.u32	$H4,$H3,#8		@ base 2^32 -> base 2^26
	 vadd.i64	$D1,$D1,$T1		@ h0 -> h1
	  vshl.u32	$H3,$H3,#18
	 vbic.i32	$D0#lo,#0xfc000000

	vshrn.u64	$T0#lo,$D4,#26
	vmovn.i64	$D4#lo,$D4
	 vshr.u64	$T1,$D1,#26
	 vmovn.i64	$D1#lo,$D1
	 vadd.i64	$D2,$D2,$T1		@ h1 -> h2
	  vsri.u32	$H3,$H2,#14
	vbic.i32	$D4#lo,#0xfc000000
	  vshl.u32	$H2,$H2,#12
	 vbic.i32	$D1#lo,#0xfc000000

	vadd.i32	$D0#lo,$D0#lo,$T0#lo
	vshl.u32	$T0#lo,$T0#lo,#2
	  vbic.i32	$H3,#0xfc000000
	 vshrn.u64	$T1#lo,$D2,#26
	 vmovn.i64	$D2#lo,$D2
	vaddl.u32	$D0,$D0#lo,$T0#lo	@ h4 -> h0 [widen for a sec]
	  vsri.u32	$H2,$H1,#20
	 vadd.i32	$D3#lo,$D3#lo,$T1#lo	@ h2 -> h3
	  vshl.u32	$H1,$H1,#6
	 vbic.i32	$D2#lo,#0xfc000000
	  vbic.i32	$H2,#0xfc000000

	vshrn.u64	$T0#lo,$D0,#26		@ re-narrow
	vmovn.i64	$D0#lo,$D0
	  vsri.u32	$H1,$H0,#26
	  vbic.i32	$H0,#0xfc000000
	 vshr.u32	$T1#lo,$D3#lo,#26
	 vbic.i32	$D3#lo,#0xfc000000
	vbic.i32	$D0#lo,#0xfc000000
	vadd.i32	$D1#lo,$D1#lo,$T0#lo	@ h0 -> h1
	 vadd.i32	$D4#lo,$D4#lo,$T1#lo	@ h3 -> h4
	  vbic.i32	$H1,#0xfc000000

	bhi		.Loop_neon

.Lskip_loop:
	@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
	@ multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1

	add		$tbl1,$ctx,#(48+0*9*4)
	add		$tbl0,$ctx,#(48+1*9*4)
	adds		$len,$len,#32
	it		ne
	movne		$len,#0
	bne		.Long_tail

	vadd.i32	$H2#hi,$H2#lo,$D2#lo	@ add hash value and move to #hi
	vadd.i32	$H0#hi,$H0#lo,$D0#lo
	vadd.i32	$H3#hi,$H3#lo,$D3#lo
	vadd.i32	$H1#hi,$H1#lo,$D1#lo
	vadd.i32	$H4#hi,$H4#lo,$D4#lo

.Long_tail:
	vld4.32		{${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]!	@ load r^1
	vld4.32		{${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]!	@ load r^2

	vadd.i32	$H2#lo,$H2#lo,$D2#lo	@ can be redundant
	vmull.u32	$D2,$H2#hi,$R0
	vadd.i32	$H0#lo,$H0#lo,$D0#lo
	vmull.u32	$D0,$H0#hi,$R0
	vadd.i32	$H3#lo,$H3#lo,$D3#lo
	vmull.u32	$D3,$H3#hi,$R0
	vadd.i32	$H1#lo,$H1#lo,$D1#lo
	vmull.u32	$D1,$H1#hi,$R0
	vadd.i32	$H4#lo,$H4#lo,$D4#lo
	vmull.u32	$D4,$H4#hi,$R0

	vmlal.u32	$D0,$H4#hi,$S1
	vld4.32		{${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]!
	vmlal.u32	$D3,$H2#hi,$R1
	vld4.32		{${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]!
	vmlal.u32	$D1,$H0#hi,$R1
	vmlal.u32	$D4,$H3#hi,$R1
	vmlal.u32	$D2,$H1#hi,$R1

	vmlal.u32	$D3,$H1#hi,$R2
	vld1.32		${S4}[1],[$tbl1,:32]
	vmlal.u32	$D0,$H3#hi,$S2
	vld1.32		${S4}[0],[$tbl0,:32]
	vmlal.u32	$D4,$H2#hi,$R2
	vmlal.u32	$D1,$H4#hi,$S2
	vmlal.u32	$D2,$H0#hi,$R2

	vmlal.u32	$D3,$H0#hi,$R3
	 it		ne
	 addne		$tbl1,$ctx,#(48+2*9*4)
	vmlal.u32	$D0,$H2#hi,$S3
	 it		ne
	 addne		$tbl0,$ctx,#(48+3*9*4)
	vmlal.u32	$D4,$H1#hi,$R3
	vmlal.u32	$D1,$H3#hi,$S3
	vmlal.u32	$D2,$H4#hi,$S3

	vmlal.u32	$D3,$H4#hi,$S4
	 vorn		$MASK,$MASK,$MASK	@ all-ones, can be redundant
	vmlal.u32	$D0,$H1#hi,$S4
	 vshr.u64	$MASK,$MASK,#38
	vmlal.u32	$D4,$H0#hi,$R4
	vmlal.u32	$D1,$H2#hi,$S4
	vmlal.u32	$D2,$H3#hi,$S4

	beq		.Lshort_tail

	@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
	@ (hash+inp[0:1])*r^4:r^3 and accumulate

	vld4.32		{${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]!	@ load r^3
	vld4.32		{${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]!	@ load r^4

	vmlal.u32	$D2,$H2#lo,$R0
	vmlal.u32	$D0,$H0#lo,$R0
	vmlal.u32	$D3,$H3#lo,$R0
	vmlal.u32	$D1,$H1#lo,$R0
	vmlal.u32	$D4,$H4#lo,$R0

	vmlal.u32	$D0,$H4#lo,$S1
	vld4.32		{${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]!
	vmlal.u32	$D3,$H2#lo,$R1
	vld4.32		{${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]!
	vmlal.u32	$D1,$H0#lo,$R1
	vmlal.u32	$D4,$H3#lo,$R1
	vmlal.u32	$D2,$H1#lo,$R1

	vmlal.u32	$D3,$H1#lo,$R2
	vld1.32		${S4}[1],[$tbl1,:32]
	vmlal.u32	$D0,$H3#lo,$S2
	vld1.32		${S4}[0],[$tbl0,:32]
	vmlal.u32	$D4,$H2#lo,$R2
	vmlal.u32	$D1,$H4#lo,$S2
	vmlal.u32	$D2,$H0#lo,$R2

	vmlal.u32	$D3,$H0#lo,$R3
	vmlal.u32	$D0,$H2#lo,$S3
	vmlal.u32	$D4,$H1#lo,$R3
	vmlal.u32	$D1,$H3#lo,$S3
	vmlal.u32	$D2,$H4#lo,$S3

	vmlal.u32	$D3,$H4#lo,$S4
	 vorn		$MASK,$MASK,$MASK	@ all-ones
	vmlal.u32	$D0,$H1#lo,$S4
	 vshr.u64	$MASK,$MASK,#38
	vmlal.u32	$D4,$H0#lo,$R4
	vmlal.u32	$D1,$H2#lo,$S4
	vmlal.u32	$D2,$H3#lo,$S4

.Lshort_tail:
	@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
	@ horizontal addition

	vadd.i64	$D3#lo,$D3#lo,$D3#hi
	vadd.i64	$D0#lo,$D0#lo,$D0#hi
	vadd.i64	$D4#lo,$D4#lo,$D4#hi
	vadd.i64	$D1#lo,$D1#lo,$D1#hi
	vadd.i64	$D2#lo,$D2#lo,$D2#hi

	@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
	@ lazy reduction, but without narrowing

	vshr.u64	$T0,$D3,#26
	vand.i64	$D3,$D3,$MASK
	 vshr.u64	$T1,$D0,#26
	 vand.i64	$D0,$D0,$MASK
	vadd.i64	$D4,$D4,$T0		@ h3 -> h4
	 vadd.i64	$D1,$D1,$T1		@ h0 -> h1

	vshr.u64	$T0,$D4,#26
	vand.i64	$D4,$D4,$MASK
	 vshr.u64	$T1,$D1,#26
	 vand.i64	$D1,$D1,$MASK
	 vadd.i64	$D2,$D2,$T1		@ h1 -> h2

	vadd.i64	$D0,$D0,$T0
	vshl.u64	$T0,$T0,#2
	 vshr.u64	$T1,$D2,#26
	 vand.i64	$D2,$D2,$MASK
	vadd.i64	$D0,$D0,$T0		@ h4 -> h0
	 vadd.i64	$D3,$D3,$T1		@ h2 -> h3

	vshr.u64	$T0,$D0,#26
	vand.i64	$D0,$D0,$MASK
	 vshr.u64	$T1,$D3,#26
	 vand.i64	$D3,$D3,$MASK
	vadd.i64	$D1,$D1,$T0		@ h0 -> h1
	 vadd.i64	$D4,$D4,$T1		@ h3 -> h4

	cmp		$len,#0
	bne		.Leven

	@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
	@ store hash value

	vst4.32		{$D0#lo[0],$D1#lo[0],$D2#lo[0],$D3#lo[0]},[$ctx]!
	vst1.32		{$D4#lo[0]},[$ctx]

	vldmia	sp!,{d8-d15}			@ epilogue
	ldmia	sp!,{r4-r7}
	ret					@ bx	lr
.size	poly1305_blocks_neon,.-poly1305_blocks_neon

.align	5
.Lzeros:
.long	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
#ifndef __KERNEL__
.LOPENSSL_armcap:
# ifdef	_WIN32
.word	OPENSSL_armcap_P
# else
.word	OPENSSL_armcap_P-.Lpoly1305_init
# endif
.comm	OPENSSL_armcap_P,4,4
.hidden	OPENSSL_armcap_P
#endif
#endif
___
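
# Base 2^32 -> base 2^26 splitting as performed repeatedly above,
# modeled in Perl for the four-word (key) case; an illustrative
# sketch only, not used by the generator:
sub base2_26_ref {
    my @w = @_;				# four 32-bit LE words
    my $m = 0x03ffffff;
    return (  $w[0]			& $m,
	     (($w[0]>>26)|($w[1]<<6))	& $m,
	     (($w[1]>>20)|($w[2]<<12))	& $m,
	     (($w[2]>>14)|($w[3]<<18))	& $m,
	       $w[3]>>8 );		# top limb; no mask needed for the clamped key
}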
}	}
$code.=<<___;
.asciz	"Poly1305 for ARMv4/NEON, CRYPTOGAMS by \@dot-asm"
.align	2
___

foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/geo;

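	# map qN#lo/qN#hi pseudo-operands to their 64-bit register
	# halves, e.g. q5#lo -> d10 and q5#hi -> d11: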
	s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo	or
	s/\bret\b/bx	lr/go						or
	s/\bbx\s+lr\b/.word\t0xe12fff1e/go;	# make it possible to compile with -march=armv4

	print $_,"\n";
}
close STDOUT;	# enforce flush