#include <linux/linkage.h>

.section .rodata.cst32.CTR2BL, "aM", @progbits, 32
.align 32
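# Per-lane counter increments {0, 1}, added to the replicated fourth state
# row so the two 128-bit lanes hold consecutive block counters.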
CTR2BL:	.octa 0x00000000000000000000000000000000
	.octa 0x00000000000000000000000000000001

.section .rodata.cst32.CTR4BL, "aM", @progbits, 32
.align 32
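# Counter increments {2, 3} for the second block pair of the four-block
# variant.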
CTR4BL:	.octa 0x00000000000000000000000000000002
	.octa 0x00000000000000000000000000000003

.section .rodata.cst32.CTR8BL, "aM", @progbits, 32
.align 32
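# Packed 32-bit counter increments 0..7 for eight consecutive blocks.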
CTR8BL:	.octa 0x00000003000000020000000100000000
	.octa 0x00000007000000060000000500000004

.text

SYM_FUNC_START(chacha_2block_xor_avx512vl)
	# %rdi: Input state matrix, s
	# %rsi: up to 2 data blocks output, o
	# %rdx: up to 2 data blocks input, i
	# %rcx: input/output length in bytes
	# %r8d: nrounds

	# This function encrypts two ChaCha blocks by loading the state
	# matrix twice across four AVX registers. It performs matrix operations
	# on four words in each matrix in parallel, but requires shuffling to
	# rearrange the words after each round.
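
	# For reference, one ChaCha quarter-round on words (a, b, c, d) is:
	#
	#	a += b; d = rotl32(d ^ a, 16);
	#	c += d; b = rotl32(b ^ c, 12);
	#	a += b; d = rotl32(d ^ a,  8);
	#	c += d; b = rotl32(b ^ c,  7);
	#
	# Each of %ymm0..%ymm3 holds one state row replicated into both
	# 128-bit lanes, so each vpaddd/vpxord/vprold triplet below performs
	# that step of the four column quarter-rounds for both blocks at once;
	# the vpshufd rotations then realign rows 1-3 so the same code covers
	# the diagonal quarter-rounds.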

	vzeroupper

	# x0..3[0-2] = s0..3
	vbroadcasti128	0x00(%rdi),%ymm0
	vbroadcasti128	0x10(%rdi),%ymm1
	vbroadcasti128	0x20(%rdi),%ymm2
	vbroadcasti128	0x30(%rdi),%ymm3

	vpaddd	CTR2BL(%rip),%ymm3,%ymm3

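	# Keep a copy of the initial per-lane state (including the counters)
	# in %ymm8..%ymm11 for the final feed-forward addition.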
	vmovdqa	%ymm0,%ymm8
	vmovdqa	%ymm1,%ymm9
	vmovdqa	%ymm2,%ymm10
	vmovdqa	%ymm3,%ymm11

.Ldoubleround:

	# x0 += x1, x3 = rotl32(x3 ^ x0, 16)
	vpaddd	%ymm1,%ymm0,%ymm0
	vpxord	%ymm0,%ymm3,%ymm3
	vprold	$16,%ymm3,%ymm3

	# x2 += x3, x1 = rotl32(x1 ^ x2, 12)
	vpaddd	%ymm3,%ymm2,%ymm2
	vpxord	%ymm2,%ymm1,%ymm1
	vprold	$12,%ymm1,%ymm1

	# x0 += x1, x3 = rotl32(x3 ^ x0, 8)
	vpaddd	%ymm1,%ymm0,%ymm0
	vpxord	%ymm0,%ymm3,%ymm3
	vprold	$8,%ymm3,%ymm3

	# x2 += x3, x1 = rotl32(x1 ^ x2, 7)
	vpaddd	%ymm3,%ymm2,%ymm2
	vpxord	%ymm2,%ymm1,%ymm1
	vprold	$7,%ymm1,%ymm1

	# x1 = shuffle32(x1, MASK(0, 3, 2, 1))
	vpshufd	$0x39,%ymm1,%ymm1
	# x2 = shuffle32(x2, MASK(1, 0, 3, 2))
	vpshufd	$0x4e,%ymm2,%ymm2
	# x3 = shuffle32(x3, MASK(2, 1, 0, 3))
	vpshufd	$0x93,%ymm3,%ymm3

	# x0 += x1, x3 = rotl32(x3 ^ x0, 16)
	vpaddd	%ymm1,%ymm0,%ymm0
	vpxord	%ymm0,%ymm3,%ymm3
	vprold	$16,%ymm3,%ymm3

	# x2 += x3, x1 = rotl32(x1 ^ x2, 12)
	vpaddd	%ymm3,%ymm2,%ymm2
	vpxord	%ymm2,%ymm1,%ymm1
	vprold	$12,%ymm1,%ymm1

	# x0 += x1, x3 = rotl32(x3 ^ x0, 8)
	vpaddd	%ymm1,%ymm0,%ymm0
	vpxord	%ymm0,%ymm3,%ymm3
	vprold	$8,%ymm3,%ymm3

	# x2 += x3, x1 = rotl32(x1 ^ x2, 7)
	vpaddd	%ymm3,%ymm2,%ymm2
	vpxord	%ymm2,%ymm1,%ymm1
	vprold	$7,%ymm1,%ymm1

	# x1 = shuffle32(x1, MASK(2, 1, 0, 3))
	vpshufd	$0x93,%ymm1,%ymm1
	# x2 = shuffle32(x2, MASK(1, 0, 3, 2))
	vpshufd	$0x4e,%ymm2,%ymm2
	# x3 = shuffle32(x3, MASK(0, 3, 2, 1))
	vpshufd	$0x39,%ymm3,%ymm3

	sub	$2,%r8d
	jnz	.Ldoubleround

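	# The keystream is combined with the input 16 bytes at a time; before
	# each store the remaining length is checked, and a short length drops
	# into .Lxorpart2 with the keystream for the current chunk in %xmm7.
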
	# o0 = i0 ^ (x0 + s0)
	vpaddd	%ymm8,%ymm0,%ymm7
	cmp	$0x10,%rcx
	jl	.Lxorpart2
	vpxord	0x00(%rdx),%xmm7,%xmm6
	vmovdqu	%xmm6,0x00(%rsi)
	vextracti128	$1,%ymm7,%xmm0
	# o1 = i1 ^ (x1 + s1)
	vpaddd	%ymm9,%ymm1,%ymm7
	cmp	$0x20,%rcx
	jl	.Lxorpart2
	vpxord	0x10(%rdx),%xmm7,%xmm6
	vmovdqu	%xmm6,0x10(%rsi)
	vextracti128	$1,%ymm7,%xmm1
	# o2 = i2 ^ (x2 + s2)
	vpaddd	%ymm10,%ymm2,%ymm7
	cmp	$0x30,%rcx
	jl	.Lxorpart2
	vpxord	0x20(%rdx),%xmm7,%xmm6
	vmovdqu	%xmm6,0x20(%rsi)
	vextracti128	$1,%ymm7,%xmm2
	# o3 = i3 ^ (x3 + s3)
	vpaddd	%ymm11,%ymm3,%ymm7
	cmp	$0x40,%rcx
	jl	.Lxorpart2
	vpxord	0x30(%rdx),%xmm7,%xmm6
	vmovdqu	%xmm6,0x30(%rsi)
	vextracti128	$1,%ymm7,%xmm3

	# xor and write second block
	vmovdqa	%xmm0,%xmm7
	cmp	$0x50,%rcx
	jl	.Lxorpart2
	vpxord	0x40(%rdx),%xmm7,%xmm6
	vmovdqu	%xmm6,0x40(%rsi)

	vmovdqa	%xmm1,%xmm7
	cmp	$0x60,%rcx
	jl	.Lxorpart2
	vpxord	0x50(%rdx),%xmm7,%xmm6
	vmovdqu	%xmm6,0x50(%rsi)

	vmovdqa	%xmm2,%xmm7
	cmp	$0x70,%rcx
	jl	.Lxorpart2
	vpxord	0x60(%rdx),%xmm7,%xmm6
	vmovdqu	%xmm6,0x60(%rsi)

	vmovdqa	%xmm3,%xmm7
	cmp	$0x80,%rcx
	jl	.Lxorpart2
	vpxord	0x70(%rdx),%xmm7,%xmm6
	vmovdqu	%xmm6,0x70(%rsi)

.Ldone2:
	vzeroupper
	RET

.Lxorpart2:
	# xor remaining bytes from partial register into output
	mov	%rcx,%rax
	and	$0xf,%rcx
	jz	.Ldone2
	mov	%rax,%r9
	and	$~0xf,%r9

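	# Build a byte mask with the low (len & 0xf) bits set:
	# %rax = (1 << %cl) - 1, loaded into %k1 for the masked
	# load/xor/store of the trailing partial chunk below.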
	mov	$1,%rax
	shld	%cl,%rax,%rax
	sub	$1,%rax
	kmovq	%rax,%k1

	vmovdqu8	(%rdx,%r9),%xmm1{%k1}{z}
	vpxord	%xmm7,%xmm1,%xmm1
	vmovdqu8	%xmm1,(%rsi,%r9){%k1}

	jmp	.Ldone2

SYM_FUNC_END(chacha_2block_xor_avx512vl)

SYM_FUNC_START(chacha_4block_xor_avx512vl)
	# %rdi: Input state matrix, s
	# %rsi: up to 4 data blocks output, o
	# %rdx: up to 4 data blocks input, i
	# %rcx: input/output length in bytes
	# %r8d: nrounds

	# This function encrypts four ChaCha blocks by loading the state
	# matrix four times across eight AVX registers. It performs matrix
	# operations on four words in two matrices in parallel, sequentially
	# to the operations on the four words of the other two matrices.
	# Since the required word shuffling has a rather high latency, we can
	# do the arithmetic on two matrix-pairs without much slowdown.
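
	# The second state copy lives in %ymm4..%ymm7 with counter increments
	# +2/+3 (via CTR4BL); each round instruction below is simply repeated
	# for that pair, so the two independent dependency chains can overlap
	# while the shuffle results are still in flight.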

	vzeroupper

	# x0..3[0-4] = s0..3
	vbroadcasti128	0x00(%rdi),%ymm0
	vbroadcasti128	0x10(%rdi),%ymm1
	vbroadcasti128	0x20(%rdi),%ymm2
	vbroadcasti128	0x30(%rdi),%ymm3

	vmovdqa	%ymm0,%ymm4
	vmovdqa	%ymm1,%ymm5
	vmovdqa	%ymm2,%ymm6
	vmovdqa	%ymm3,%ymm7

	vpaddd	CTR2BL(%rip),%ymm3,%ymm3
	vpaddd	CTR4BL(%rip),%ymm7,%ymm7

	vmovdqa	%ymm0,%ymm11
	vmovdqa	%ymm1,%ymm12
	vmovdqa	%ymm2,%ymm13
	vmovdqa	%ymm3,%ymm14
	vmovdqa	%ymm7,%ymm15

.Ldoubleround4:

	# x0 += x1, x3 = rotl32(x3 ^ x0, 16)
	vpaddd	%ymm1,%ymm0,%ymm0
	vpxord	%ymm0,%ymm3,%ymm3
	vprold	$16,%ymm3,%ymm3

	vpaddd	%ymm5,%ymm4,%ymm4
	vpxord	%ymm4,%ymm7,%ymm7
	vprold	$16,%ymm7,%ymm7

	# x2 += x3, x1 = rotl32(x1 ^ x2, 12)
	vpaddd	%ymm3,%ymm2,%ymm2
	vpxord	%ymm2,%ymm1,%ymm1
	vprold	$12,%ymm1,%ymm1

	vpaddd	%ymm7,%ymm6,%ymm6
	vpxord	%ymm6,%ymm5,%ymm5
	vprold	$12,%ymm5,%ymm5

	# x0 += x1, x3 = rotl32(x3 ^ x0, 8)
	vpaddd	%ymm1,%ymm0,%ymm0
	vpxord	%ymm0,%ymm3,%ymm3
	vprold	$8,%ymm3,%ymm3

	vpaddd	%ymm5,%ymm4,%ymm4
	vpxord	%ymm4,%ymm7,%ymm7
	vprold	$8,%ymm7,%ymm7

	# x2 += x3, x1 = rotl32(x1 ^ x2, 7)
	vpaddd	%ymm3,%ymm2,%ymm2
	vpxord	%ymm2,%ymm1,%ymm1
	vprold	$7,%ymm1,%ymm1

	vpaddd	%ymm7,%ymm6,%ymm6
	vpxord	%ymm6,%ymm5,%ymm5
	vprold	$7,%ymm5,%ymm5

	# x1 = shuffle32(x1, MASK(0, 3, 2, 1))
	vpshufd	$0x39,%ymm1,%ymm1
	vpshufd	$0x39,%ymm5,%ymm5
	# x2 = shuffle32(x2, MASK(1, 0, 3, 2))
	vpshufd	$0x4e,%ymm2,%ymm2
	vpshufd	$0x4e,%ymm6,%ymm6
	# x3 = shuffle32(x3, MASK(2, 1, 0, 3))
	vpshufd	$0x93,%ymm3,%ymm3
	vpshufd	$0x93,%ymm7,%ymm7

	# x0 += x1, x3 = rotl32(x3 ^ x0, 16)
	vpaddd	%ymm1,%ymm0,%ymm0
	vpxord	%ymm0,%ymm3,%ymm3
	vprold	$16,%ymm3,%ymm3

	vpaddd	%ymm5,%ymm4,%ymm4
	vpxord	%ymm4,%ymm7,%ymm7
	vprold	$16,%ymm7,%ymm7

	# x2 += x3, x1 = rotl32(x1 ^ x2, 12)
	vpaddd	%ymm3,%ymm2,%ymm2
	vpxord	%ymm2,%ymm1,%ymm1
	vprold	$12,%ymm1,%ymm1

	vpaddd	%ymm7,%ymm6,%ymm6
	vpxord	%ymm6,%ymm5,%ymm5
	vprold	$12,%ymm5,%ymm5

	# x0 += x1, x3 = rotl32(x3 ^ x0, 8)
	vpaddd	%ymm1,%ymm0,%ymm0
	vpxord	%ymm0,%ymm3,%ymm3
	vprold	$8,%ymm3,%ymm3

	vpaddd	%ymm5,%ymm4,%ymm4
	vpxord	%ymm4,%ymm7,%ymm7
	vprold	$8,%ymm7,%ymm7

	# x2 += x3, x1 = rotl32(x1 ^ x2, 7)
	vpaddd	%ymm3,%ymm2,%ymm2
	vpxord	%ymm2,%ymm1,%ymm1
	vprold	$7,%ymm1,%ymm1

	vpaddd	%ymm7,%ymm6,%ymm6
	vpxord	%ymm6,%ymm5,%ymm5
	vprold	$7,%ymm5,%ymm5

	# x1 = shuffle32(x1, MASK(2, 1, 0, 3))
	vpshufd	$0x93,%ymm1,%ymm1
	vpshufd	$0x93,%ymm5,%ymm5
	# x2 = shuffle32(x2, MASK(1, 0, 3, 2))
	vpshufd	$0x4e,%ymm2,%ymm2
	vpshufd	$0x4e,%ymm6,%ymm6
	# x3 = shuffle32(x3, MASK(0, 3, 2, 1))
	vpshufd	$0x39,%ymm3,%ymm3
	vpshufd	$0x39,%ymm7,%ymm7

	sub	$2,%r8d
	jnz	.Ldoubleround4

	# o0 = i0 ^ (x0 + s0), first block
	vpaddd	%ymm11,%ymm0,%ymm10
	cmp	$0x10,%rcx
	jl	.Lxorpart4
	vpxord	0x00(%rdx),%xmm10,%xmm9
	vmovdqu	%xmm9,0x00(%rsi)
	vextracti128	$1,%ymm10,%xmm0
	# o1 = i1 ^ (x1 + s1), first block
	vpaddd	%ymm12,%ymm1,%ymm10
	cmp	$0x20,%rcx
	jl	.Lxorpart4
	vpxord	0x10(%rdx),%xmm10,%xmm9
	vmovdqu	%xmm9,0x10(%rsi)
	vextracti128	$1,%ymm10,%xmm1
	# o2 = i2 ^ (x2 + s2), first block
	vpaddd	%ymm13,%ymm2,%ymm10
	cmp	$0x30,%rcx
	jl	.Lxorpart4
	vpxord	0x20(%rdx),%xmm10,%xmm9
	vmovdqu	%xmm9,0x20(%rsi)
	vextracti128	$1,%ymm10,%xmm2
	# o3 = i3 ^ (x3 + s3), first block
	vpaddd	%ymm14,%ymm3,%ymm10
	cmp	$0x40,%rcx
	jl	.Lxorpart4
	vpxord	0x30(%rdx),%xmm10,%xmm9
	vmovdqu	%xmm9,0x30(%rsi)
	vextracti128	$1,%ymm10,%xmm3

	# xor and write second block
	vmovdqa	%xmm0,%xmm10
	cmp	$0x50,%rcx
	jl	.Lxorpart4
	vpxord	0x40(%rdx),%xmm10,%xmm9
	vmovdqu	%xmm9,0x40(%rsi)

	vmovdqa	%xmm1,%xmm10
	cmp	$0x60,%rcx
	jl	.Lxorpart4
	vpxord	0x50(%rdx),%xmm10,%xmm9
	vmovdqu	%xmm9,0x50(%rsi)

	vmovdqa	%xmm2,%xmm10
	cmp	$0x70,%rcx
	jl	.Lxorpart4
	vpxord	0x60(%rdx),%xmm10,%xmm9
	vmovdqu	%xmm9,0x60(%rsi)

	vmovdqa	%xmm3,%xmm10
	cmp	$0x80,%rcx
	jl	.Lxorpart4
	vpxord	0x70(%rdx),%xmm10,%xmm9
	vmovdqu	%xmm9,0x70(%rsi)

	# o0 = i0 ^ (x0 + s0), third block
	vpaddd	%ymm11,%ymm4,%ymm10
	cmp	$0x90,%rcx
	jl	.Lxorpart4
	vpxord	0x80(%rdx),%xmm10,%xmm9
	vmovdqu	%xmm9,0x80(%rsi)
	vextracti128	$1,%ymm10,%xmm4
	# o1 = i1 ^ (x1 + s1), third block
	vpaddd	%ymm12,%ymm5,%ymm10
	cmp	$0xa0,%rcx
	jl	.Lxorpart4
	vpxord	0x90(%rdx),%xmm10,%xmm9
	vmovdqu	%xmm9,0x90(%rsi)
	vextracti128	$1,%ymm10,%xmm5
	# o2 = i2 ^ (x2 + s2), third block
	vpaddd	%ymm13,%ymm6,%ymm10
	cmp	$0xb0,%rcx
	jl	.Lxorpart4
	vpxord	0xa0(%rdx),%xmm10,%xmm9
	vmovdqu	%xmm9,0xa0(%rsi)
	vextracti128	$1,%ymm10,%xmm6
	# o3 = i3 ^ (x3 + s3), third block
	vpaddd	%ymm15,%ymm7,%ymm10
	cmp	$0xc0,%rcx
	jl	.Lxorpart4
	vpxord	0xb0(%rdx),%xmm10,%xmm9
	vmovdqu	%xmm9,0xb0(%rsi)
	vextracti128	$1,%ymm10,%xmm7

	# xor and write fourth block
	vmovdqa	%xmm4,%xmm10
	cmp	$0xd0,%rcx
	jl	.Lxorpart4
	vpxord	0xc0(%rdx),%xmm10,%xmm9
	vmovdqu	%xmm9,0xc0(%rsi)

	vmovdqa	%xmm5,%xmm10
	cmp	$0xe0,%rcx
	jl	.Lxorpart4
	vpxord	0xd0(%rdx),%xmm10,%xmm9
	vmovdqu	%xmm9,0xd0(%rsi)

	vmovdqa	%xmm6,%xmm10
	cmp	$0xf0,%rcx
	jl	.Lxorpart4
	vpxord	0xe0(%rdx),%xmm10,%xmm9
	vmovdqu	%xmm9,0xe0(%rsi)

	vmovdqa	%xmm7,%xmm10
	cmp	$0x100,%rcx
	jl	.Lxorpart4
	vpxord	0xf0(%rdx),%xmm10,%xmm9
	vmovdqu	%xmm9,0xf0(%rsi)

.Ldone4:
	vzeroupper
	RET

.Lxorpart4:
	# xor remaining bytes from partial register into output
	mov	%rcx,%rax
	and	$0xf,%rcx
	jz	.Ldone4
	mov	%rax,%r9
	and	$~0xf,%r9

	mov	$1,%rax
	shld	%cl,%rax,%rax
	sub	$1,%rax
	kmovq	%rax,%k1

	vmovdqu8	(%rdx,%r9),%xmm1{%k1}{z}
	vpxord	%xmm10,%xmm1,%xmm1
	vmovdqu8	%xmm1,(%rsi,%r9){%k1}

	jmp	.Ldone4

SYM_FUNC_END(chacha_4block_xor_avx512vl)

SYM_FUNC_START(chacha_8block_xor_avx512vl)
	# %rdi: Input state matrix, s
	# %rsi: up to 8 data blocks output, o
	# %rdx: up to 8 data blocks input, i
	# %rcx: input/output length in bytes
	# %r8d: nrounds

	# This function encrypts eight consecutive ChaCha blocks by loading
	# the state matrix in AVX registers eight times. Compared to AVX2, this
	# mostly benefits from the new rotate instructions in VL and the
	# additional registers.
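
	# Each of %ymm0..%ymm15 holds one of the 16 state words, broadcast
	# across eight 32-bit lanes (one lane per block), with %ymm12 carrying
	# the eight consecutive block counters; %ymm16..%ymm31 preserve the
	# initial state for the feed-forward addition after the rounds. The
	# vprold rotates replace the shift/shift/or sequences needed on AVX2.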

	vzeroupper

	# x0..15[0-7] = s[0..15]
	vpbroadcastd	0x00(%rdi),%ymm0
	vpbroadcastd	0x04(%rdi),%ymm1
	vpbroadcastd	0x08(%rdi),%ymm2
	vpbroadcastd	0x0c(%rdi),%ymm3
	vpbroadcastd	0x10(%rdi),%ymm4
	vpbroadcastd	0x14(%rdi),%ymm5
	vpbroadcastd	0x18(%rdi),%ymm6
	vpbroadcastd	0x1c(%rdi),%ymm7
	vpbroadcastd	0x20(%rdi),%ymm8
	vpbroadcastd	0x24(%rdi),%ymm9
	vpbroadcastd	0x28(%rdi),%ymm10
	vpbroadcastd	0x2c(%rdi),%ymm11
	vpbroadcastd	0x30(%rdi),%ymm12
	vpbroadcastd	0x34(%rdi),%ymm13
	vpbroadcastd	0x38(%rdi),%ymm14
	vpbroadcastd	0x3c(%rdi),%ymm15

	# x12 += counter values 0-7
	vpaddd	CTR8BL(%rip),%ymm12,%ymm12

	vmovdqa64	%ymm0,%ymm16
	vmovdqa64	%ymm1,%ymm17
	vmovdqa64	%ymm2,%ymm18
	vmovdqa64	%ymm3,%ymm19
	vmovdqa64	%ymm4,%ymm20
	vmovdqa64	%ymm5,%ymm21
	vmovdqa64	%ymm6,%ymm22
	vmovdqa64	%ymm7,%ymm23
	vmovdqa64	%ymm8,%ymm24
	vmovdqa64	%ymm9,%ymm25
	vmovdqa64	%ymm10,%ymm26
	vmovdqa64	%ymm11,%ymm27
	vmovdqa64	%ymm12,%ymm28
	vmovdqa64	%ymm13,%ymm29
	vmovdqa64	%ymm14,%ymm30
	vmovdqa64	%ymm15,%ymm31

.Ldoubleround8:
	# x0 += x4, x12 = rotl32(x12 ^ x0, 16)
	vpaddd	%ymm0,%ymm4,%ymm0
	vpxord	%ymm0,%ymm12,%ymm12
	vprold	$16,%ymm12,%ymm12
	# x1 += x5, x13 = rotl32(x13 ^ x1, 16)
	vpaddd	%ymm1,%ymm5,%ymm1
	vpxord	%ymm1,%ymm13,%ymm13
	vprold	$16,%ymm13,%ymm13
	# x2 += x6, x14 = rotl32(x14 ^ x2, 16)
	vpaddd	%ymm2,%ymm6,%ymm2
	vpxord	%ymm2,%ymm14,%ymm14
	vprold	$16,%ymm14,%ymm14
	# x3 += x7, x15 = rotl32(x15 ^ x3, 16)
	vpaddd	%ymm3,%ymm7,%ymm3
	vpxord	%ymm3,%ymm15,%ymm15
	vprold	$16,%ymm15,%ymm15

	# x8 += x12, x4 = rotl32(x4 ^ x8, 12)
	vpaddd	%ymm12,%ymm8,%ymm8
	vpxord	%ymm8,%ymm4,%ymm4
	vprold	$12,%ymm4,%ymm4
	# x9 += x13, x5 = rotl32(x5 ^ x9, 12)
	vpaddd	%ymm13,%ymm9,%ymm9
	vpxord	%ymm9,%ymm5,%ymm5
	vprold	$12,%ymm5,%ymm5
	# x10 += x14, x6 = rotl32(x6 ^ x10, 12)
	vpaddd	%ymm14,%ymm10,%ymm10
	vpxord	%ymm10,%ymm6,%ymm6
	vprold	$12,%ymm6,%ymm6
	# x11 += x15, x7 = rotl32(x7 ^ x11, 12)
	vpaddd	%ymm15,%ymm11,%ymm11
	vpxord	%ymm11,%ymm7,%ymm7
	vprold	$12,%ymm7,%ymm7

	# x0 += x4, x12 = rotl32(x12 ^ x0, 8)
	vpaddd	%ymm0,%ymm4,%ymm0
	vpxord	%ymm0,%ymm12,%ymm12
	vprold	$8,%ymm12,%ymm12
	# x1 += x5, x13 = rotl32(x13 ^ x1, 8)
	vpaddd	%ymm1,%ymm5,%ymm1
	vpxord	%ymm1,%ymm13,%ymm13
	vprold	$8,%ymm13,%ymm13
	# x2 += x6, x14 = rotl32(x14 ^ x2, 8)
	vpaddd	%ymm2,%ymm6,%ymm2
	vpxord	%ymm2,%ymm14,%ymm14
	vprold	$8,%ymm14,%ymm14
	# x3 += x7, x15 = rotl32(x15 ^ x3, 8)
	vpaddd	%ymm3,%ymm7,%ymm3
	vpxord	%ymm3,%ymm15,%ymm15
	vprold	$8,%ymm15,%ymm15

	# x8 += x12, x4 = rotl32(x4 ^ x8, 7)
	vpaddd	%ymm12,%ymm8,%ymm8
	vpxord	%ymm8,%ymm4,%ymm4
	vprold	$7,%ymm4,%ymm4
	# x9 += x13, x5 = rotl32(x5 ^ x9, 7)
	vpaddd	%ymm13,%ymm9,%ymm9
	vpxord	%ymm9,%ymm5,%ymm5
	vprold	$7,%ymm5,%ymm5
	# x10 += x14, x6 = rotl32(x6 ^ x10, 7)
	vpaddd	%ymm14,%ymm10,%ymm10
	vpxord	%ymm10,%ymm6,%ymm6
	vprold	$7,%ymm6,%ymm6
	# x11 += x15, x7 = rotl32(x7 ^ x11, 7)
	vpaddd	%ymm15,%ymm11,%ymm11
	vpxord	%ymm11,%ymm7,%ymm7
	vprold	$7,%ymm7,%ymm7

	# x0 += x5, x15 = rotl32(x15 ^ x0, 16)
	vpaddd	%ymm0,%ymm5,%ymm0
	vpxord	%ymm0,%ymm15,%ymm15
	vprold	$16,%ymm15,%ymm15
	# x1 += x6, x12 = rotl32(x12 ^ x1, 16)
	vpaddd	%ymm1,%ymm6,%ymm1
	vpxord	%ymm1,%ymm12,%ymm12
	vprold	$16,%ymm12,%ymm12
	# x2 += x7, x13 = rotl32(x13 ^ x2, 16)
	vpaddd	%ymm2,%ymm7,%ymm2
	vpxord	%ymm2,%ymm13,%ymm13
	vprold	$16,%ymm13,%ymm13
	# x3 += x4, x14 = rotl32(x14 ^ x3, 16)
	vpaddd	%ymm3,%ymm4,%ymm3
	vpxord	%ymm3,%ymm14,%ymm14
	vprold	$16,%ymm14,%ymm14

	# x10 += x15, x5 = rotl32(x5 ^ x10, 12)
	vpaddd	%ymm15,%ymm10,%ymm10
	vpxord	%ymm10,%ymm5,%ymm5
	vprold	$12,%ymm5,%ymm5
	# x11 += x12, x6 = rotl32(x6 ^ x11, 12)
	vpaddd	%ymm12,%ymm11,%ymm11
	vpxord	%ymm11,%ymm6,%ymm6
	vprold	$12,%ymm6,%ymm6
	# x8 += x13, x7 = rotl32(x7 ^ x8, 12)
	vpaddd	%ymm13,%ymm8,%ymm8
	vpxord	%ymm8,%ymm7,%ymm7
	vprold	$12,%ymm7,%ymm7
	# x9 += x14, x4 = rotl32(x4 ^ x9, 12)
	vpaddd	%ymm14,%ymm9,%ymm9
	vpxord	%ymm9,%ymm4,%ymm4
	vprold	$12,%ymm4,%ymm4

	# x0 += x5, x15 = rotl32(x15 ^ x0, 8)
	vpaddd	%ymm0,%ymm5,%ymm0
	vpxord	%ymm0,%ymm15,%ymm15
	vprold	$8,%ymm15,%ymm15
	# x1 += x6, x12 = rotl32(x12 ^ x1, 8)
	vpaddd	%ymm1,%ymm6,%ymm1
	vpxord	%ymm1,%ymm12,%ymm12
	vprold	$8,%ymm12,%ymm12
	# x2 += x7, x13 = rotl32(x13 ^ x2, 8)
	vpaddd	%ymm2,%ymm7,%ymm2
	vpxord	%ymm2,%ymm13,%ymm13
	vprold	$8,%ymm13,%ymm13
	# x3 += x4, x14 = rotl32(x14 ^ x3, 8)
	vpaddd	%ymm3,%ymm4,%ymm3
	vpxord	%ymm3,%ymm14,%ymm14
	vprold	$8,%ymm14,%ymm14

	# x10 += x15, x5 = rotl32(x5 ^ x10, 7)
	vpaddd	%ymm15,%ymm10,%ymm10
	vpxord	%ymm10,%ymm5,%ymm5
	vprold	$7,%ymm5,%ymm5
	# x11 += x12, x6 = rotl32(x6 ^ x11, 7)
	vpaddd	%ymm12,%ymm11,%ymm11
	vpxord	%ymm11,%ymm6,%ymm6
	vprold	$7,%ymm6,%ymm6
	# x8 += x13, x7 = rotl32(x7 ^ x8, 7)
	vpaddd	%ymm13,%ymm8,%ymm8
	vpxord	%ymm8,%ymm7,%ymm7
	vprold	$7,%ymm7,%ymm7
	# x9 += x14, x4 = rotl32(x4 ^ x9, 7)
	vpaddd	%ymm14,%ymm9,%ymm9
	vpxord	%ymm9,%ymm4,%ymm4
	vprold	$7,%ymm4,%ymm4

	sub	$2,%r8d
	jnz	.Ldoubleround8

	# x0..15[0-7] += s[0..15]
	vpaddd	%ymm16,%ymm0,%ymm0
	vpaddd	%ymm17,%ymm1,%ymm1
	vpaddd	%ymm18,%ymm2,%ymm2
	vpaddd	%ymm19,%ymm3,%ymm3
	vpaddd	%ymm20,%ymm4,%ymm4
	vpaddd	%ymm21,%ymm5,%ymm5
	vpaddd	%ymm22,%ymm6,%ymm6
	vpaddd	%ymm23,%ymm7,%ymm7
	vpaddd	%ymm24,%ymm8,%ymm8
	vpaddd	%ymm25,%ymm9,%ymm9
	vpaddd	%ymm26,%ymm10,%ymm10
	vpaddd	%ymm27,%ymm11,%ymm11
	vpaddd	%ymm28,%ymm12,%ymm12
	vpaddd	%ymm29,%ymm13,%ymm13
	vpaddd	%ymm30,%ymm14,%ymm14
	vpaddd	%ymm31,%ymm15,%ymm15

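	# The interleaves below transpose the lane-oriented layout: after the
	# 32-, 64- and 128-bit interleave steps, each 256-bit register holds
	# 32 contiguous bytes of keystream belonging to a single block, in
	# output order, ready to be xor'ed against the input.
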
	# interleave 32-bit words in state n, n+1
	vpunpckldq	%ymm1,%ymm0,%ymm16
	vpunpckhdq	%ymm1,%ymm0,%ymm17
	vpunpckldq	%ymm3,%ymm2,%ymm18
	vpunpckhdq	%ymm3,%ymm2,%ymm19
	vpunpckldq	%ymm5,%ymm4,%ymm20
	vpunpckhdq	%ymm5,%ymm4,%ymm21
	vpunpckldq	%ymm7,%ymm6,%ymm22
	vpunpckhdq	%ymm7,%ymm6,%ymm23
	vpunpckldq	%ymm9,%ymm8,%ymm24
	vpunpckhdq	%ymm9,%ymm8,%ymm25
	vpunpckldq	%ymm11,%ymm10,%ymm26
	vpunpckhdq	%ymm11,%ymm10,%ymm27
	vpunpckldq	%ymm13,%ymm12,%ymm28
	vpunpckhdq	%ymm13,%ymm12,%ymm29
	vpunpckldq	%ymm15,%ymm14,%ymm30
	vpunpckhdq	%ymm15,%ymm14,%ymm31

	# interleave 64-bit words in state n, n+2
	vpunpcklqdq	%ymm18,%ymm16,%ymm0
	vpunpcklqdq	%ymm19,%ymm17,%ymm1
	vpunpckhqdq	%ymm18,%ymm16,%ymm2
	vpunpckhqdq	%ymm19,%ymm17,%ymm3
	vpunpcklqdq	%ymm22,%ymm20,%ymm4
	vpunpcklqdq	%ymm23,%ymm21,%ymm5
	vpunpckhqdq	%ymm22,%ymm20,%ymm6
	vpunpckhqdq	%ymm23,%ymm21,%ymm7
	vpunpcklqdq	%ymm26,%ymm24,%ymm8
	vpunpcklqdq	%ymm27,%ymm25,%ymm9
	vpunpckhqdq	%ymm26,%ymm24,%ymm10
	vpunpckhqdq	%ymm27,%ymm25,%ymm11
	vpunpcklqdq	%ymm30,%ymm28,%ymm12
	vpunpcklqdq	%ymm31,%ymm29,%ymm13
	vpunpckhqdq	%ymm30,%ymm28,%ymm14
	vpunpckhqdq	%ymm31,%ymm29,%ymm15

	# interleave 128-bit words in state n, n+4
	# xor/write first four blocks
	vmovdqa64	%ymm0,%ymm16
	vperm2i128	$0x20,%ymm4,%ymm0,%ymm0
	cmp	$0x0020,%rcx
	jl	.Lxorpart8
	vpxord	0x0000(%rdx),%ymm0,%ymm0
	vmovdqu64	%ymm0,0x0000(%rsi)
	vmovdqa64	%ymm16,%ymm0
	vperm2i128	$0x31,%ymm4,%ymm0,%ymm4

	vperm2i128	$0x20,%ymm12,%ymm8,%ymm0
	cmp	$0x0040,%rcx
	jl	.Lxorpart8
	vpxord	0x0020(%rdx),%ymm0,%ymm0
	vmovdqu64	%ymm0,0x0020(%rsi)
	vperm2i128	$0x31,%ymm12,%ymm8,%ymm12

	vperm2i128	$0x20,%ymm6,%ymm2,%ymm0
	cmp	$0x0060,%rcx
	jl	.Lxorpart8
	vpxord	0x0040(%rdx),%ymm0,%ymm0
	vmovdqu64	%ymm0,0x0040(%rsi)
	vperm2i128	$0x31,%ymm6,%ymm2,%ymm6

	vperm2i128	$0x20,%ymm14,%ymm10,%ymm0
	cmp	$0x0080,%rcx
	jl	.Lxorpart8
	vpxord	0x0060(%rdx),%ymm0,%ymm0
	vmovdqu64	%ymm0,0x0060(%rsi)
	vperm2i128	$0x31,%ymm14,%ymm10,%ymm14

	vperm2i128	$0x20,%ymm5,%ymm1,%ymm0
	cmp	$0x00a0,%rcx
	jl	.Lxorpart8
	vpxord	0x0080(%rdx),%ymm0,%ymm0
	vmovdqu64	%ymm0,0x0080(%rsi)
	vperm2i128	$0x31,%ymm5,%ymm1,%ymm5

	vperm2i128	$0x20,%ymm13,%ymm9,%ymm0
	cmp	$0x00c0,%rcx
	jl	.Lxorpart8
	vpxord	0x00a0(%rdx),%ymm0,%ymm0
	vmovdqu64	%ymm0,0x00a0(%rsi)
	vperm2i128	$0x31,%ymm13,%ymm9,%ymm13

	vperm2i128	$0x20,%ymm7,%ymm3,%ymm0
	cmp	$0x00e0,%rcx
	jl	.Lxorpart8
	vpxord	0x00c0(%rdx),%ymm0,%ymm0
	vmovdqu64	%ymm0,0x00c0(%rsi)
	vperm2i128	$0x31,%ymm7,%ymm3,%ymm7

	vperm2i128	$0x20,%ymm15,%ymm11,%ymm0
	cmp	$0x0100,%rcx
	jl	.Lxorpart8
	vpxord	0x00e0(%rdx),%ymm0,%ymm0
	vmovdqu64	%ymm0,0x00e0(%rsi)
	vperm2i128	$0x31,%ymm15,%ymm11,%ymm15

	# xor remaining blocks, write to output
	vmovdqa64	%ymm4,%ymm0
	cmp	$0x0120,%rcx
	jl	.Lxorpart8
	vpxord	0x0100(%rdx),%ymm0,%ymm0
	vmovdqu64	%ymm0,0x0100(%rsi)

	vmovdqa64	%ymm12,%ymm0
	cmp	$0x0140,%rcx
	jl	.Lxorpart8
	vpxord	0x0120(%rdx),%ymm0,%ymm0
	vmovdqu64	%ymm0,0x0120(%rsi)

	vmovdqa64	%ymm6,%ymm0
	cmp	$0x0160,%rcx
	jl	.Lxorpart8
	vpxord	0x0140(%rdx),%ymm0,%ymm0
	vmovdqu64	%ymm0,0x0140(%rsi)

	vmovdqa64	%ymm14,%ymm0
	cmp	$0x0180,%rcx
	jl	.Lxorpart8
	vpxord	0x0160(%rdx),%ymm0,%ymm0
	vmovdqu64	%ymm0,0x0160(%rsi)

	vmovdqa64	%ymm5,%ymm0
	cmp	$0x01a0,%rcx
	jl	.Lxorpart8
	vpxord	0x0180(%rdx),%ymm0,%ymm0
	vmovdqu64	%ymm0,0x0180(%rsi)

	vmovdqa64	%ymm13,%ymm0
	cmp	$0x01c0,%rcx
	jl	.Lxorpart8
	vpxord	0x01a0(%rdx),%ymm0,%ymm0
	vmovdqu64	%ymm0,0x01a0(%rsi)

	vmovdqa64	%ymm7,%ymm0
	cmp	$0x01e0,%rcx
	jl	.Lxorpart8
	vpxord	0x01c0(%rdx),%ymm0,%ymm0
	vmovdqu64	%ymm0,0x01c0(%rsi)

	vmovdqa64	%ymm15,%ymm0
	cmp	$0x0200,%rcx
	jl	.Lxorpart8
	vpxord	0x01e0(%rdx),%ymm0,%ymm0
	vmovdqu64	%ymm0,0x01e0(%rsi)

.Ldone8:
	vzeroupper
	RET

.Lxorpart8:
	# xor remaining bytes from partial register into output
	mov	%rcx,%rax
	and	$0x1f,%rcx
	jz	.Ldone8
	mov	%rax,%r9
	and	$~0x1f,%r9

	mov	$1,%rax
	shld	%cl,%rax,%rax
	sub	$1,%rax
	kmovq	%rax,%k1

	vmovdqu8	(%rdx,%r9),%ymm1{%k1}{z}
	vpxord	%ymm0,%ymm1,%ymm1
	vmovdqu8	%ymm1,(%rsi,%r9){%k1}

	jmp	.Ldone8

SYM_FUNC_END(chacha_8block_xor_avx512vl)