0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014 #include <linux/linkage.h>
0015 #include <asm/frame.h>
0016
0017
0018
0019 #define state_h0 0
0020 #define state_h1 4
0021 #define state_h2 8
0022 #define state_h3 12
0023 #define state_h4 16
0024 #define state_h5 20
0025 #define state_h6 24
0026 #define state_h7 28
0027
0028
0029
0030
0031
0032 #define K0 2043430169
0033 #define K1 -208106958
0034 #define K2 -416213915
0035 #define K3 -832427829
0036 #define K4 -1664855657
0037 #define K5 965255983
0038 #define K6 1930511966
0039 #define K7 -433943364
0040 #define K8 -867886727
0041 #define K9 -1735773453
0042 #define K10 823420391
0043 #define K11 1646840782
0044 #define K12 -1001285732
0045 #define K13 -2002571463
0046 #define K14 289824371
0047 #define K15 579648742
0048 #define K16 -1651869049
0049 #define K17 991229199
0050 #define K18 1982458398
0051 #define K19 -330050500
0052 #define K20 -660100999
0053 #define K21 -1320201997
0054 #define K22 1654563303
0055 #define K23 -985840690
0056 #define K24 -1971681379
0057 #define K25 351604539
0058 #define K26 703209078
0059 #define K27 1406418156
0060 #define K28 -1482130984
0061 #define K29 1330705329
0062 #define K30 -1633556638
0063 #define K31 1027854021
0064 #define K32 2055708042
0065 #define K33 -183551212
0066 #define K34 -367102423
0067 #define K35 -734204845
0068 #define K36 -1468409689
0069 #define K37 1358147919
0070 #define K38 -1578671458
0071 #define K39 1137624381
0072 #define K40 -2019718534
0073 #define K41 255530229
0074 #define K42 511060458
0075 #define K43 1022120916
0076 #define K44 2044241832
0077 #define K45 -206483632
0078 #define K46 -412967263
0079 #define K47 -825934525
0080 #define K48 -1651869049
0081 #define K49 991229199
0082 #define K50 1982458398
0083 #define K51 -330050500
0084 #define K52 -660100999
0085 #define K53 -1320201997
0086 #define K54 1654563303
0087 #define K55 -985840690
0088 #define K56 -1971681379
0089 #define K57 351604539
0090 #define K58 703209078
0091 #define K59 1406418156
0092 #define K60 -1482130984
0093 #define K61 1330705329
0094 #define K62 -1633556638
0095 #define K63 1027854021
0096
0097
0098
0099 #define RSTATE %rdi
0100 #define RDATA %rsi
0101 #define RNBLKS %rdx
0102
0103 #define t0 %eax
0104 #define t1 %ebx
0105 #define t2 %ecx
0106
0107 #define a %r8d
0108 #define b %r9d
0109 #define c %r10d
0110 #define d %r11d
0111 #define e %r12d
0112 #define f %r13d
0113 #define g %r14d
0114 #define h %r15d
0115
0116 #define W0 %xmm0
0117 #define W1 %xmm1
0118 #define W2 %xmm2
0119 #define W3 %xmm3
0120 #define W4 %xmm4
0121 #define W5 %xmm5
0122
0123 #define XTMP0 %xmm6
0124 #define XTMP1 %xmm7
0125 #define XTMP2 %xmm8
0126 #define XTMP3 %xmm9
0127 #define XTMP4 %xmm10
0128 #define XTMP5 %xmm11
0129 #define XTMP6 %xmm12
0130
0131 #define BSWAP_REG %xmm15
0132
0133
0134
0135 #define STACK_W_SIZE (32 * 2 * 3)
0136 #define STACK_REG_SAVE_SIZE (64)
0137
0138 #define STACK_W (0)
0139 #define STACK_REG_SAVE (STACK_W + STACK_W_SIZE)
0140 #define STACK_SIZE (STACK_REG_SAVE + STACK_REG_SAVE_SIZE)
0141
0142
0143
0144 #define roll2(v, reg) \
0145 roll $(v), reg;
0146
0147 #define roll3mov(v, src, dst) \
0148 movl src, dst; \
0149 roll $(v), dst;
0150
0151 #define roll3(v, src, dst) \
0152 rorxl $(32-(v)), src, dst;
0153
0154 #define addl2(a, out) \
0155 leal (a, out), out;
0156
0157
0158
0159 #define GG1(x, y, z, o, t) \
0160 movl x, o; \
0161 xorl y, o; \
0162 xorl z, o;
0163
0164 #define FF1(x, y, z, o, t) GG1(x, y, z, o, t)
0165
0166 #define GG2(x, y, z, o, t) \
0167 andnl z, x, o; \
0168 movl y, t; \
0169 andl x, t; \
0170 addl2(t, o);
0171
0172 #define FF2(x, y, z, o, t) \
0173 movl y, o; \
0174 xorl x, o; \
0175 movl y, t; \
0176 andl x, t; \
0177 andl z, o; \
0178 xorl t, o;
0179
0180 #define R(i, a, b, c, d, e, f, g, h, round, widx, wtype) \
0181 \
0182 roll3mov(12, a, t0); \
0183 \
0184 leal K##round(t0, e, 1), t1; \
0185 roll2(7, t1); \
0186 \
0187 addl wtype##_W1_ADDR(round, widx), h; \
0188 \
0189 addl2(t1, h); \
0190 \
0191 xorl t1, t0; \
0192 \
0193 addl wtype##_W1W2_ADDR(round, widx), d; \
0194 \
0195 FF##i(a, b, c, t1, t2); \
0196 \
0197 addl2(t1, d); \
0198 \
0199 GG##i(e, f, g, t2, t1); \
0200 \
0201 addl2(t2, h); \
0202 \
0203 roll2(19, f); \
0204 \
0205 addl2(t0, d); \
0206 \
0207 roll2(9, b); \
0208 \
0209 roll3(9, h, t2); \
0210 roll3(17, h, t1); \
0211 xorl t2, h; \
0212 xorl t1, h;
0213
0214 #define R1(a, b, c, d, e, f, g, h, round, widx, wtype) \
0215 R(1, a, b, c, d, e, f, g, h, round, widx, wtype)
0216
0217 #define R2(a, b, c, d, e, f, g, h, round, widx, wtype) \
0218 R(2, a, b, c, d, e, f, g, h, round, widx, wtype)
0219
0220
0221
0222
0223 #define IW_W_ADDR(round, widx, offs) \
0224 (STACK_W + ((round) / 4) * 64 + (offs) + ((widx) * 4))(%rsp)
0225
0226
0227 #define XW_W_ADDR(round, widx, offs) \
0228 (STACK_W + ((((round) / 3) - 4) % 2) * 64 + (offs) + ((widx) * 4))(%rsp)
0229
0230
0231 #define IW_W1_ADDR(round, widx) IW_W_ADDR(round, widx, 0)
0232 #define IW_W1W2_ADDR(round, widx) IW_W_ADDR(round, widx, 32)
0233
0234
0235 #define XW_W1_ADDR(round, widx) XW_W_ADDR(round, widx, 0)
0236 #define XW_W1W2_ADDR(round, widx) XW_W_ADDR(round, widx, 32)
0237
0238
0239 #define LOAD_W_XMM_1() \
0240 vmovdqu 0*16(RDATA), XTMP0; \
0241 vmovdqu 1*16(RDATA), XTMP1; \
0242 vmovdqu 2*16(RDATA), XTMP2; \
0243 vmovdqu 3*16(RDATA), XTMP3; \
0244 vpshufb BSWAP_REG, XTMP0, XTMP0; \
0245 vpshufb BSWAP_REG, XTMP1, XTMP1; \
0246 vpshufb BSWAP_REG, XTMP2, XTMP2; \
0247 vpshufb BSWAP_REG, XTMP3, XTMP3; \
0248 vpxor XTMP0, XTMP1, XTMP4; \
0249 vpxor XTMP1, XTMP2, XTMP5; \
0250 vpxor XTMP2, XTMP3, XTMP6; \
0251 leaq 64(RDATA), RDATA; \
0252 vmovdqa XTMP0, IW_W1_ADDR(0, 0); \
0253 vmovdqa XTMP4, IW_W1W2_ADDR(0, 0); \
0254 vmovdqa XTMP1, IW_W1_ADDR(4, 0); \
0255 vmovdqa XTMP5, IW_W1W2_ADDR(4, 0);
0256
0257 #define LOAD_W_XMM_2() \
0258 vmovdqa XTMP2, IW_W1_ADDR(8, 0); \
0259 vmovdqa XTMP6, IW_W1W2_ADDR(8, 0);
0260
0261 #define LOAD_W_XMM_3() \
0262 vpshufd $0b00000000, XTMP0, W0; \
0263 vpshufd $0b11111001, XTMP0, W1; \
0264 vmovdqa XTMP1, W2; \
0265 vpalignr $12, XTMP1, XTMP2, W3; \
0266 vpalignr $8, XTMP2, XTMP3, W4; \
0267 vpshufd $0b11111001, XTMP3, W5;
0268
0269
0270 #define SCHED_W_0(round, w0, w1, w2, w3, w4, w5) \
0271 \
0272 vpshufd $0b10111111, w0, XTMP0; \
0273 vpalignr $12, XTMP0, w1, XTMP0; \
0274 \
0275 vpshufd $0b10111111, w1, XTMP1; \
0276 vpalignr $12, XTMP1, w2, XTMP1; \
0277 \
0278 \
0279 vpxor w3, XTMP0, XTMP0;
0280
0281 #define SCHED_W_1(round, w0, w1, w2, w3, w4, w5) \
0282 \
0283 \
0284 vpslld $15, w5, XTMP2; \
0285 vpsrld $(32-15), w5, XTMP3; \
0286 vpxor XTMP2, XTMP3, XTMP3; \
0287 vpxor XTMP3, XTMP0, XTMP0; \
0288 \
0289 vpslld $7, XTMP1, XTMP5; \
0290 vpsrld $(32-7), XTMP1, XTMP1; \
0291 vpxor XTMP5, XTMP1, XTMP1; \
0292 \
0293 vpxor w4, XTMP1, XTMP1; \
0294 \
0295 \
0296 vpslld $15, XTMP0, XTMP5; \
0297 vpsrld $(32-15), XTMP0, XTMP6; \
0298 vpslld $23, XTMP0, XTMP2; \
0299 vpsrld $(32-23), XTMP0, XTMP3; \
0300 vpxor XTMP0, XTMP1, XTMP1; \
0301 vpxor XTMP6, XTMP5, XTMP5; \
0302 vpxor XTMP3, XTMP2, XTMP2; \
0303 vpxor XTMP2, XTMP5, XTMP5; \
0304 vpxor XTMP5, XTMP1, w0;
0305
0306 #define SCHED_W_2(round, w0, w1, w2, w3, w4, w5) \
0307 \
0308 vpshufd $0b10111111, w4, XTMP4; \
0309 vpalignr $12, XTMP4, w5, XTMP4; \
0310 vmovdqa XTMP4, XW_W1_ADDR((round), 0); \
0311 \
0312 vpxor w0, XTMP4, XTMP1; \
0313 vmovdqa XTMP1, XW_W1W2_ADDR((round), 0);
0314
0315
0316 .section .rodata.cst16, "aM", @progbits, 16
0317 .align 16
0318
0319 .Lbe32mask:
0320 .long 0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f
0321
0322 .text
0323
0324
0325
0326
0327
0328
0329
0330 .align 16
0331 SYM_FUNC_START(sm3_transform_avx)
0332
0333
0334
0335
0336
0337 vzeroupper;
0338
0339 pushq %rbp;
0340 movq %rsp, %rbp;
0341
0342 movq %rdx, RNBLKS;
0343
0344 subq $STACK_SIZE, %rsp;
0345 andq $(~63), %rsp;
0346
0347 movq %rbx, (STACK_REG_SAVE + 0 * 8)(%rsp);
0348 movq %r15, (STACK_REG_SAVE + 1 * 8)(%rsp);
0349 movq %r14, (STACK_REG_SAVE + 2 * 8)(%rsp);
0350 movq %r13, (STACK_REG_SAVE + 3 * 8)(%rsp);
0351 movq %r12, (STACK_REG_SAVE + 4 * 8)(%rsp);
0352
0353 vmovdqa .Lbe32mask (%rip), BSWAP_REG;
0354
0355
0356 movl state_h0(RSTATE), a;
0357 movl state_h1(RSTATE), b;
0358 movl state_h2(RSTATE), c;
0359 movl state_h3(RSTATE), d;
0360 movl state_h4(RSTATE), e;
0361 movl state_h5(RSTATE), f;
0362 movl state_h6(RSTATE), g;
0363 movl state_h7(RSTATE), h;
0364
0365 .align 16
0366 .Loop:
0367
0368 LOAD_W_XMM_1();
0369
0370 leaq -1(RNBLKS), RNBLKS;
0371
0372
0373 R1(a, b, c, d, e, f, g, h, 0, 0, IW); LOAD_W_XMM_2();
0374 R1(d, a, b, c, h, e, f, g, 1, 1, IW);
0375 R1(c, d, a, b, g, h, e, f, 2, 2, IW);
0376 R1(b, c, d, a, f, g, h, e, 3, 3, IW); LOAD_W_XMM_3();
0377
0378
0379 R1(a, b, c, d, e, f, g, h, 4, 0, IW);
0380 R1(d, a, b, c, h, e, f, g, 5, 1, IW);
0381 R1(c, d, a, b, g, h, e, f, 6, 2, IW); SCHED_W_0(12, W0, W1, W2, W3, W4, W5);
0382 R1(b, c, d, a, f, g, h, e, 7, 3, IW); SCHED_W_1(12, W0, W1, W2, W3, W4, W5);
0383
0384
0385 R1(a, b, c, d, e, f, g, h, 8, 0, IW); SCHED_W_2(12, W0, W1, W2, W3, W4, W5);
0386 R1(d, a, b, c, h, e, f, g, 9, 1, IW); SCHED_W_0(15, W1, W2, W3, W4, W5, W0);
0387 R1(c, d, a, b, g, h, e, f, 10, 2, IW); SCHED_W_1(15, W1, W2, W3, W4, W5, W0);
0388 R1(b, c, d, a, f, g, h, e, 11, 3, IW); SCHED_W_2(15, W1, W2, W3, W4, W5, W0);
0389
0390
0391 R1(a, b, c, d, e, f, g, h, 12, 0, XW); SCHED_W_0(18, W2, W3, W4, W5, W0, W1);
0392 R1(d, a, b, c, h, e, f, g, 13, 1, XW); SCHED_W_1(18, W2, W3, W4, W5, W0, W1);
0393 R1(c, d, a, b, g, h, e, f, 14, 2, XW); SCHED_W_2(18, W2, W3, W4, W5, W0, W1);
0394
0395
0396 R1(b, c, d, a, f, g, h, e, 15, 0, XW); SCHED_W_0(21, W3, W4, W5, W0, W1, W2);
0397 R2(a, b, c, d, e, f, g, h, 16, 1, XW); SCHED_W_1(21, W3, W4, W5, W0, W1, W2);
0398 R2(d, a, b, c, h, e, f, g, 17, 2, XW); SCHED_W_2(21, W3, W4, W5, W0, W1, W2);
0399
0400
0401 R2(c, d, a, b, g, h, e, f, 18, 0, XW); SCHED_W_0(24, W4, W5, W0, W1, W2, W3);
0402 R2(b, c, d, a, f, g, h, e, 19, 1, XW); SCHED_W_1(24, W4, W5, W0, W1, W2, W3);
0403 R2(a, b, c, d, e, f, g, h, 20, 2, XW); SCHED_W_2(24, W4, W5, W0, W1, W2, W3);
0404
0405
0406 R2(d, a, b, c, h, e, f, g, 21, 0, XW); SCHED_W_0(27, W5, W0, W1, W2, W3, W4);
0407 R2(c, d, a, b, g, h, e, f, 22, 1, XW); SCHED_W_1(27, W5, W0, W1, W2, W3, W4);
0408 R2(b, c, d, a, f, g, h, e, 23, 2, XW); SCHED_W_2(27, W5, W0, W1, W2, W3, W4);
0409
0410
0411 R2(a, b, c, d, e, f, g, h, 24, 0, XW); SCHED_W_0(30, W0, W1, W2, W3, W4, W5);
0412 R2(d, a, b, c, h, e, f, g, 25, 1, XW); SCHED_W_1(30, W0, W1, W2, W3, W4, W5);
0413 R2(c, d, a, b, g, h, e, f, 26, 2, XW); SCHED_W_2(30, W0, W1, W2, W3, W4, W5);
0414
0415
0416 R2(b, c, d, a, f, g, h, e, 27, 0, XW); SCHED_W_0(33, W1, W2, W3, W4, W5, W0);
0417 R2(a, b, c, d, e, f, g, h, 28, 1, XW); SCHED_W_1(33, W1, W2, W3, W4, W5, W0);
0418 R2(d, a, b, c, h, e, f, g, 29, 2, XW); SCHED_W_2(33, W1, W2, W3, W4, W5, W0);
0419
0420
0421 R2(c, d, a, b, g, h, e, f, 30, 0, XW); SCHED_W_0(36, W2, W3, W4, W5, W0, W1);
0422 R2(b, c, d, a, f, g, h, e, 31, 1, XW); SCHED_W_1(36, W2, W3, W4, W5, W0, W1);
0423 R2(a, b, c, d, e, f, g, h, 32, 2, XW); SCHED_W_2(36, W2, W3, W4, W5, W0, W1);
0424
0425
0426 R2(d, a, b, c, h, e, f, g, 33, 0, XW); SCHED_W_0(39, W3, W4, W5, W0, W1, W2);
0427 R2(c, d, a, b, g, h, e, f, 34, 1, XW); SCHED_W_1(39, W3, W4, W5, W0, W1, W2);
0428 R2(b, c, d, a, f, g, h, e, 35, 2, XW); SCHED_W_2(39, W3, W4, W5, W0, W1, W2);
0429
0430
0431 R2(a, b, c, d, e, f, g, h, 36, 0, XW); SCHED_W_0(42, W4, W5, W0, W1, W2, W3);
0432 R2(d, a, b, c, h, e, f, g, 37, 1, XW); SCHED_W_1(42, W4, W5, W0, W1, W2, W3);
0433 R2(c, d, a, b, g, h, e, f, 38, 2, XW); SCHED_W_2(42, W4, W5, W0, W1, W2, W3);
0434
0435
0436 R2(b, c, d, a, f, g, h, e, 39, 0, XW); SCHED_W_0(45, W5, W0, W1, W2, W3, W4);
0437 R2(a, b, c, d, e, f, g, h, 40, 1, XW); SCHED_W_1(45, W5, W0, W1, W2, W3, W4);
0438 R2(d, a, b, c, h, e, f, g, 41, 2, XW); SCHED_W_2(45, W5, W0, W1, W2, W3, W4);
0439
0440
0441 R2(c, d, a, b, g, h, e, f, 42, 0, XW); SCHED_W_0(48, W0, W1, W2, W3, W4, W5);
0442 R2(b, c, d, a, f, g, h, e, 43, 1, XW); SCHED_W_1(48, W0, W1, W2, W3, W4, W5);
0443 R2(a, b, c, d, e, f, g, h, 44, 2, XW); SCHED_W_2(48, W0, W1, W2, W3, W4, W5);
0444
0445
0446 R2(d, a, b, c, h, e, f, g, 45, 0, XW); SCHED_W_0(51, W1, W2, W3, W4, W5, W0);
0447 R2(c, d, a, b, g, h, e, f, 46, 1, XW); SCHED_W_1(51, W1, W2, W3, W4, W5, W0);
0448 R2(b, c, d, a, f, g, h, e, 47, 2, XW); SCHED_W_2(51, W1, W2, W3, W4, W5, W0);
0449
0450
0451 R2(a, b, c, d, e, f, g, h, 48, 0, XW); SCHED_W_0(54, W2, W3, W4, W5, W0, W1);
0452 R2(d, a, b, c, h, e, f, g, 49, 1, XW); SCHED_W_1(54, W2, W3, W4, W5, W0, W1);
0453 R2(c, d, a, b, g, h, e, f, 50, 2, XW); SCHED_W_2(54, W2, W3, W4, W5, W0, W1);
0454
0455
0456 R2(b, c, d, a, f, g, h, e, 51, 0, XW); SCHED_W_0(57, W3, W4, W5, W0, W1, W2);
0457 R2(a, b, c, d, e, f, g, h, 52, 1, XW); SCHED_W_1(57, W3, W4, W5, W0, W1, W2);
0458 R2(d, a, b, c, h, e, f, g, 53, 2, XW); SCHED_W_2(57, W3, W4, W5, W0, W1, W2);
0459
0460
0461 R2(c, d, a, b, g, h, e, f, 54, 0, XW); SCHED_W_0(60, W4, W5, W0, W1, W2, W3);
0462 R2(b, c, d, a, f, g, h, e, 55, 1, XW); SCHED_W_1(60, W4, W5, W0, W1, W2, W3);
0463 R2(a, b, c, d, e, f, g, h, 56, 2, XW); SCHED_W_2(60, W4, W5, W0, W1, W2, W3);
0464
0465
0466 R2(d, a, b, c, h, e, f, g, 57, 0, XW); SCHED_W_0(63, W5, W0, W1, W2, W3, W4);
0467 R2(c, d, a, b, g, h, e, f, 58, 1, XW);
0468 R2(b, c, d, a, f, g, h, e, 59, 2, XW); SCHED_W_1(63, W5, W0, W1, W2, W3, W4);
0469
0470
0471 R2(a, b, c, d, e, f, g, h, 60, 0, XW);
0472 R2(d, a, b, c, h, e, f, g, 61, 1, XW); SCHED_W_2(63, W5, W0, W1, W2, W3, W4);
0473 R2(c, d, a, b, g, h, e, f, 62, 2, XW);
0474
0475
0476 R2(b, c, d, a, f, g, h, e, 63, 0, XW);
0477
0478
0479 xorl state_h0(RSTATE), a;
0480 xorl state_h1(RSTATE), b;
0481 xorl state_h2(RSTATE), c;
0482 xorl state_h3(RSTATE), d;
0483 movl a, state_h0(RSTATE);
0484 movl b, state_h1(RSTATE);
0485 movl c, state_h2(RSTATE);
0486 movl d, state_h3(RSTATE);
0487 xorl state_h4(RSTATE), e;
0488 xorl state_h5(RSTATE), f;
0489 xorl state_h6(RSTATE), g;
0490 xorl state_h7(RSTATE), h;
0491 movl e, state_h4(RSTATE);
0492 movl f, state_h5(RSTATE);
0493 movl g, state_h6(RSTATE);
0494 movl h, state_h7(RSTATE);
0495
0496 cmpq $0, RNBLKS;
0497 jne .Loop;
0498
0499 vzeroall;
0500
0501 movq (STACK_REG_SAVE + 0 * 8)(%rsp), %rbx;
0502 movq (STACK_REG_SAVE + 1 * 8)(%rsp), %r15;
0503 movq (STACK_REG_SAVE + 2 * 8)(%rsp), %r14;
0504 movq (STACK_REG_SAVE + 3 * 8)(%rsp), %r13;
0505 movq (STACK_REG_SAVE + 4 * 8)(%rsp), %r12;
0506
0507 vmovdqa %xmm0, IW_W1_ADDR(0, 0);
0508 vmovdqa %xmm0, IW_W1W2_ADDR(0, 0);
0509 vmovdqa %xmm0, IW_W1_ADDR(4, 0);
0510 vmovdqa %xmm0, IW_W1W2_ADDR(4, 0);
0511 vmovdqa %xmm0, IW_W1_ADDR(8, 0);
0512 vmovdqa %xmm0, IW_W1W2_ADDR(8, 0);
0513
0514 movq %rbp, %rsp;
0515 popq %rbp;
0516 RET;
0517 SYM_FUNC_END(sm3_transform_avx)