/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * SM3 AVX accelerated transform.
 * specified in: https://datatracker.ietf.org/doc/html/draft-sca-cfrg-sm3-02
 *
 * Copyright (C) 2021 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 * Copyright (C) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
 */

/* Based on SM3 AVX/BMI2 accelerated work by libgcrypt at:
 *  https://gnupg.org/software/libgcrypt/index.html
 */

#include <linux/linkage.h>
#include <asm/frame.h>

/* Context structure */

#define state_h0 0
#define state_h1 4
#define state_h2 8
#define state_h3 12
#define state_h4 16
#define state_h5 20
#define state_h6 24
#define state_h7 28

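/*
 * Note (added): these offsets address the eight 32-bit chaining values
 * at the start of struct sm3_state and must stay in sync with the
 * C-side structure layout.
 */
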
/* Constants */

/* Round constant macros */

#define K0   2043430169  /* 0x79cc4519 */
#define K1   -208106958  /* 0xf3988a32 */
#define K2   -416213915  /* 0xe7311465 */
#define K3   -832427829  /* 0xce6228cb */
#define K4  -1664855657  /* 0x9cc45197 */
#define K5    965255983  /* 0x3988a32f */
#define K6   1930511966  /* 0x7311465e */
#define K7   -433943364  /* 0xe6228cbc */
#define K8   -867886727  /* 0xcc451979 */
#define K9  -1735773453  /* 0x988a32f3 */
#define K10   823420391  /* 0x311465e7 */
#define K11  1646840782  /* 0x6228cbce */
#define K12 -1001285732  /* 0xc451979c */
#define K13 -2002571463  /* 0x88a32f39 */
#define K14   289824371  /* 0x11465e73 */
#define K15   579648742  /* 0x228cbce6 */
#define K16 -1651869049  /* 0x9d8a7a87 */
#define K17   991229199  /* 0x3b14f50f */
#define K18  1982458398  /* 0x7629ea1e */
#define K19  -330050500  /* 0xec53d43c */
#define K20  -660100999  /* 0xd8a7a879 */
#define K21 -1320201997  /* 0xb14f50f3 */
#define K22  1654563303  /* 0x629ea1e7 */
#define K23  -985840690  /* 0xc53d43ce */
#define K24 -1971681379  /* 0x8a7a879d */
#define K25   351604539  /* 0x14f50f3b */
#define K26   703209078  /* 0x29ea1e76 */
#define K27  1406418156  /* 0x53d43cec */
#define K28 -1482130984  /* 0xa7a879d8 */
#define K29  1330705329  /* 0x4f50f3b1 */
#define K30 -1633556638  /* 0x9ea1e762 */
#define K31  1027854021  /* 0x3d43cec5 */
#define K32  2055708042  /* 0x7a879d8a */
#define K33  -183551212  /* 0xf50f3b14 */
#define K34  -367102423  /* 0xea1e7629 */
#define K35  -734204845  /* 0xd43cec53 */
#define K36 -1468409689  /* 0xa879d8a7 */
#define K37  1358147919  /* 0x50f3b14f */
#define K38 -1578671458  /* 0xa1e7629e */
#define K39  1137624381  /* 0x43cec53d */
#define K40 -2019718534  /* 0x879d8a7a */
#define K41   255530229  /* 0x0f3b14f5 */
#define K42   511060458  /* 0x1e7629ea */
#define K43  1022120916  /* 0x3cec53d4 */
#define K44  2044241832  /* 0x79d8a7a8 */
#define K45  -206483632  /* 0xf3b14f50 */
#define K46  -412967263  /* 0xe7629ea1 */
#define K47  -825934525  /* 0xcec53d43 */
#define K48 -1651869049  /* 0x9d8a7a87 */
#define K49   991229199  /* 0x3b14f50f */
#define K50  1982458398  /* 0x7629ea1e */
#define K51  -330050500  /* 0xec53d43c */
#define K52  -660100999  /* 0xd8a7a879 */
#define K53 -1320201997  /* 0xb14f50f3 */
#define K54  1654563303  /* 0x629ea1e7 */
#define K55  -985840690  /* 0xc53d43ce */
#define K56 -1971681379  /* 0x8a7a879d */
#define K57   351604539  /* 0x14f50f3b */
#define K58   703209078  /* 0x29ea1e76 */
#define K59  1406418156  /* 0x53d43cec */
#define K60 -1482130984  /* 0xa7a879d8 */
#define K61  1330705329  /* 0x4f50f3b1 */
#define K62 -1633556638  /* 0x9ea1e762 */
#define K63  1027854021  /* 0x3d43cec5 */

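/*
 * Reference for the table above (an added sketch, not in the upstream
 * file): SM3 defines K[j] = rol32(T, j % 32), with T = 0x79cc4519 for
 * rounds 0..15 and T = 0x7a879d8a for rounds 16..63.  The rotations
 * are precomputed so the round function below needs no extra rotate
 * for the constant; since the rotation count wraps at 32, K48..K63
 * repeat K16..K31.
 *
 *  static uint32_t sm3_k(int j)
 *  {
 *      uint32_t t = (j < 16) ? 0x79cc4519 : 0x7a879d8a;
 *      int r = j & 31;
 *      return (t << r) | (t >> ((32 - r) & 31));
 *  }
 */
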
/* Register macros */

#define RSTATE %rdi
#define RDATA  %rsi
#define RNBLKS %rdx

#define t0 %eax
#define t1 %ebx
#define t2 %ecx

#define a %r8d
#define b %r9d
#define c %r10d
#define d %r11d
#define e %r12d
#define f %r13d
#define g %r14d
#define h %r15d

#define W0 %xmm0
#define W1 %xmm1
#define W2 %xmm2
#define W3 %xmm3
#define W4 %xmm4
#define W5 %xmm5

#define XTMP0 %xmm6
#define XTMP1 %xmm7
#define XTMP2 %xmm8
#define XTMP3 %xmm9
#define XTMP4 %xmm10
#define XTMP5 %xmm11
#define XTMP6 %xmm12

#define BSWAP_REG %xmm15

/* Stack structure */

#define STACK_W_SIZE        (32 * 2 * 3)
#define STACK_REG_SAVE_SIZE (64)

#define STACK_W             (0)
#define STACK_REG_SAVE      (STACK_W + STACK_W_SIZE)
#define STACK_SIZE          (STACK_REG_SAVE + STACK_REG_SAVE_SIZE)
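
/*
 * Layout note (added): STACK_W is three 64-byte slots.  Each slot
 * holds four W values at offset 0 and the matching W ^ W+4 values at
 * offset 32.  The IW_W_ADDR() slots 0..2 below are filled with the
 * byte-swapped input words for rounds 0..11; XW_W_ADDR() then
 * ping-pongs between slots 0 and 1 with freshly expanded words for
 * the remaining rounds.
 */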

/* Instruction helpers. */

#define roll2(v, reg)       \
    roll $(v), reg;

#define roll3mov(v, src, dst)   \
    movl src, dst;      \
    roll $(v), dst;

#define roll3(v, src, dst)  \
    rorxl $(32-(v)), src, dst;

#define addl2(a, out)       \
    leal (a, out), out;
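
/*
 * Note (added): roll3() relies on BMI2 rorxl, a non-destructive rotate
 * that leaves EFLAGS untouched; addl2() uses leal as a three-operand
 * add that likewise does not modify flags.
 */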

/* Round function macros. */

#define GG1(x, y, z, o, t)  \
    movl x, o;      \
    xorl y, o;      \
    xorl z, o;

#define FF1(x, y, z, o, t) GG1(x, y, z, o, t)

#define GG2(x, y, z, o, t)  \
    andnl z, x, o;      \
    movl y, t;      \
    andl x, t;      \
    addl2(t, o);

#define FF2(x, y, z, o, t)  \
    movl y, o;      \
    xorl x, o;      \
    movl y, t;      \
    andl x, t;      \
    andl z, o;      \
    xorl t, o;

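/*
 * Identities (added): FF1/GG1 compute x ^ y ^ z.  FF2 is the majority
 * function (x & y) | (x & z) | (y & z), computed as ((x ^ y) & z) ^
 * (x & y).  GG2 is the choice function (x & y) | (~x & z); the two
 * terms never have a set bit in common, so the final OR can be done
 * with addl2()'s lea-based addition.
 */
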
#define R(i, a, b, c, d, e, f, g, h, round, widx, wtype)        \
    /* rol(a, 12) => t0 */                      \
    roll3mov(12, a, t0); /* rorxl here would reduce perf by 6% on zen3 */ \
    /* rol((t0 + e + K##round), 7) => t1 */             \
    leal K##round(t0, e, 1), t1;                    \
    roll2(7, t1);                           \
    /* h + w1 => h */                       \
    addl wtype##_W1_ADDR(round, widx), h;               \
    /* h + t1 => h */                       \
    addl2(t1, h);                           \
    /* t1 ^ t0 => t0 */                     \
    xorl t1, t0;                            \
    /* w1w2 + d => d */                     \
    addl wtype##_W1W2_ADDR(round, widx), d;             \
    /* FF##i(a,b,c) => t1 */                    \
    FF##i(a, b, c, t1, t2);                     \
    /* d + t1 => d */                       \
    addl2(t1, d);                           \
    /* GG##i(e,f,g) => t2 */                    \
    GG##i(e, f, g, t2, t1);                     \
    /* h + t2 => h */                       \
    addl2(t2, h);                           \
    /* rol (f, 19) => f */                      \
    roll2(19, f);                           \
    /* d + t0 => d */                       \
    addl2(t0, d);                           \
    /* rol (b, 9) => b */                       \
    roll2(9, b);                            \
    /* P0(h) => h */                        \
    roll3(9, h, t2);                        \
    roll3(17, h, t1);                       \
    xorl t2, h;                         \
    xorl t1, h;

#define R1(a, b, c, d, e, f, g, h, round, widx, wtype) \
    R(1, a, b, c, d, e, f, g, h, round, widx, wtype)

#define R2(a, b, c, d, e, f, g, h, round, widx, wtype) \
    R(2, a, b, c, d, e, f, g, h, round, widx, wtype)

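/*
 * Reference round (an added sketch in C, following the SM3 spec; the
 * register rotation a->b->c->d, e->f->g->h is done by permuting the
 * macro arguments instead of moving values):
 *
 *  ss1 = rol32(rol32(a, 12) + e + K[j], 7);
 *  ss2 = ss1 ^ rol32(a, 12);
 *  tt1 = FF(a, b, c) + d + ss2 + (W[j] ^ W[j + 4]);
 *  tt2 = GG(e, f, g) + h + ss1 + W[j];
 *  d = c; c = rol32(b, 9); b = a; a = tt1;
 *  h = g; g = rol32(f, 19); f = e;
 *  e = tt2 ^ rol32(tt2, 9) ^ rol32(tt2, 17);      (P0)
 *
 * The W1 and W1W2 stack slots hold W[j] and W[j] ^ W[j + 4].
 */
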
/* Input expansion macros. */

/* Byte-swapped input address. */
#define IW_W_ADDR(round, widx, offs) \
    (STACK_W + ((round) / 4) * 64 + (offs) + ((widx) * 4))(%rsp)

/* Expanded input address. */
#define XW_W_ADDR(round, widx, offs) \
    (STACK_W + ((((round) / 3) - 4) % 2) * 64 + (offs) + ((widx) * 4))(%rsp)

/* Rounds 0-11, byte-swapped input block addresses. */
#define IW_W1_ADDR(round, widx)   IW_W_ADDR(round, widx, 0)
#define IW_W1W2_ADDR(round, widx) IW_W_ADDR(round, widx, 32)

/* Rounds 12-63, expanded input block addresses. */
#define XW_W1_ADDR(round, widx)   XW_W_ADDR(round, widx, 0)
#define XW_W1W2_ADDR(round, widx) XW_W_ADDR(round, widx, 32)

/* Input block loading. */
#define LOAD_W_XMM_1()                          \
    vmovdqu 0*16(RDATA), XTMP0; /* XTMP0: w3, w2, w1, w0 */     \
    vmovdqu 1*16(RDATA), XTMP1; /* XTMP1: w7, w6, w5, w4 */     \
    vmovdqu 2*16(RDATA), XTMP2; /* XTMP2: w11, w10, w9, w8 */   \
    vmovdqu 3*16(RDATA), XTMP3; /* XTMP3: w15, w14, w13, w12 */ \
    vpshufb BSWAP_REG, XTMP0, XTMP0;                \
    vpshufb BSWAP_REG, XTMP1, XTMP1;                \
    vpshufb BSWAP_REG, XTMP2, XTMP2;                \
    vpshufb BSWAP_REG, XTMP3, XTMP3;                \
    vpxor XTMP0, XTMP1, XTMP4;                  \
    vpxor XTMP1, XTMP2, XTMP5;                  \
    vpxor XTMP2, XTMP3, XTMP6;                  \
    leaq 64(RDATA), RDATA;                      \
    vmovdqa XTMP0, IW_W1_ADDR(0, 0);                \
    vmovdqa XTMP4, IW_W1W2_ADDR(0, 0);              \
    vmovdqa XTMP1, IW_W1_ADDR(4, 0);                \
    vmovdqa XTMP5, IW_W1W2_ADDR(4, 0);

#define LOAD_W_XMM_2()              \
    vmovdqa XTMP2, IW_W1_ADDR(8, 0);    \
    vmovdqa XTMP6, IW_W1W2_ADDR(8, 0);

#define LOAD_W_XMM_3()                          \
    vpshufd $0b00000000, XTMP0, W0; /* W0: xx, w0, xx, xx */    \
    vpshufd $0b11111001, XTMP0, W1; /* W1: xx, w3, w2, w1 */    \
    vmovdqa XTMP1, W2;              /* W2: xx, w6, w5, w4 */    \
    vpalignr $12, XTMP1, XTMP2, W3; /* W3: xx, w9, w8, w7 */    \
    vpalignr $8, XTMP2, XTMP3, W4;  /* W4: xx, w12, w11, w10 */ \
    vpshufd $0b11111001, XTMP3, W5; /* W5: xx, w15, w14, w13 */

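/*
 * Note (added): for rounds 0..11 both W[j] and W[j] ^ W[j + 4] come
 * straight from the byte-swapped input block, so LOAD_W_XMM_1/2 store
 * them without running the message expansion; LOAD_W_XMM_3 then
 * spreads w0..w15 across W0..W5 (three live words per register) to
 * seed the SCHED_W_* macros below.
 */
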
/* Message scheduling. Note: 3 words per XMM register. */
#define SCHED_W_0(round, w0, w1, w2, w3, w4, w5)            \
    /* Load (w[i - 16]) => XTMP0 */                 \
    vpshufd $0b10111111, w0, XTMP0;                 \
    vpalignr $12, XTMP0, w1, XTMP0; /* XTMP0: xx, w2, w1, w0 */ \
    /* Load (w[i - 13]) => XTMP1 */                 \
    vpshufd $0b10111111, w1, XTMP1;                 \
    vpalignr $12, XTMP1, w2, XTMP1;                 \
    /* w[i - 9] == w3 */                        \
    /* w3 ^ XTMP0 => XTMP0 */                   \
    vpxor w3, XTMP0, XTMP0;

#define SCHED_W_1(round, w0, w1, w2, w3, w4, w5)    \
    /* w[i - 3] == w5 */                \
    /* rol(w5, 15) ^ XTMP0 => XTMP0 */      \
    vpslld $15, w5, XTMP2;              \
    vpsrld $(32-15), w5, XTMP3;         \
    vpxor XTMP2, XTMP3, XTMP3;          \
    vpxor XTMP3, XTMP0, XTMP0;          \
    /* rol(XTMP1, 7) => XTMP1 */            \
    vpslld $7, XTMP1, XTMP5;            \
    vpsrld $(32-7), XTMP1, XTMP1;           \
    vpxor XTMP5, XTMP1, XTMP1;          \
    /* w4 ^ XTMP1 => XTMP1 */           \
    vpxor w4, XTMP1, XTMP1;             \
    /* w[i - 6] == w4 */                \
    /* P1(XTMP0) ^ XTMP1 => w0 */           \
    vpslld $15, XTMP0, XTMP5;           \
    vpsrld $(32-15), XTMP0, XTMP6;          \
    vpslld $23, XTMP0, XTMP2;           \
    vpsrld $(32-23), XTMP0, XTMP3;          \
    vpxor XTMP0, XTMP1, XTMP1;          \
    vpxor XTMP6, XTMP5, XTMP5;          \
    vpxor XTMP3, XTMP2, XTMP2;          \
    vpxor XTMP2, XTMP5, XTMP5;          \
    vpxor XTMP5, XTMP1, w0;

#define SCHED_W_2(round, w0, w1, w2, w3, w4, w5)    \
    /* W1 => XTMP4 */               \
    vpshufd $0b10111111, w4, XTMP4;         \
    vpalignr $12, XTMP4, w5, XTMP4;         \
    vmovdqa XTMP4, XW_W1_ADDR((round), 0);      \
    /* W1 ^ W2 => XTMP1 */              \
    vpxor w0, XTMP4, XTMP1;             \
    vmovdqa XTMP1, XW_W1W2_ADDR((round), 0);

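
/*
 * Reference expansion (an added sketch, per the SM3 spec).  One
 * SCHED_W_0/1/2 sequence computes three new message words and stores
 * both W and W ^ W+4 for rounds round..round+2:
 *
 *  W[i] = P1(W[i-16] ^ W[i-9] ^ rol32(W[i-3], 15))
 *         ^ rol32(W[i-13], 7) ^ W[i-6];
 *
 * with P1(x) = x ^ rol32(x, 15) ^ rol32(x, 23).
 */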


.section    .rodata.cst16, "aM", @progbits, 16
.align 16

.Lbe32mask:
    .long 0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f

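/*
 * Note (added): .Lbe32mask above is a vpshufb control mask that
 * reverses the byte order within each 32-bit lane, converting the
 * big-endian message words to little-endian on load.
 */
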
.text

/*
 * Transform nblocks*64 bytes (nblocks*16 32-bit words) at DATA.
 *
 * void sm3_transform_avx(struct sm3_state *state,
 *                        const u8 *data, int nblocks);
 */
.align 16
SYM_FUNC_START(sm3_transform_avx)
    /* input:
     *  %rdi: ctx, CTX
     *  %rsi: data (64*nblks bytes)
     *  %rdx: nblocks
     */
    vzeroupper;

    pushq %rbp;
    movq %rsp, %rbp;

    movq %rdx, RNBLKS;

    subq $STACK_SIZE, %rsp;
    andq $(~63), %rsp;
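
    /* The two instructions above align %rsp down to 64 bytes, so the
     * 16-byte vmovdqa accesses to STACK_W are always aligned (added
     * note).
     */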

    movq %rbx, (STACK_REG_SAVE + 0 * 8)(%rsp);
    movq %r15, (STACK_REG_SAVE + 1 * 8)(%rsp);
    movq %r14, (STACK_REG_SAVE + 2 * 8)(%rsp);
    movq %r13, (STACK_REG_SAVE + 3 * 8)(%rsp);
    movq %r12, (STACK_REG_SAVE + 4 * 8)(%rsp);

    vmovdqa .Lbe32mask (%rip), BSWAP_REG;

    /* Get the values of the chaining variables. */
    movl state_h0(RSTATE), a;
    movl state_h1(RSTATE), b;
    movl state_h2(RSTATE), c;
    movl state_h3(RSTATE), d;
    movl state_h4(RSTATE), e;
    movl state_h5(RSTATE), f;
    movl state_h6(RSTATE), g;
    movl state_h7(RSTATE), h;

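    /*
     * Each .Loop iteration processes one 64-byte block; the message
     * expansion for later rounds is interleaved with the round
     * computations below to hide latency (added note).
     */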
.align 16
.Loop:
    /* Load data part1. */
    LOAD_W_XMM_1();

    leaq -1(RNBLKS), RNBLKS;

    /* Transform 0-3 + Load data part2. */
    R1(a, b, c, d, e, f, g, h, 0, 0, IW); LOAD_W_XMM_2();
    R1(d, a, b, c, h, e, f, g, 1, 1, IW);
    R1(c, d, a, b, g, h, e, f, 2, 2, IW);
    R1(b, c, d, a, f, g, h, e, 3, 3, IW); LOAD_W_XMM_3();

    /* Transform 4-7 + Precalc 12-14. */
    R1(a, b, c, d, e, f, g, h, 4, 0, IW);
    R1(d, a, b, c, h, e, f, g, 5, 1, IW);
    R1(c, d, a, b, g, h, e, f, 6, 2, IW); SCHED_W_0(12, W0, W1, W2, W3, W4, W5);
    R1(b, c, d, a, f, g, h, e, 7, 3, IW); SCHED_W_1(12, W0, W1, W2, W3, W4, W5);

    /* Transform 8-11 + Precalc 12-17. */
    R1(a, b, c, d, e, f, g, h, 8, 0, IW); SCHED_W_2(12, W0, W1, W2, W3, W4, W5);
    R1(d, a, b, c, h, e, f, g, 9, 1, IW); SCHED_W_0(15, W1, W2, W3, W4, W5, W0);
    R1(c, d, a, b, g, h, e, f, 10, 2, IW); SCHED_W_1(15, W1, W2, W3, W4, W5, W0);
    R1(b, c, d, a, f, g, h, e, 11, 3, IW); SCHED_W_2(15, W1, W2, W3, W4, W5, W0);

    /* Transform 12-14 + Precalc 18-20 */
    R1(a, b, c, d, e, f, g, h, 12, 0, XW); SCHED_W_0(18, W2, W3, W4, W5, W0, W1);
    R1(d, a, b, c, h, e, f, g, 13, 1, XW); SCHED_W_1(18, W2, W3, W4, W5, W0, W1);
    R1(c, d, a, b, g, h, e, f, 14, 2, XW); SCHED_W_2(18, W2, W3, W4, W5, W0, W1);

    /* Transform 15-17 + Precalc 21-23 */
    R1(b, c, d, a, f, g, h, e, 15, 0, XW); SCHED_W_0(21, W3, W4, W5, W0, W1, W2);
    R2(a, b, c, d, e, f, g, h, 16, 1, XW); SCHED_W_1(21, W3, W4, W5, W0, W1, W2);
    R2(d, a, b, c, h, e, f, g, 17, 2, XW); SCHED_W_2(21, W3, W4, W5, W0, W1, W2);

    /* Transform 18-20 + Precalc 24-26 */
    R2(c, d, a, b, g, h, e, f, 18, 0, XW); SCHED_W_0(24, W4, W5, W0, W1, W2, W3);
    R2(b, c, d, a, f, g, h, e, 19, 1, XW); SCHED_W_1(24, W4, W5, W0, W1, W2, W3);
    R2(a, b, c, d, e, f, g, h, 20, 2, XW); SCHED_W_2(24, W4, W5, W0, W1, W2, W3);

    /* Transform 21-23 + Precalc 27-29 */
    R2(d, a, b, c, h, e, f, g, 21, 0, XW); SCHED_W_0(27, W5, W0, W1, W2, W3, W4);
    R2(c, d, a, b, g, h, e, f, 22, 1, XW); SCHED_W_1(27, W5, W0, W1, W2, W3, W4);
    R2(b, c, d, a, f, g, h, e, 23, 2, XW); SCHED_W_2(27, W5, W0, W1, W2, W3, W4);

    /* Transform 24-26 + Precalc 30-32 */
    R2(a, b, c, d, e, f, g, h, 24, 0, XW); SCHED_W_0(30, W0, W1, W2, W3, W4, W5);
    R2(d, a, b, c, h, e, f, g, 25, 1, XW); SCHED_W_1(30, W0, W1, W2, W3, W4, W5);
    R2(c, d, a, b, g, h, e, f, 26, 2, XW); SCHED_W_2(30, W0, W1, W2, W3, W4, W5);

    /* Transform 27-29 + Precalc 33-35 */
    R2(b, c, d, a, f, g, h, e, 27, 0, XW); SCHED_W_0(33, W1, W2, W3, W4, W5, W0);
    R2(a, b, c, d, e, f, g, h, 28, 1, XW); SCHED_W_1(33, W1, W2, W3, W4, W5, W0);
    R2(d, a, b, c, h, e, f, g, 29, 2, XW); SCHED_W_2(33, W1, W2, W3, W4, W5, W0);

    /* Transform 30-32 + Precalc 36-38 */
    R2(c, d, a, b, g, h, e, f, 30, 0, XW); SCHED_W_0(36, W2, W3, W4, W5, W0, W1);
    R2(b, c, d, a, f, g, h, e, 31, 1, XW); SCHED_W_1(36, W2, W3, W4, W5, W0, W1);
    R2(a, b, c, d, e, f, g, h, 32, 2, XW); SCHED_W_2(36, W2, W3, W4, W5, W0, W1);

    /* Transform 33-35 + Precalc 39-41 */
    R2(d, a, b, c, h, e, f, g, 33, 0, XW); SCHED_W_0(39, W3, W4, W5, W0, W1, W2);
    R2(c, d, a, b, g, h, e, f, 34, 1, XW); SCHED_W_1(39, W3, W4, W5, W0, W1, W2);
    R2(b, c, d, a, f, g, h, e, 35, 2, XW); SCHED_W_2(39, W3, W4, W5, W0, W1, W2);

    /* Transform 36-38 + Precalc 42-44 */
    R2(a, b, c, d, e, f, g, h, 36, 0, XW); SCHED_W_0(42, W4, W5, W0, W1, W2, W3);
    R2(d, a, b, c, h, e, f, g, 37, 1, XW); SCHED_W_1(42, W4, W5, W0, W1, W2, W3);
    R2(c, d, a, b, g, h, e, f, 38, 2, XW); SCHED_W_2(42, W4, W5, W0, W1, W2, W3);

    /* Transform 39-41 + Precalc 45-47 */
    R2(b, c, d, a, f, g, h, e, 39, 0, XW); SCHED_W_0(45, W5, W0, W1, W2, W3, W4);
    R2(a, b, c, d, e, f, g, h, 40, 1, XW); SCHED_W_1(45, W5, W0, W1, W2, W3, W4);
    R2(d, a, b, c, h, e, f, g, 41, 2, XW); SCHED_W_2(45, W5, W0, W1, W2, W3, W4);

    /* Transform 42-44 + Precalc 48-50 */
    R2(c, d, a, b, g, h, e, f, 42, 0, XW); SCHED_W_0(48, W0, W1, W2, W3, W4, W5);
    R2(b, c, d, a, f, g, h, e, 43, 1, XW); SCHED_W_1(48, W0, W1, W2, W3, W4, W5);
    R2(a, b, c, d, e, f, g, h, 44, 2, XW); SCHED_W_2(48, W0, W1, W2, W3, W4, W5);

    /* Transform 45-47 + Precalc 51-53 */
    R2(d, a, b, c, h, e, f, g, 45, 0, XW); SCHED_W_0(51, W1, W2, W3, W4, W5, W0);
    R2(c, d, a, b, g, h, e, f, 46, 1, XW); SCHED_W_1(51, W1, W2, W3, W4, W5, W0);
    R2(b, c, d, a, f, g, h, e, 47, 2, XW); SCHED_W_2(51, W1, W2, W3, W4, W5, W0);

    /* Transform 48-50 + Precalc 54-56 */
    R2(a, b, c, d, e, f, g, h, 48, 0, XW); SCHED_W_0(54, W2, W3, W4, W5, W0, W1);
    R2(d, a, b, c, h, e, f, g, 49, 1, XW); SCHED_W_1(54, W2, W3, W4, W5, W0, W1);
    R2(c, d, a, b, g, h, e, f, 50, 2, XW); SCHED_W_2(54, W2, W3, W4, W5, W0, W1);

    /* Transform 51-53 + Precalc 57-59 */
    R2(b, c, d, a, f, g, h, e, 51, 0, XW); SCHED_W_0(57, W3, W4, W5, W0, W1, W2);
    R2(a, b, c, d, e, f, g, h, 52, 1, XW); SCHED_W_1(57, W3, W4, W5, W0, W1, W2);
    R2(d, a, b, c, h, e, f, g, 53, 2, XW); SCHED_W_2(57, W3, W4, W5, W0, W1, W2);

    /* Transform 54-56 + Precalc 60-62 */
    R2(c, d, a, b, g, h, e, f, 54, 0, XW); SCHED_W_0(60, W4, W5, W0, W1, W2, W3);
    R2(b, c, d, a, f, g, h, e, 55, 1, XW); SCHED_W_1(60, W4, W5, W0, W1, W2, W3);
    R2(a, b, c, d, e, f, g, h, 56, 2, XW); SCHED_W_2(60, W4, W5, W0, W1, W2, W3);

    /* Transform 57-59 + Precalc 63 */
    R2(d, a, b, c, h, e, f, g, 57, 0, XW); SCHED_W_0(63, W5, W0, W1, W2, W3, W4);
    R2(c, d, a, b, g, h, e, f, 58, 1, XW);
    R2(b, c, d, a, f, g, h, e, 59, 2, XW); SCHED_W_1(63, W5, W0, W1, W2, W3, W4);

    /* Transform 60-62 + Precalc 63 */
    R2(a, b, c, d, e, f, g, h, 60, 0, XW);
    R2(d, a, b, c, h, e, f, g, 61, 1, XW); SCHED_W_2(63, W5, W0, W1, W2, W3, W4);
    R2(c, d, a, b, g, h, e, f, 62, 2, XW);

    /* Transform 63 */
    R2(b, c, d, a, f, g, h, e, 63, 0, XW);

    /* Update the chaining variables: SM3's feed-forward xors the
     * working state into the previous state, unlike SHA-2's addition.
     */
    xorl state_h0(RSTATE), a;
    xorl state_h1(RSTATE), b;
    xorl state_h2(RSTATE), c;
    xorl state_h3(RSTATE), d;
    movl a, state_h0(RSTATE);
    movl b, state_h1(RSTATE);
    movl c, state_h2(RSTATE);
    movl d, state_h3(RSTATE);
    xorl state_h4(RSTATE), e;
    xorl state_h5(RSTATE), f;
    xorl state_h6(RSTATE), g;
    xorl state_h7(RSTATE), h;
    movl e, state_h4(RSTATE);
    movl f, state_h5(RSTATE);
    movl g, state_h6(RSTATE);
    movl h, state_h7(RSTATE);

    cmpq $0, RNBLKS;
    jne .Loop;

    vzeroall;

    movq (STACK_REG_SAVE + 0 * 8)(%rsp), %rbx;
    movq (STACK_REG_SAVE + 1 * 8)(%rsp), %r15;
    movq (STACK_REG_SAVE + 2 * 8)(%rsp), %r14;
    movq (STACK_REG_SAVE + 3 * 8)(%rsp), %r13;
    movq (STACK_REG_SAVE + 4 * 8)(%rsp), %r12;

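    /* %xmm0 is zero after vzeroall above; these stores wipe the
     * message words left in the stack W area (added note). */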
    vmovdqa %xmm0, IW_W1_ADDR(0, 0);
    vmovdqa %xmm0, IW_W1W2_ADDR(0, 0);
    vmovdqa %xmm0, IW_W1_ADDR(4, 0);
    vmovdqa %xmm0, IW_W1W2_ADDR(4, 0);
    vmovdqa %xmm0, IW_W1_ADDR(8, 0);
    vmovdqa %xmm0, IW_W1W2_ADDR(8, 0);

    movq %rbp, %rsp;
    popq %rbp;
    RET;
SYM_FUNC_END(sm3_transform_avx)