Back to home page

OSCL-LXR

 
 

    


0001 /* SPDX-License-Identifier: GPL-2.0-or-later */
0002 /***************************************************************************
0003 *   Copyright (C) 2006 by Joachim Fritschi, <jfritschi@freenet.de>        *
0004 *                                                                         *
0005 ***************************************************************************/
0006 
0007 .file "twofish-x86_64-asm.S"
0008 .text
0009 
0010 #include <linux/linkage.h>
0011 #include <asm/asm-offsets.h>
0012 
0013 #define a_offset    0
0014 #define b_offset    4
0015 #define c_offset    8
0016 #define d_offset    12
0017 
0018 /* Structure of the crypto context struct */
/*
 * Byte offsets into the Twofish crypto context reached via %r11.
 * The 1024-byte spacing of s0..s3 shows each s-box table holds
 * 256 u32 entries.  NOTE(review): assumes the context layout is
 * { u32 s[4][256]; u32 w[8]; u32 k[32] } -- confirm against the
 * C-side struct definition in twofish_common.
 */
0019 
0020 #define s0  0   /* S0 Array 256 Words each */
0021 #define s1  1024    /* S1 Array */
0022 #define s2  2048    /* S2 Array */
0023 #define s3  3072    /* S3 Array */
0024 #define w   4096    /* 8 whitening keys (word) */
0025 #define k   4128    /* key 1-32 ( word ) */
0026 
0027 /* define a few register aliases to allow macro substitution */
/*
 * The round macros read both the low byte (R?B) and the high byte
 * (R?H) of each 32-bit half as s-box indices.  Only the four legacy
 * registers rax/rbx/rcx/rdx have high-byte views (%ah/%bh/%ch/%dh),
 * which is why R0-R3 must live in exactly these registers and why
 * the ctx pointer is parked in %r11 (see the function bodies below).
 */
0028 
0029 #define R0     %rax
0030 #define R0D    %eax
0031 #define R0B    %al
0032 #define R0H    %ah
0033 
0034 #define R1     %rbx
0035 #define R1D    %ebx
0036 #define R1B    %bl
0037 #define R1H    %bh
0038 
0039 #define R2     %rcx
0040 #define R2D    %ecx
0041 #define R2B    %cl
0042 #define R2H    %ch
0043 
0044 #define R3     %rdx
0045 #define R3D    %edx
0046 #define R3B    %dl
0047 #define R3H    %dh
0048 
0049 
0050 /* performs input whitening: xors whitening keys w[0..3] into src */
0051 #define input_whitening(src,context,offset)\
0052     xor w+offset(context),  src;
0053 
0054 /* performs output whitening: xors whitening keys w[4..7] (at w+16) into src */
0055 #define output_whitening(src,context,offset)\
0056     xor w+16+offset(context),   src;
0057 
0058 
0059 /*
0060  * a input register containing a (rotated 16)
0061  * b input register containing b
0062  * c input register containing c
0063  * d input register containing d (already rol $1)
0064  * operations on a and b are interleaved to increase performance
 *
 * One full Twofish encryption round.  Implicit contract:
 *   %r11  = ctx base (key-dependent s-box tables s0..s3, round keys k)
 *   %edi  = scratch for the byte indices (movzx targets)
 *   %r9d  = g-function of a's four bytes, %r8d = g-function of b's
 * The add/add pair is the pseudo-Hadamard transform:
 *   %r9d = t0+t1, %r8d = t0+2*t1, then the round keys are added.
 * Rotate folding: "ror $15" on b nets a rol $1 on top of the earlier
 * ror $16, pre-rotating what becomes next round's d; "rol $15" on c
 * folds Twofish's post-xor ror $1 with a rol $16 so c is already
 * "(rotated 16)" when the a/b and c/d register pairs swap roles and
 * it becomes next round's a.
0065  */
0066 #define encrypt_round(a,b,c,d,round)\
0067     movzx   b ## B,     %edi;\
0068     mov s1(%r11,%rdi,4),%r8d;\
0069     movzx   a ## B,     %edi;\
0070     mov s2(%r11,%rdi,4),%r9d;\
0071     movzx   b ## H,     %edi;\
0072     ror $16,        b ## D;\
0073     xor s2(%r11,%rdi,4),%r8d;\
0074     movzx   a ## H,     %edi;\
0075     ror $16,        a ## D;\
0076     xor s3(%r11,%rdi,4),%r9d;\
0077     movzx   b ## B,     %edi;\
0078     xor s3(%r11,%rdi,4),%r8d;\
0079     movzx   a ## B,     %edi;\
0080     xor (%r11,%rdi,4),  %r9d;\
0081     movzx   b ## H,     %edi;\
0082     ror $15,        b ## D;\
0083     xor (%r11,%rdi,4),  %r8d;\
0084     movzx   a ## H,     %edi;\
0085     xor s1(%r11,%rdi,4),%r9d;\
0086     add %r8d,       %r9d;\
0087     add %r9d,       %r8d;\
0088     add k+round(%r11),  %r9d;\
0089     xor %r9d,       c ## D;\
0090     rol $15,        c ## D;\
0091     add k+4+round(%r11),%r8d;\
0092     xor %r8d,       d ## D;
0093 
0094 /*
0095  * a input register containing a(rotated 16)
0096  * b input register containing b
0097  * c input register containing c
0098  * d input register containing d (already rol $1)
0099  * operations on a and b are interleaved to increase performance
0100  * during the round a and b are prepared for the output whitening
 *
 * Same data flow as encrypt_round, with two differences:
 *  - b (captured into %r10d before its index rotations, i.e. while
 *    still plain) becomes the high dword of %r10 and a (xored in
 *    after its ror $16 restored it to plain) the low dword, packing
 *    the first output qword for the caller's output whitening;
 *  - c only needs the plain "ror $1" since no further round follows.
0101  */
0102 #define encrypt_last_round(a,b,c,d,round)\
0103     mov b ## D,     %r10d;\
0104     shl $32,        %r10;\
0105     movzx   b ## B,     %edi;\
0106     mov s1(%r11,%rdi,4),%r8d;\
0107     movzx   a ## B,     %edi;\
0108     mov s2(%r11,%rdi,4),%r9d;\
0109     movzx   b ## H,     %edi;\
0110     ror $16,        b ## D;\
0111     xor s2(%r11,%rdi,4),%r8d;\
0112     movzx   a ## H,     %edi;\
0113     ror $16,        a ## D;\
0114     xor s3(%r11,%rdi,4),%r9d;\
0115     movzx   b ## B,     %edi;\
0116     xor s3(%r11,%rdi,4),%r8d;\
0117     movzx   a ## B,     %edi;\
0118     xor (%r11,%rdi,4),  %r9d;\
0119     xor a,      %r10;\
0120     movzx   b ## H,     %edi;\
0121     xor (%r11,%rdi,4),  %r8d;\
0122     movzx   a ## H,     %edi;\
0123     xor s1(%r11,%rdi,4),%r9d;\
0124     add %r8d,       %r9d;\
0125     add %r9d,       %r8d;\
0126     add k+round(%r11),  %r9d;\
0127     xor %r9d,       c ## D;\
0128     ror $1,     c ## D;\
0129     add k+4+round(%r11),%r8d;\
0130     xor %r8d,       d ## D
0131 
0132 /*
0133  * a input register containing a
0134  * b input register containing b (rotated 16)
0135  * c input register containing c (already rol $1)
0136  * d input register containing d
0137  * operations on a and b are interleaved to increase performance
 *
 * Mirror image of encrypt_round for decryption.  Implicit contract:
 *   %r11 = ctx base, %edi = scratch index, %r9d/%r8d hold the two
 *   g-function results combined by the pseudo-Hadamard adds before
 *   the round keys are added.
 * Rotate folding: "ror $15" on a nets a rol $1 on top of the earlier
 * ror $16, pre-rotating what becomes next round's c ("already
 * rol $1"); the final "rol $15" on d folds the post-xor ror $1 with
 * a rol $16 so d is already "(rotated 16)" when the register pairs
 * swap roles and it becomes next round's b.
0138  */
0139 #define decrypt_round(a,b,c,d,round)\
0140     movzx   a ## B,     %edi;\
0141     mov (%r11,%rdi,4),  %r9d;\
0142     movzx   b ## B,     %edi;\
0143     mov s3(%r11,%rdi,4),%r8d;\
0144     movzx   a ## H,     %edi;\
0145     ror $16,        a ## D;\
0146     xor s1(%r11,%rdi,4),%r9d;\
0147     movzx   b ## H,     %edi;\
0148     ror $16,        b ## D;\
0149     xor (%r11,%rdi,4),  %r8d;\
0150     movzx   a ## B,     %edi;\
0151     xor s2(%r11,%rdi,4),%r9d;\
0152     movzx   b ## B,     %edi;\
0153     xor s1(%r11,%rdi,4),%r8d;\
0154     movzx   a ## H,     %edi;\
0155     ror $15,        a ## D;\
0156     xor s3(%r11,%rdi,4),%r9d;\
0157     movzx   b ## H,     %edi;\
0158     xor s2(%r11,%rdi,4),%r8d;\
0159     add %r8d,       %r9d;\
0160     add %r9d,       %r8d;\
0161     add k+round(%r11),  %r9d;\
0162     xor %r9d,       c ## D;\
0163     add k+4+round(%r11),%r8d;\
0164     xor %r8d,       d ## D;\
0165     rol $15,        d ## D;
0166 
0167 /*
0168  * a input register containing a
0169  * b input register containing b
0170  * c input register containing c (already rol $1)
0171  * d input register containing d
0172  * operations on a and b are interleaved to increase performance
0173  * during the round a and b are prepared for the output whitening
 *
 * Same data flow as decrypt_round, with two differences:
 *  - b and a are captured at the points where they hold their plain
 *    values (b right after its ror $16, a just before its ror $16)
 *    and packed into %r10 (b high dword, a low dword) as the first
 *    output qword for the caller's whitening xor;
 *  - d only needs the plain "ror $1" since no further round follows.
0174  */
0175 #define decrypt_last_round(a,b,c,d,round)\
0176     movzx   a ## B,     %edi;\
0177     mov (%r11,%rdi,4),  %r9d;\
0178     movzx   b ## B,     %edi;\
0179     mov s3(%r11,%rdi,4),%r8d;\
0180     movzx   b ## H,     %edi;\
0181     ror $16,        b ## D;\
0182     xor (%r11,%rdi,4),  %r8d;\
0183     movzx   a ## H,     %edi;\
0184     mov b ## D,     %r10d;\
0185     shl $32,        %r10;\
0186     xor a,      %r10;\
0187     ror $16,        a ## D;\
0188     xor s1(%r11,%rdi,4),%r9d;\
0189     movzx   b ## B,     %edi;\
0190     xor s1(%r11,%rdi,4),%r8d;\
0191     movzx   a ## B,     %edi;\
0192     xor s2(%r11,%rdi,4),%r9d;\
0193     movzx   b ## H,     %edi;\
0194     xor s2(%r11,%rdi,4),%r8d;\
0195     movzx   a ## H,     %edi;\
0196     xor s3(%r11,%rdi,4),%r9d;\
0197     add %r8d,       %r9d;\
0198     add %r9d,       %r8d;\
0199     add k+round(%r11),  %r9d;\
0200     xor %r9d,       c ## D;\
0201     add k+4+round(%r11),%r8d;\
0202     xor %r8d,       d ## D;\
0203     ror $1,     d ## D;
0204 
/*
 * twofish_enc_blk(ctx, dst, src) -- encrypt one 16-byte Twofish block.
 *
 * In:  %rdi = crypto context (key-dependent s-boxes + round keys),
 *      %rsi = destination buffer (16 bytes),
 *      %rdx = source buffer (16 bytes).
 * Out: ciphertext written to (%rsi); %eax = 1.
 * Clobbers: %rax, %rcx, %rdx, %rdi, %r8-%r11, flags.
 *           %rbx (R1) is callee-saved and is pushed/popped here.
 */
0205 SYM_FUNC_START(twofish_enc_blk)
0206     pushq    R1     /* R1 = %rbx is callee-saved */
0207 
0208     /* %rdi contains the ctx address */
0209     /* %rsi contains the output address */
0210     /* %rdx contains the input address */
0211     /* ctx address is moved to free one non-rex register
0212     as target for the 8bit high operations */
0213     mov %rdi,       %r11
0214 
     /* Load and input-whiten the two plaintext qwords; the input
      * pointer arrived in %rdx, which is R3, so read it before R3 is
      * reused as a round register. */
0215     movq    (R3),   R1
0216     movq    8(R3),  R3
0217     input_whitening(R1,%r11,a_offset)
0218     input_whitening(R3,%r11,c_offset)
     /* Split the qwords into the four dwords a..d (R0D..R3D) and
      * pre-rotate a by 16 and d by 1 -- the form encrypt_round expects. */
0219     mov R1D,    R0D
0220     rol $16,    R0D
0221     shr $32,    R1
0222     mov R3D,    R2D
0223     shr $32,    R3
0224     rol $1, R3D
0225 
     /* 16 rounds; the (a,b) and (c,d) pairs swap every round, and the
      * round-key offset advances by 8 bytes (two u32 keys) per round. */
0226     encrypt_round(R0,R1,R2,R3,0);
0227     encrypt_round(R2,R3,R0,R1,8);
0228     encrypt_round(R0,R1,R2,R3,2*8);
0229     encrypt_round(R2,R3,R0,R1,3*8);
0230     encrypt_round(R0,R1,R2,R3,4*8);
0231     encrypt_round(R2,R3,R0,R1,5*8);
0232     encrypt_round(R0,R1,R2,R3,6*8);
0233     encrypt_round(R2,R3,R0,R1,7*8);
0234     encrypt_round(R0,R1,R2,R3,8*8);
0235     encrypt_round(R2,R3,R0,R1,9*8);
0236     encrypt_round(R0,R1,R2,R3,10*8);
0237     encrypt_round(R2,R3,R0,R1,11*8);
0238     encrypt_round(R0,R1,R2,R3,12*8);
0239     encrypt_round(R2,R3,R0,R1,13*8);
0240     encrypt_round(R0,R1,R2,R3,14*8);
0241     encrypt_last_round(R2,R3,R0,R1,15*8);
0242 
0243 
     /* encrypt_last_round packed the first output qword into %r10 */
0244     output_whitening(%r10,%r11,a_offset)
0245     movq    %r10,   (%rsi)
0246 
     /* pack the second output qword: R1 high dword, R0 low dword */
0247     shl $32,    R1
0248     xor R0, R1
0249 
0250     output_whitening(R1,%r11,c_offset)
0251     movq    R1, 8(%rsi)
0252 
0253     popq    R1
0254     movl    $1,%eax     /* NOTE(review): fixed return value 1; callers likely treat this as void -- confirm */
0255     RET
0256 SYM_FUNC_END(twofish_enc_blk)
0257 
/*
 * twofish_dec_blk(ctx, dst, src) -- decrypt one 16-byte Twofish block.
 *
 * In:  %rdi = crypto context (key-dependent s-boxes + round keys),
 *      %rsi = destination buffer (16 bytes),
 *      %rdx = source buffer (16 bytes).
 * Out: plaintext written to (%rsi); %eax = 1.
 * Clobbers: %rax, %rcx, %rdx, %rdi, %r8-%r11, flags.
 *           %rbx (R1) is callee-saved and is pushed/popped here.
 *
 * Decryption runs encryption backwards: output whitening is undone
 * first, the rounds use the key schedule from 15*8 down to 0, and
 * input whitening is undone last.
 */
0258 SYM_FUNC_START(twofish_dec_blk)
0259     pushq    R1     /* R1 = %rbx is callee-saved */
0260 
0261     /* %rdi contains the ctx address */
0262     /* %rsi contains the output address */
0263     /* %rdx contains the input address */
0264     /* ctx address is moved to free one non-rex register
0265     as target for the 8bit high operations */
0266     mov %rdi,       %r11
0267 
     /* Load the two ciphertext qwords (input pointer is %rdx == R3;
      * read it before R3 is reused) and undo the output whitening. */
0268     movq    (R3),   R1
0269     movq    8(R3),  R3
0270     output_whitening(R1,%r11,a_offset)
0271     output_whitening(R3,%r11,c_offset)
     /* Split into a..d (R0D..R3D) and pre-rotate b by 16 and c by 1 --
      * the form decrypt_round expects. */
0272     mov R1D,    R0D
0273     shr $32,    R1
0274     rol $16,    R1D
0275     mov R3D,    R2D
0276     shr $32,    R3
0277     rol $1, R2D
0278 
     /* 16 rounds with the key schedule walked in reverse */
0279     decrypt_round(R0,R1,R2,R3,15*8);
0280     decrypt_round(R2,R3,R0,R1,14*8);
0281     decrypt_round(R0,R1,R2,R3,13*8);
0282     decrypt_round(R2,R3,R0,R1,12*8);
0283     decrypt_round(R0,R1,R2,R3,11*8);
0284     decrypt_round(R2,R3,R0,R1,10*8);
0285     decrypt_round(R0,R1,R2,R3,9*8);
0286     decrypt_round(R2,R3,R0,R1,8*8);
0287     decrypt_round(R0,R1,R2,R3,7*8);
0288     decrypt_round(R2,R3,R0,R1,6*8);
0289     decrypt_round(R0,R1,R2,R3,5*8);
0290     decrypt_round(R2,R3,R0,R1,4*8);
0291     decrypt_round(R0,R1,R2,R3,3*8);
0292     decrypt_round(R2,R3,R0,R1,2*8);
0293     decrypt_round(R0,R1,R2,R3,1*8);
0294     decrypt_last_round(R2,R3,R0,R1,0);
0295 
     /* decrypt_last_round packed the first output qword into %r10 */
0296     input_whitening(%r10,%r11,a_offset)
0297     movq    %r10,   (%rsi)
0298 
     /* pack the second output qword: R1 high dword, R0 low dword */
0299     shl $32,    R1
0300     xor R0, R1
0301 
0302     input_whitening(R1,%r11,c_offset)
0303     movq    R1, 8(%rsi)
0304 
0305     popq    R1
0306     movl    $1,%eax     /* NOTE(review): fixed return value 1; callers likely treat this as void -- confirm */
0307     RET
0308 SYM_FUNC_END(twofish_dec_blk)