/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * SM4, as specified in
 * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html
 *
 * Copyright (C) 2018 ARM Limited or its affiliates.
 * Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
 */

#include <linux/module.h>
#include <asm/unaligned.h>
#include <crypto/sm4.h>

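/* The system parameter FK, as defined in the SM4 specification */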
static const u32 ____cacheline_aligned fk[4] = {
    0xa3b1bac6, 0x56aa3350, 0x677d9197, 0xb27022dc
};

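/*
 * The constant parameter CK from the SM4 specification; byte j of word i
 * is (4 * i + j) * 7 (mod 256).
 */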
static const u32 ____cacheline_aligned ck[32] = {
    0x00070e15, 0x1c232a31, 0x383f464d, 0x545b6269,
    0x70777e85, 0x8c939aa1, 0xa8afb6bd, 0xc4cbd2d9,
    0xe0e7eef5, 0xfc030a11, 0x181f262d, 0x343b4249,
    0x50575e65, 0x6c737a81, 0x888f969d, 0xa4abb2b9,
    0xc0c7ced5, 0xdce3eaf1, 0xf8ff060d, 0x141b2229,
    0x30373e45, 0x4c535a61, 0x686f767d, 0x848b9299,
    0xa0a7aeb5, 0xbcc3cad1, 0xd8dfe6ed, 0xf4fb0209,
    0x10171e25, 0x2c333a41, 0x484f565d, 0x646b7279
};

static const u8 ____cacheline_aligned sbox[256] = {
    0xd6, 0x90, 0xe9, 0xfe, 0xcc, 0xe1, 0x3d, 0xb7,
    0x16, 0xb6, 0x14, 0xc2, 0x28, 0xfb, 0x2c, 0x05,
    0x2b, 0x67, 0x9a, 0x76, 0x2a, 0xbe, 0x04, 0xc3,
    0xaa, 0x44, 0x13, 0x26, 0x49, 0x86, 0x06, 0x99,
    0x9c, 0x42, 0x50, 0xf4, 0x91, 0xef, 0x98, 0x7a,
    0x33, 0x54, 0x0b, 0x43, 0xed, 0xcf, 0xac, 0x62,
    0xe4, 0xb3, 0x1c, 0xa9, 0xc9, 0x08, 0xe8, 0x95,
    0x80, 0xdf, 0x94, 0xfa, 0x75, 0x8f, 0x3f, 0xa6,
    0x47, 0x07, 0xa7, 0xfc, 0xf3, 0x73, 0x17, 0xba,
    0x83, 0x59, 0x3c, 0x19, 0xe6, 0x85, 0x4f, 0xa8,
    0x68, 0x6b, 0x81, 0xb2, 0x71, 0x64, 0xda, 0x8b,
    0xf8, 0xeb, 0x0f, 0x4b, 0x70, 0x56, 0x9d, 0x35,
    0x1e, 0x24, 0x0e, 0x5e, 0x63, 0x58, 0xd1, 0xa2,
    0x25, 0x22, 0x7c, 0x3b, 0x01, 0x21, 0x78, 0x87,
    0xd4, 0x00, 0x46, 0x57, 0x9f, 0xd3, 0x27, 0x52,
    0x4c, 0x36, 0x02, 0xe7, 0xa0, 0xc4, 0xc8, 0x9e,
    0xea, 0xbf, 0x8a, 0xd2, 0x40, 0xc7, 0x38, 0xb5,
    0xa3, 0xf7, 0xf2, 0xce, 0xf9, 0x61, 0x15, 0xa1,
    0xe0, 0xae, 0x5d, 0xa4, 0x9b, 0x34, 0x1a, 0x55,
    0xad, 0x93, 0x32, 0x30, 0xf5, 0x8c, 0xb1, 0xe3,
    0x1d, 0xf6, 0xe2, 0x2e, 0x82, 0x66, 0xca, 0x60,
    0xc0, 0x29, 0x23, 0xab, 0x0d, 0x53, 0x4e, 0x6f,
    0xd5, 0xdb, 0x37, 0x45, 0xde, 0xfd, 0x8e, 0x2f,
    0x03, 0xff, 0x6a, 0x72, 0x6d, 0x6c, 0x5b, 0x51,
    0x8d, 0x1b, 0xaf, 0x92, 0xbb, 0xdd, 0xbc, 0x7f,
    0x11, 0xd9, 0x5c, 0x41, 0x1f, 0x10, 0x5a, 0xd8,
    0x0a, 0xc1, 0x31, 0x88, 0xa5, 0xcd, 0x7b, 0xbd,
    0x2d, 0x74, 0xd0, 0x12, 0xb8, 0xe5, 0xb4, 0xb0,
    0x89, 0x69, 0x97, 0x4a, 0x0c, 0x96, 0x77, 0x7e,
    0x65, 0xb9, 0xf1, 0x09, 0xc5, 0x6e, 0xc6, 0x84,
    0x18, 0xf0, 0x7d, 0xec, 0x3a, 0xdc, 0x4d, 0x20,
    0x79, 0xee, 0x5f, 0x3e, 0xd7, 0xcb, 0x39, 0x48
};

extern const u32 crypto_sm4_fk[4] __alias(fk);
extern const u32 crypto_sm4_ck[32] __alias(ck);
extern const u8 crypto_sm4_sbox[256] __alias(sbox);

EXPORT_SYMBOL(crypto_sm4_fk);
EXPORT_SYMBOL(crypto_sm4_ck);
EXPORT_SYMBOL(crypto_sm4_sbox);

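/*
 * The helpers below implement the SM4 building blocks: sm4_t_non_lin_sub()
 * is the non-linear substitution tau (the S-box applied to each byte of a
 * 32-bit word), sm4_key_lin_sub() and sm4_enc_lin_sub() are the linear
 * transforms L' and L used by the key schedule and the cipher respectively,
 * and sm4_round() computes one round as x0 ^ T(x1 ^ x2 ^ x3 ^ rk), where T
 * is the composition of the linear transform with tau.
 */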
static inline u32 sm4_t_non_lin_sub(u32 x)
{
    u32 out;

    out  = (u32)sbox[x & 0xff];
    out |= (u32)sbox[(x >> 8) & 0xff] << 8;
    out |= (u32)sbox[(x >> 16) & 0xff] << 16;
    out |= (u32)sbox[(x >> 24) & 0xff] << 24;

    return out;
}

static inline u32 sm4_key_lin_sub(u32 x)
{
    return x ^ rol32(x, 13) ^ rol32(x, 23);
}

static inline u32 sm4_enc_lin_sub(u32 x)
{
    return x ^ rol32(x, 2) ^ rol32(x, 10) ^ rol32(x, 18) ^ rol32(x, 24);
}

static inline u32 sm4_key_sub(u32 x)
{
    return sm4_key_lin_sub(sm4_t_non_lin_sub(x));
}

static inline u32 sm4_enc_sub(u32 x)
{
    return sm4_enc_lin_sub(sm4_t_non_lin_sub(x));
}

static inline u32 sm4_round(u32 x0, u32 x1, u32 x2, u32 x3, u32 rk)
{
    return x0 ^ sm4_enc_sub(x1 ^ x2 ^ x3 ^ rk);
}

/**
 * sm4_expandkey - Expands the SM4 key as described in GB/T 32907-2016
 * @ctx:	The location where the computed key will be stored.
 * @in_key:	The supplied key.
 * @key_len:	The length of the supplied key.
 *
 * Returns 0 on success. The function fails only if an invalid key size (or
 * pointer) is supplied.
 */
int sm4_expandkey(struct sm4_ctx *ctx, const u8 *in_key,
              unsigned int key_len)
{
    u32 rk[4];
    const u32 *key = (u32 *)in_key;
    int i;

    if (key_len != SM4_KEY_SIZE)
        return -EINVAL;

    rk[0] = get_unaligned_be32(&key[0]) ^ fk[0];
    rk[1] = get_unaligned_be32(&key[1]) ^ fk[1];
    rk[2] = get_unaligned_be32(&key[2]) ^ fk[2];
    rk[3] = get_unaligned_be32(&key[3]) ^ fk[3];

    for (i = 0; i < 32; i += 4) {
        rk[0] ^= sm4_key_sub(rk[1] ^ rk[2] ^ rk[3] ^ ck[i + 0]);
        rk[1] ^= sm4_key_sub(rk[2] ^ rk[3] ^ rk[0] ^ ck[i + 1]);
        rk[2] ^= sm4_key_sub(rk[3] ^ rk[0] ^ rk[1] ^ ck[i + 2]);
        rk[3] ^= sm4_key_sub(rk[0] ^ rk[1] ^ rk[2] ^ ck[i + 3]);

        ctx->rkey_enc[i + 0] = rk[0];
        ctx->rkey_enc[i + 1] = rk[1];
        ctx->rkey_enc[i + 2] = rk[2];
        ctx->rkey_enc[i + 3] = rk[3];
        ctx->rkey_dec[31 - 0 - i] = rk[0];
        ctx->rkey_dec[31 - 1 - i] = rk[1];
        ctx->rkey_dec[31 - 2 - i] = rk[2];
        ctx->rkey_dec[31 - 3 - i] = rk[3];
    }

    return 0;
}
EXPORT_SYMBOL_GPL(sm4_expandkey);
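
/*
 * Usage sketch (illustrative only, not part of this file): a caller expands
 * a 16-byte key once and then reuses the resulting round keys for every
 * block. The key bytes below are arbitrary example values, not a standard
 * test vector.
 *
 *	struct sm4_ctx ctx;
 *	static const u8 key[SM4_KEY_SIZE] = {
 *		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
 *		0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
 *	};
 *	int err = sm4_expandkey(&ctx, key, sizeof(key));
 *
 *	if (err)
 *		return err;
 *
 * sm4_expandkey() returns -EINVAL for any key_len other than SM4_KEY_SIZE.
 */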

/**
 * sm4_crypt_block - Encrypt or decrypt a single SM4 block
 * @rk:		The rkey_enc for encrypt or rkey_dec for decrypt
 * @out:	Buffer to store output data
 * @in:		Buffer containing the input data
 */
void sm4_crypt_block(const u32 *rk, u8 *out, const u8 *in)
{
    u32 x[4], i;

    x[0] = get_unaligned_be32(in + 0 * 4);
    x[1] = get_unaligned_be32(in + 1 * 4);
    x[2] = get_unaligned_be32(in + 2 * 4);
    x[3] = get_unaligned_be32(in + 3 * 4);

    for (i = 0; i < 32; i += 4) {
        x[0] = sm4_round(x[0], x[1], x[2], x[3], rk[i + 0]);
        x[1] = sm4_round(x[1], x[2], x[3], x[0], rk[i + 1]);
        x[2] = sm4_round(x[2], x[3], x[0], x[1], rk[i + 2]);
        x[3] = sm4_round(x[3], x[0], x[1], x[2], rk[i + 3]);
    }

    put_unaligned_be32(x[3 - 0], out + 0 * 4);
    put_unaligned_be32(x[3 - 1], out + 1 * 4);
    put_unaligned_be32(x[3 - 2], out + 2 * 4);
    put_unaligned_be32(x[3 - 3], out + 3 * 4);
}
EXPORT_SYMBOL_GPL(sm4_crypt_block);
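
/*
 * Usage sketch (illustrative only, not part of this file): with a context
 * prepared by sm4_expandkey(), one 16-byte block is encrypted with the
 * encryption round keys and decrypted with the decryption round keys. Here
 * "pt" stands for an arbitrary 16-byte plaintext buffer supplied by the
 * caller.
 *
 *	u8 ct[SM4_BLOCK_SIZE], back[SM4_BLOCK_SIZE];
 *
 *	sm4_crypt_block(ctx.rkey_enc, ct, pt);		// encrypt one block
 *	sm4_crypt_block(ctx.rkey_dec, back, ct);	// decrypt it again
 *
 * After the two calls, "back" holds the original contents of "pt".
 */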

MODULE_DESCRIPTION("Generic SM4 library");
MODULE_LICENSE("GPL v2");