// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SHA-224/SHA-256 library implementation, as specified in FIPS 180-4.
 */
0014 #include <linux/bitops.h>
0015 #include <linux/export.h>
0016 #include <linux/module.h>
0017 #include <linux/string.h>
0018 #include <crypto/sha2.h>
0019 #include <asm/unaligned.h>
0020
/*
 * SHA-224/SHA-256 round constants K[0..63] (FIPS 180-4, sec. 4.2.2):
 * the first 32 bits of the fractional parts of the cube roots of the
 * first 64 prime numbers.
 */
static const u32 SHA256_K[] = {
	0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
	0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
	0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
	0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
	0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
	0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
	0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
	0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
	0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
	0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
	0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
	0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
	0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
	0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
	0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
	0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2,
};
0039
0040 static inline u32 Ch(u32 x, u32 y, u32 z)
0041 {
0042 return z ^ (x & (y ^ z));
0043 }
0044
0045 static inline u32 Maj(u32 x, u32 y, u32 z)
0046 {
0047 return (x & y) | (z & (x | y));
0048 }
0049
/*
 * FIPS 180-4 sigma functions; ror32() (<linux/bitops.h>) is a 32-bit
 * rotate right. e0/e1 are the big-sigma functions used in the round
 * computation; s0/s1 are the small-sigma functions used in message
 * schedule expansion (see BLEND_OP below).
 */
#define e0(x) (ror32(x, 2) ^ ror32(x, 13) ^ ror32(x, 22))
#define e1(x) (ror32(x, 6) ^ ror32(x, 11) ^ ror32(x, 25))
#define s0(x) (ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3))
#define s1(x) (ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10))
0054
0055 static inline void LOAD_OP(int I, u32 *W, const u8 *input)
0056 {
0057 W[I] = get_unaligned_be32((__u32 *)input + I);
0058 }
0059
0060 static inline void BLEND_OP(int I, u32 *W)
0061 {
0062 W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16];
0063 }
0064
/*
 * One SHA-256 round. Instead of physically rotating the eight working
 * variables each round, the caller passes them in a rotated order
 * (see the unrolled loop in sha256_transform), so only d and h are
 * written here. Multi-evaluation macro: arguments must be plain
 * variables, which all call sites satisfy.
 */
#define SHA256_ROUND(i, a, b, c, d, e, f, g, h) do { \
	u32 t1, t2; \
	t1 = h + e1(e) + Ch(e, f, g) + SHA256_K[i] + W[i]; \
	t2 = e0(a) + Maj(a, b, c); \
	d += t1; \
	h = t1 + t2; \
} while (0)
0072
/*
 * Compress one 64-byte block into the 8-word state. W is a 64-word
 * scratch buffer supplied by the caller (so the caller can wipe it
 * once after processing all blocks, rather than per block).
 */
static void sha256_transform(u32 *state, const u8 *input, u32 *W)
{
	u32 a, b, c, d, e, f, g, h;
	int i;

	/* load the input block into W[0..15], big-endian */
	for (i = 0; i < 16; i += 8) {
		LOAD_OP(i + 0, W, input);
		LOAD_OP(i + 1, W, input);
		LOAD_OP(i + 2, W, input);
		LOAD_OP(i + 3, W, input);
		LOAD_OP(i + 4, W, input);
		LOAD_OP(i + 5, W, input);
		LOAD_OP(i + 6, W, input);
		LOAD_OP(i + 7, W, input);
	}

	/* now blend: expand the message schedule to W[16..63] */
	for (i = 16; i < 64; i += 8) {
		BLEND_OP(i + 0, W);
		BLEND_OP(i + 1, W);
		BLEND_OP(i + 2, W);
		BLEND_OP(i + 3, W);
		BLEND_OP(i + 4, W);
		BLEND_OP(i + 5, W);
		BLEND_OP(i + 6, W);
		BLEND_OP(i + 7, W);
	}

	/* load the state into our registers */
	a = state[0]; b = state[1]; c = state[2]; d = state[3];
	e = state[4]; f = state[5]; g = state[6]; h = state[7];

	/*
	 * now iterate: 64 rounds, unrolled by 8 so the working variables
	 * are passed to SHA256_ROUND in rotated order instead of being
	 * shuffled each round
	 */
	for (i = 0; i < 64; i += 8) {
		SHA256_ROUND(i + 0, a, b, c, d, e, f, g, h);
		SHA256_ROUND(i + 1, h, a, b, c, d, e, f, g);
		SHA256_ROUND(i + 2, g, h, a, b, c, d, e, f);
		SHA256_ROUND(i + 3, f, g, h, a, b, c, d, e);
		SHA256_ROUND(i + 4, e, f, g, h, a, b, c, d);
		SHA256_ROUND(i + 5, d, e, f, g, h, a, b, c);
		SHA256_ROUND(i + 6, c, d, e, f, g, h, a, b);
		SHA256_ROUND(i + 7, b, c, d, e, f, g, h, a);
	}

	/* add the compressed chunk to the current hash value */
	state[0] += a; state[1] += b; state[2] += c; state[3] += d;
	state[4] += e; state[5] += f; state[6] += g; state[7] += h;
}
0121
/*
 * Feed len bytes of data into the hash state. Partial blocks are
 * buffered in sctx->buf; full 64-byte blocks are compressed directly
 * from the caller's buffer where possible.
 */
void sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len)
{
	unsigned int partial, done;
	const u8 *src;
	u32 W[64];

	/* number of bytes already buffered from a previous call */
	partial = sctx->count & 0x3f;
	sctx->count += len;
	done = 0;
	src = data;

	/* only compress if we can complete at least one 64-byte block */
	if ((partial + len) > 63) {
		if (partial) {
			/*
			 * done = -partial, so done + 64 is exactly the
			 * number of input bytes needed to top up the
			 * buffered partial block; after the first
			 * transform, data + done points at the first
			 * unconsumed input byte.
			 */
			done = -partial;
			memcpy(sctx->buf + partial, data, done + 64);
			src = sctx->buf;
		}

		do {
			sha256_transform(sctx->state, src, W);
			done += 64;
			src = data + done;
		} while (done + 63 < len);

		/* wipe the message schedule: it is key-/data-dependent */
		memzero_explicit(W, sizeof(W));

		partial = 0;
	}
	/* stash the remaining (< 64) bytes for the next call */
	memcpy(sctx->buf + partial, src, len - done);
}
EXPORT_SYMBOL(sha256_update);
0153
/*
 * SHA-224 uses the same compression and buffering as SHA-256; only the
 * initial state (set by sha224_init) and digest length differ.
 */
void sha224_update(struct sha256_state *sctx, const u8 *data, unsigned int len)
{
	sha256_update(sctx, data, len);
}
EXPORT_SYMBOL(sha224_update);
0159
/*
 * Common finalization for SHA-224 (digest_words == 7) and SHA-256
 * (digest_words == 8): append padding and the bit-length, then emit
 * the first digest_words state words big-endian and wipe the context.
 */
static void __sha256_final(struct sha256_state *sctx, u8 *out, int digest_words)
{
	__be32 *dst = (__be32 *)out;
	__be64 bits;
	unsigned int index, pad_len;
	int i;
	static const u8 padding[64] = { 0x80, };

	/* save the message length in bits, before padding alters count */
	bits = cpu_to_be64(sctx->count << 3);

	/* pad out to 56 mod 64 (leaving 8 bytes for the length field) */
	index = sctx->count & 0x3f;
	pad_len = (index < 56) ? (56 - index) : ((64+56) - index);
	sha256_update(sctx, padding, pad_len);

	/* append the 64-bit big-endian message length */
	sha256_update(sctx, (const u8 *)&bits, sizeof(bits));

	/* store the state words into the digest, big-endian */
	for (i = 0; i < digest_words; i++)
		put_unaligned_be32(sctx->state[i], &dst[i]);

	/* zeroize sensitive information */
	memzero_explicit(sctx, sizeof(*sctx));
}
0186
/* Finalize a SHA-256 hash: write the 32-byte digest and wipe sctx. */
void sha256_final(struct sha256_state *sctx, u8 *out)
{
	__sha256_final(sctx, out, 8);
}
EXPORT_SYMBOL(sha256_final);
0192
/* Finalize a SHA-224 hash: write the 28-byte digest (7 words) and wipe sctx. */
void sha224_final(struct sha256_state *sctx, u8 *out)
{
	__sha256_final(sctx, out, 7);
}
EXPORT_SYMBOL(sha224_final);
0198
/*
 * One-shot convenience wrapper: compute the SHA-256 digest of len
 * bytes at data and write the 32-byte result to out. The temporary
 * context is wiped by sha256_final().
 */
void sha256(const u8 *data, unsigned int len, u8 *out)
{
	struct sha256_state sctx;

	sha256_init(&sctx);
	sha256_update(&sctx, data, len);
	sha256_final(&sctx, out);
}
EXPORT_SYMBOL(sha256);
0208
0209 MODULE_LICENSE("GPL");