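/*
 * Glue code binding the Intel AES-NI assembly routines to the kernel
 * crypto API: a bare AES cipher plus ECB, CBC, CTS(CBC), CTR, XCTR,
 * XTS and (RFC 4106) GCM implementations.
 */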
#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/b128ops.h>
#include <crypto/gcm.h>
#include <crypto/xts.h>
#include <asm/cpu_device_id.h>
#include <asm/simd.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/jump_label.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/static_call.h>

#define AESNI_ALIGN	16
#define AESNI_ALIGN_ATTR __attribute__ ((__aligned__(AESNI_ALIGN)))
#define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE - 1))
#define RFC4106_HASH_SUBKEY_SIZE 16
#define AESNI_ALIGN_EXTRA ((AESNI_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
#define CRYPTO_AES_CTX_SIZE (sizeof(struct crypto_aes_ctx) + AESNI_ALIGN_EXTRA)
#define XTS_AES_CTX_SIZE (sizeof(struct aesni_xts_ctx) + AESNI_ALIGN_EXTRA)

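/*
 * Per-transform context structures. They live in the crypto_tfm context
 * area; the key material carries AESNI_ALIGN_ATTR because the assembly
 * routines expect it aligned to AESNI_ALIGN.
 */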
struct aesni_rfc4106_gcm_ctx {
	u8 hash_subkey[16] AESNI_ALIGN_ATTR;
	struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
	u8 nonce[4];
};

struct generic_gcmaes_ctx {
	u8 hash_subkey[16] AESNI_ALIGN_ATTR;
	struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
};

struct aesni_xts_ctx {
	u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
	u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
};

#define GCM_BLOCK_LEN 16

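/* Per-request GCM state shared with the init/update/finalize asm routines. */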
struct gcm_context_data {
	u8 aad_hash[GCM_BLOCK_LEN];
	u64 aad_length;
	u64 in_length;
	u8 partial_block_enc_key[GCM_BLOCK_LEN];
	u8 orig_IV[GCM_BLOCK_LEN];
	u8 current_counter[GCM_BLOCK_LEN];
	u64 partial_block_len;
	u64 unused;
	u8 hash_keys[GCM_BLOCK_LEN * 16];
};

asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
			     unsigned int key_len);
asmlinkage void aesni_enc(const void *ctx, u8 *out, const u8 *in);
asmlinkage void aesni_dec(const void *ctx, u8 *out, const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len);
asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cts_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
				  const u8 *in, unsigned int len, u8 *iv);
asmlinkage void aesni_cts_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
				  const u8 *in, unsigned int len, u8 *iv);

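/*
 * Minimum payload sizes (in bytes) at which the AVX (gen2) and AVX2 (gen4)
 * GCM paths are used instead of the SSE one.
 */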
#define AVX_GEN2_OPTSIZE 640
#define AVX_GEN4_OPTSIZE 4096

asmlinkage void aesni_xts_encrypt(const struct crypto_aes_ctx *ctx, u8 *out,
				  const u8 *in, unsigned int len, u8 *iv);

asmlinkage void aesni_xts_decrypt(const struct crypto_aes_ctx *ctx, u8 *out,
				  const u8 *in, unsigned int len, u8 *iv);

#ifdef CONFIG_X86_64

asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
			      const u8 *in, unsigned int len, u8 *iv);
DEFINE_STATIC_CALL(aesni_ctr_enc_tfm, aesni_ctr_enc);

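/* Scatter/gather GCM assembly routines: init, update and finalize steps. */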
asmlinkage void aesni_gcm_init(void *ctx,
			       struct gcm_context_data *gdata,
			       u8 *iv,
			       u8 *hash_subkey, const u8 *aad,
			       unsigned long aad_len);
asmlinkage void aesni_gcm_enc_update(void *ctx,
				     struct gcm_context_data *gdata, u8 *out,
				     const u8 *in, unsigned long plaintext_len);
asmlinkage void aesni_gcm_dec_update(void *ctx,
				     struct gcm_context_data *gdata, u8 *out,
				     const u8 *in,
				     unsigned long ciphertext_len);
asmlinkage void aesni_gcm_finalize(void *ctx,
				   struct gcm_context_data *gdata,
				   u8 *auth_tag, unsigned long auth_tag_len);

asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
					void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
					void *keys, u8 *out, unsigned int num_bytes);
asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
					void *keys, u8 *out, unsigned int num_bytes);

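/*
 * AVX "by8" XCTR keystream routines; byte_ctr is the running byte offset
 * of the data being processed.
 */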
asmlinkage void aes_xctr_enc_128_avx_by8(const u8 *in, const u8 *iv,
					 const void *keys, u8 *out, unsigned int num_bytes,
					 unsigned int byte_ctr);

asmlinkage void aes_xctr_enc_192_avx_by8(const u8 *in, const u8 *iv,
					 const void *keys, u8 *out, unsigned int num_bytes,
					 unsigned int byte_ctr);

asmlinkage void aes_xctr_enc_256_avx_by8(const u8 *in, const u8 *iv,
					 const void *keys, u8 *out, unsigned int num_bytes,
					 unsigned int byte_ctr);

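/* AVX (gen2) versions of the GCM init/update/finalize routines. */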
asmlinkage void aesni_gcm_init_avx_gen2(void *my_ctx_data,
					struct gcm_context_data *gdata,
					u8 *iv,
					u8 *hash_subkey,
					const u8 *aad,
					unsigned long aad_len);

asmlinkage void aesni_gcm_enc_update_avx_gen2(void *ctx,
					struct gcm_context_data *gdata, u8 *out,
					const u8 *in, unsigned long plaintext_len);
asmlinkage void aesni_gcm_dec_update_avx_gen2(void *ctx,
					struct gcm_context_data *gdata, u8 *out,
					const u8 *in,
					unsigned long ciphertext_len);
asmlinkage void aesni_gcm_finalize_avx_gen2(void *ctx,
					struct gcm_context_data *gdata,
					u8 *auth_tag, unsigned long auth_tag_len);

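/* AVX2 (gen4) versions of the GCM init/update/finalize routines. */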
asmlinkage void aesni_gcm_init_avx_gen4(void *my_ctx_data,
					struct gcm_context_data *gdata,
					u8 *iv,
					u8 *hash_subkey,
					const u8 *aad,
					unsigned long aad_len);

asmlinkage void aesni_gcm_enc_update_avx_gen4(void *ctx,
					struct gcm_context_data *gdata, u8 *out,
					const u8 *in, unsigned long plaintext_len);
asmlinkage void aesni_gcm_dec_update_avx_gen4(void *ctx,
					struct gcm_context_data *gdata, u8 *out,
					const u8 *in,
					unsigned long ciphertext_len);
asmlinkage void aesni_gcm_finalize_avx_gen4(void *ctx,
					struct gcm_context_data *gdata,
					u8 *auth_tag, unsigned long auth_tag_len);

static __ro_after_init DEFINE_STATIC_KEY_FALSE(gcm_use_avx);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(gcm_use_avx2);

static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return PTR_ALIGN(crypto_aead_ctx(tfm), align);
}

static inline struct
generic_gcmaes_ctx *generic_gcmaes_ctx_get(struct crypto_aead *tfm)
{
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return PTR_ALIGN(crypto_aead_ctx(tfm), align);
}
#endif

static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
	unsigned long addr = (unsigned long)raw_ctx;
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct crypto_aes_ctx *)ALIGN(addr, align);
}

static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
			      const u8 *in_key, unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
	int err;

	if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
	    key_len != AES_KEYSIZE_256)
		return -EINVAL;

	if (!crypto_simd_usable())
		err = aes_expandkey(ctx, in_key, key_len);
	else {
		kernel_fpu_begin();
		err = aesni_set_key(ctx, in_key, key_len);
		kernel_fpu_end();
	}

	return err;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
}

static void aesni_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!crypto_simd_usable()) {
		aes_encrypt(ctx, dst, src);
	} else {
		kernel_fpu_begin();
		aesni_enc(ctx, dst, src);
		kernel_fpu_end();
	}
}

static void aesni_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));

	if (!crypto_simd_usable()) {
		aes_decrypt(ctx, dst, src);
	} else {
		kernel_fpu_begin();
		aesni_dec(ctx, dst, src);
		kernel_fpu_end();
	}
}

static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
				 unsigned int len)
{
	return aes_set_key_common(crypto_skcipher_tfm(tfm),
				  crypto_skcipher_ctx(tfm), key, len);
}

static int ecb_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes)) {
		kernel_fpu_begin();
		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		kernel_fpu_end();
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}

	return err;
}

static int ecb_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes)) {
		kernel_fpu_begin();
		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		kernel_fpu_end();
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}

	return err;
}

static int cbc_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes)) {
		kernel_fpu_begin();
		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		kernel_fpu_end();
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}

	return err;
}

static int cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes)) {
		kernel_fpu_begin();
		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK, walk.iv);
		kernel_fpu_end();
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}

	return err;
}

static int cts_cbc_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
	struct scatterlist *src = req->src, *dst = req->dst;
	struct scatterlist sg_src[2], sg_dst[2];
	struct skcipher_request subreq;
	struct skcipher_walk walk;
	int err;

	skcipher_request_set_tfm(&subreq, tfm);
	skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
				      NULL, NULL);

	if (req->cryptlen <= AES_BLOCK_SIZE) {
		if (req->cryptlen < AES_BLOCK_SIZE)
			return -EINVAL;
		cbc_blocks = 1;
	}

	if (cbc_blocks > 0) {
		skcipher_request_set_crypt(&subreq, req->src, req->dst,
					   cbc_blocks * AES_BLOCK_SIZE,
					   req->iv);

		err = cbc_encrypt(&subreq);
		if (err)
			return err;

		if (req->cryptlen == AES_BLOCK_SIZE)
			return 0;

		dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
		if (req->dst != req->src)
			dst = scatterwalk_ffwd(sg_dst, req->dst,
					       subreq.cryptlen);
	}

	skcipher_request_set_crypt(&subreq, src, dst,
				   req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
				   req->iv);

	err = skcipher_walk_virt(&walk, &subreq, false);
	if (err)
		return err;

	kernel_fpu_begin();
	aesni_cts_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			  walk.nbytes, walk.iv);
	kernel_fpu_end();

	return skcipher_walk_done(&walk, 0);
}

static int cts_cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
	struct scatterlist *src = req->src, *dst = req->dst;
	struct scatterlist sg_src[2], sg_dst[2];
	struct skcipher_request subreq;
	struct skcipher_walk walk;
	int err;

	skcipher_request_set_tfm(&subreq, tfm);
	skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
				      NULL, NULL);

	if (req->cryptlen <= AES_BLOCK_SIZE) {
		if (req->cryptlen < AES_BLOCK_SIZE)
			return -EINVAL;
		cbc_blocks = 1;
	}

	if (cbc_blocks > 0) {
		skcipher_request_set_crypt(&subreq, req->src, req->dst,
					   cbc_blocks * AES_BLOCK_SIZE,
					   req->iv);

		err = cbc_decrypt(&subreq);
		if (err)
			return err;

		if (req->cryptlen == AES_BLOCK_SIZE)
			return 0;

		dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
		if (req->dst != req->src)
			dst = scatterwalk_ffwd(sg_dst, req->dst,
					       subreq.cryptlen);
	}

	skcipher_request_set_crypt(&subreq, src, dst,
				   req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
				   req->iv);

	err = skcipher_walk_virt(&walk, &subreq, false);
	if (err)
		return err;

	kernel_fpu_begin();
	aesni_cts_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			  walk.nbytes, walk.iv);
	kernel_fpu_end();

	return skcipher_walk_done(&walk, 0);
}

#ifdef CONFIG_X86_64
static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
				  const u8 *in, unsigned int len, u8 *iv)
{
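	/*
	 * Pick the "by8" CTR variant that matches the expanded key length;
	 * aes_set_key_common() guarantees it is 128, 192 or 256 bits.
	 */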
	if (ctx->key_length == AES_KEYSIZE_128)
		aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
	else if (ctx->key_length == AES_KEYSIZE_192)
		aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
	else
		aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
}

static int ctr_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	u8 keystream[AES_BLOCK_SIZE];
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) > 0) {
		kernel_fpu_begin();
		if (nbytes & AES_BLOCK_MASK)
			static_call(aesni_ctr_enc_tfm)(ctx, walk.dst.virt.addr,
						       walk.src.virt.addr,
						       nbytes & AES_BLOCK_MASK,
						       walk.iv);
		nbytes &= ~AES_BLOCK_MASK;

		if (walk.nbytes == walk.total && nbytes > 0) {
			aesni_enc(ctx, keystream, walk.iv);
			crypto_xor_cpy(walk.dst.virt.addr + walk.nbytes - nbytes,
				       walk.src.virt.addr + walk.nbytes - nbytes,
				       keystream, nbytes);
			crypto_inc(walk.iv, AES_BLOCK_SIZE);
			nbytes = 0;
		}
		kernel_fpu_end();
		err = skcipher_walk_done(&walk, nbytes);
	}
	return err;
}

static void aesni_xctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
				   const u8 *in, unsigned int len, u8 *iv,
				   unsigned int byte_ctr)
{
	if (ctx->key_length == AES_KEYSIZE_128)
		aes_xctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len,
					 byte_ctr);
	else if (ctx->key_length == AES_KEYSIZE_192)
		aes_xctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len,
					 byte_ctr);
	else
		aes_xctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len,
					 byte_ctr);
}

static int xctr_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
	u8 keystream[AES_BLOCK_SIZE];
	struct skcipher_walk walk;
	unsigned int nbytes;
	unsigned int byte_ctr = 0;
	int err;
	__le32 block[AES_BLOCK_SIZE / sizeof(__le32)];

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) > 0) {
		kernel_fpu_begin();
		if (nbytes & AES_BLOCK_MASK)
			aesni_xctr_enc_avx_tfm(ctx, walk.dst.virt.addr,
				walk.src.virt.addr, nbytes & AES_BLOCK_MASK,
				walk.iv, byte_ctr);
		nbytes &= ~AES_BLOCK_MASK;
		byte_ctr += walk.nbytes - nbytes;

		if (walk.nbytes == walk.total && nbytes > 0) {
			memcpy(block, walk.iv, AES_BLOCK_SIZE);
			block[0] ^= cpu_to_le32(1 + byte_ctr / AES_BLOCK_SIZE);
			aesni_enc(ctx, keystream, (u8 *)block);
			crypto_xor_cpy(walk.dst.virt.addr + walk.nbytes -
				       nbytes, walk.src.virt.addr + walk.nbytes
				       - nbytes, keystream, nbytes);
			byte_ctr += nbytes;
			nbytes = 0;
		}
		kernel_fpu_end();
		err = skcipher_walk_done(&walk, nbytes);
	}
	return err;
}

static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
	struct crypto_aes_ctx ctx;
	int ret;

	ret = aes_expandkey(&ctx, key, key_len);
	if (ret)
		return ret;

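	/* Encrypt an all-zeroes block to derive the GHASH subkey H. */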
	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);

	aes_encrypt(&ctx, hash_subkey, hash_subkey);

	memzero_explicit(&ctx, sizeof(ctx));
	return 0;
}

static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
				  unsigned int key_len)
{
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);

	if (key_len < 4)
		return -EINVAL;

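	/* The last four key bytes are the RFC 4106 nonce (salt). */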
	key_len -= 4;

	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));

	return aes_set_key_common(crypto_aead_tfm(aead),
				  &ctx->aes_key_expanded, key, key_len) ?:
	       rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}

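/* RFC 4106 only allows ICV (tag) lengths of 8, 12 or 16 bytes. */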
static int common_rfc4106_set_authsize(struct crypto_aead *aead,
				       unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int generic_gcmaes_set_authsize(struct crypto_aead *tfm,
				       unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 8:
	case 12:
	case 13:
	case 14:
	case 15:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
			      unsigned int assoclen, u8 *hash_subkey,
			      u8 *iv, void *aes_ctx, u8 *auth_tag,
			      unsigned long auth_tag_len)
{
	u8 databuf[sizeof(struct gcm_context_data) + (AESNI_ALIGN - 8)] __aligned(8);
	struct gcm_context_data *data = PTR_ALIGN((void *)databuf, AESNI_ALIGN);
	unsigned long left = req->cryptlen;
	struct scatter_walk assoc_sg_walk;
	struct skcipher_walk walk;
	bool do_avx, do_avx2;
	u8 *assocmem = NULL;
	u8 *assoc;
	int err;

	if (!enc)
		left -= auth_tag_len;

	do_avx = (left >= AVX_GEN2_OPTSIZE);
	do_avx2 = (left >= AVX_GEN4_OPTSIZE);

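	/*
	 * Map the associated data directly if it fits in the first
	 * scatterlist entry, otherwise copy it into a linear buffer.
	 */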
	if (req->src->length >= assoclen && req->src->length) {
		scatterwalk_start(&assoc_sg_walk, req->src);
		assoc = scatterwalk_map(&assoc_sg_walk);
	} else {
		gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
			      GFP_KERNEL : GFP_ATOMIC;

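		/* assoclen can be any size, so the copy has to live on the heap */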
		assocmem = kmalloc(assoclen, flags);
		if (unlikely(!assocmem))
			return -ENOMEM;
		assoc = assocmem;

		scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0);
	}

	kernel_fpu_begin();
	if (static_branch_likely(&gcm_use_avx2) && do_avx2)
		aesni_gcm_init_avx_gen4(aes_ctx, data, iv, hash_subkey, assoc,
					assoclen);
	else if (static_branch_likely(&gcm_use_avx) && do_avx)
		aesni_gcm_init_avx_gen2(aes_ctx, data, iv, hash_subkey, assoc,
					assoclen);
	else
		aesni_gcm_init(aes_ctx, data, iv, hash_subkey, assoc, assoclen);
	kernel_fpu_end();

	if (!assocmem)
		scatterwalk_unmap(assoc);
	else
		kfree(assocmem);

	err = enc ? skcipher_walk_aead_encrypt(&walk, req, false)
		  : skcipher_walk_aead_decrypt(&walk, req, false);

	while (walk.nbytes > 0) {
		kernel_fpu_begin();
		if (static_branch_likely(&gcm_use_avx2) && do_avx2) {
			if (enc)
				aesni_gcm_enc_update_avx_gen4(aes_ctx, data,
							      walk.dst.virt.addr,
							      walk.src.virt.addr,
							      walk.nbytes);
			else
				aesni_gcm_dec_update_avx_gen4(aes_ctx, data,
							      walk.dst.virt.addr,
							      walk.src.virt.addr,
							      walk.nbytes);
		} else if (static_branch_likely(&gcm_use_avx) && do_avx) {
			if (enc)
				aesni_gcm_enc_update_avx_gen2(aes_ctx, data,
							      walk.dst.virt.addr,
							      walk.src.virt.addr,
							      walk.nbytes);
			else
				aesni_gcm_dec_update_avx_gen2(aes_ctx, data,
							      walk.dst.virt.addr,
							      walk.src.virt.addr,
							      walk.nbytes);
		} else if (enc) {
			aesni_gcm_enc_update(aes_ctx, data, walk.dst.virt.addr,
					     walk.src.virt.addr, walk.nbytes);
		} else {
			aesni_gcm_dec_update(aes_ctx, data, walk.dst.virt.addr,
					     walk.src.virt.addr, walk.nbytes);
		}
		kernel_fpu_end();

		err = skcipher_walk_done(&walk, 0);
	}

	if (err)
		return err;

	kernel_fpu_begin();
	if (static_branch_likely(&gcm_use_avx2) && do_avx2)
		aesni_gcm_finalize_avx_gen4(aes_ctx, data, auth_tag,
					    auth_tag_len);
	else if (static_branch_likely(&gcm_use_avx) && do_avx)
		aesni_gcm_finalize_avx_gen2(aes_ctx, data, auth_tag,
					    auth_tag_len);
	else
		aesni_gcm_finalize(aes_ctx, data, auth_tag, auth_tag_len);
	kernel_fpu_end();

	return 0;
}

static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
			  u8 *hash_subkey, u8 *iv, void *aes_ctx)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 auth_tag[16];
	int err;

	err = gcmaes_crypt_by_sg(true, req, assoclen, hash_subkey, iv, aes_ctx,
				 auth_tag, auth_tag_len);
	if (err)
		return err;

	scatterwalk_map_and_copy(auth_tag, req->dst,
				 req->assoclen + req->cryptlen,
				 auth_tag_len, 1);
	return 0;
}

static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
			  u8 *hash_subkey, u8 *iv, void *aes_ctx)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
	u8 auth_tag_msg[16];
	u8 auth_tag[16];
	int err;

	err = gcmaes_crypt_by_sg(false, req, assoclen, hash_subkey, iv, aes_ctx,
				 auth_tag, auth_tag_len);
	if (err)
		return err;

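	/* Copy the transmitted tag out of the source and compare it. */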
	scatterwalk_map_and_copy(auth_tag_msg, req->src,
				 req->assoclen + req->cryptlen - auth_tag_len,
				 auth_tag_len, 0);

	if (crypto_memneq(auth_tag_msg, auth_tag, auth_tag_len)) {
		memzero_explicit(auth_tag, sizeof(auth_tag));
		return -EBADMSG;
	}
	return 0;
}

static int helper_rfc4106_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
	u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
	unsigned int i;
	__be32 counter = cpu_to_be32(1);

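	/*
	 * Assuming 64-bit extended sequence numbers are supported, the
	 * associated data must be 16 or 20 bytes long.
	 */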
	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
		return -EINVAL;

	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	return gcmaes_encrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
			      aes_ctx);
}

static int helper_rfc4106_decrypt(struct aead_request *req)
{
	__be32 counter = cpu_to_be32(1);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
	u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
	unsigned int i;

	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
		return -EINVAL;

	for (i = 0; i < 4; i++)
		*(iv+i) = ctx->nonce[i];
	for (i = 0; i < 8; i++)
		*(iv+4+i) = req->iv[i];
	*((__be32 *)(iv+12)) = counter;

	return gcmaes_decrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
			      aes_ctx);
}
#endif

static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err;

	err = xts_verify_key(tfm, key, keylen);
	if (err)
		return err;

	keylen /= 2;

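	/* The first half of the key is the data (crypt) key. */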
	err = aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_crypt_ctx,
				 key, keylen);
	if (err)
		return err;

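	/* The second half of the key is the tweak key. */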
	return aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_tweak_ctx,
				  key + keylen, keylen);
}

static int xts_crypt(struct skcipher_request *req, bool encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int tail = req->cryptlen % AES_BLOCK_SIZE;
	struct skcipher_request subreq;
	struct skcipher_walk walk;
	int err;

	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;

	err = skcipher_walk_virt(&walk, req, false);
	if (!walk.nbytes)
		return err;

	if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
		int blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;

		skcipher_walk_abort(&walk);

		skcipher_request_set_tfm(&subreq, tfm);
		skcipher_request_set_callback(&subreq,
					      skcipher_request_flags(req),
					      NULL, NULL);
		skcipher_request_set_crypt(&subreq, req->src, req->dst,
					   blocks * AES_BLOCK_SIZE, req->iv);
		req = &subreq;

		err = skcipher_walk_virt(&walk, req, false);
		if (!walk.nbytes)
			return err;
	} else {
		tail = 0;
	}

	kernel_fpu_begin();

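	/* Compute the first tweak value by encrypting the IV. */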
	aesni_enc(aes_ctx(ctx->raw_tweak_ctx), walk.iv, walk.iv);

	while (walk.nbytes > 0) {
		int nbytes = walk.nbytes;

		if (nbytes < walk.total)
			nbytes &= ~(AES_BLOCK_SIZE - 1);

		if (encrypt)
			aesni_xts_encrypt(aes_ctx(ctx->raw_crypt_ctx),
					  walk.dst.virt.addr, walk.src.virt.addr,
					  nbytes, walk.iv);
		else
			aesni_xts_decrypt(aes_ctx(ctx->raw_crypt_ctx),
					  walk.dst.virt.addr, walk.src.virt.addr,
					  nbytes, walk.iv);
		kernel_fpu_end();

		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);

		if (walk.nbytes > 0)
			kernel_fpu_begin();
	}

	if (unlikely(tail > 0 && !err)) {
		struct scatterlist sg_src[2], sg_dst[2];
		struct scatterlist *src, *dst;

		dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
		if (req->dst != req->src)
			dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);

		skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
					   req->iv);

		err = skcipher_walk_virt(&walk, &subreq, false);
		if (err)
			return err;

		kernel_fpu_begin();
		if (encrypt)
			aesni_xts_encrypt(aes_ctx(ctx->raw_crypt_ctx),
					  walk.dst.virt.addr, walk.src.virt.addr,
					  walk.nbytes, walk.iv);
		else
			aesni_xts_decrypt(aes_ctx(ctx->raw_crypt_ctx),
					  walk.dst.virt.addr, walk.src.virt.addr,
					  walk.nbytes, walk.iv);
		kernel_fpu_end();

		err = skcipher_walk_done(&walk, 0);
	}
	return err;
}

static int xts_encrypt(struct skcipher_request *req)
{
	return xts_crypt(req, true);
}

static int xts_decrypt(struct skcipher_request *req)
{
	return xts_crypt(req, false);
}

static struct crypto_alg aesni_cipher_alg = {
	.cra_name = "aes",
	.cra_driver_name = "aes-aesni",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = CRYPTO_AES_CTX_SIZE,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.cipher = {
			.cia_min_keysize = AES_MIN_KEY_SIZE,
			.cia_max_keysize = AES_MAX_KEY_SIZE,
			.cia_setkey = aes_set_key,
			.cia_encrypt = aesni_encrypt,
			.cia_decrypt = aesni_decrypt
		}
	}
};

static struct skcipher_alg aesni_skciphers[] = {
	{
		.base = {
			.cra_name = "__ecb(aes)",
			.cra_driver_name = "__ecb-aes-aesni",
			.cra_priority = 400,
			.cra_flags = CRYPTO_ALG_INTERNAL,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = CRYPTO_AES_CTX_SIZE,
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.setkey = aesni_skcipher_setkey,
		.encrypt = ecb_encrypt,
		.decrypt = ecb_decrypt,
	}, {
		.base = {
			.cra_name = "__cbc(aes)",
			.cra_driver_name = "__cbc-aes-aesni",
			.cra_priority = 400,
			.cra_flags = CRYPTO_ALG_INTERNAL,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = CRYPTO_AES_CTX_SIZE,
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = aesni_skcipher_setkey,
		.encrypt = cbc_encrypt,
		.decrypt = cbc_decrypt,
	}, {
		.base = {
			.cra_name = "__cts(cbc(aes))",
			.cra_driver_name = "__cts-cbc-aes-aesni",
			.cra_priority = 400,
			.cra_flags = CRYPTO_ALG_INTERNAL,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = CRYPTO_AES_CTX_SIZE,
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.walksize = 2 * AES_BLOCK_SIZE,
		.setkey = aesni_skcipher_setkey,
		.encrypt = cts_cbc_encrypt,
		.decrypt = cts_cbc_decrypt,
#ifdef CONFIG_X86_64
	}, {
		.base = {
			.cra_name = "__ctr(aes)",
			.cra_driver_name = "__ctr-aes-aesni",
			.cra_priority = 400,
			.cra_flags = CRYPTO_ALG_INTERNAL,
			.cra_blocksize = 1,
			.cra_ctxsize = CRYPTO_AES_CTX_SIZE,
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.chunksize = AES_BLOCK_SIZE,
		.setkey = aesni_skcipher_setkey,
		.encrypt = ctr_crypt,
		.decrypt = ctr_crypt,
#endif
	}, {
		.base = {
			.cra_name = "__xts(aes)",
			.cra_driver_name = "__xts-aes-aesni",
			.cra_priority = 401,
			.cra_flags = CRYPTO_ALG_INTERNAL,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = XTS_AES_CTX_SIZE,
			.cra_module = THIS_MODULE,
		},
		.min_keysize = 2 * AES_MIN_KEY_SIZE,
		.max_keysize = 2 * AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.walksize = 2 * AES_BLOCK_SIZE,
		.setkey = xts_aesni_setkey,
		.encrypt = xts_encrypt,
		.decrypt = xts_decrypt,
	}
};

static
struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)];

#ifdef CONFIG_X86_64
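/*
 * XCTR has no non-AVX implementation, so it is only registered when AVX
 * is available.
 */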
static struct skcipher_alg aesni_xctr = {
	.base = {
		.cra_name = "__xctr(aes)",
		.cra_driver_name = "__xctr-aes-aesni",
		.cra_priority = 400,
		.cra_flags = CRYPTO_ALG_INTERNAL,
		.cra_blocksize = 1,
		.cra_ctxsize = CRYPTO_AES_CTX_SIZE,
		.cra_module = THIS_MODULE,
	},
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
	.chunksize = AES_BLOCK_SIZE,
	.setkey = aesni_skcipher_setkey,
	.encrypt = xctr_crypt,
	.decrypt = xctr_crypt,
};

static struct simd_skcipher_alg *aesni_simd_xctr;
#endif

#ifdef CONFIG_X86_64
static int generic_gcmaes_set_key(struct crypto_aead *aead, const u8 *key,
				  unsigned int key_len)
{
	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(aead);

	return aes_set_key_common(crypto_aead_tfm(aead),
				  &ctx->aes_key_expanded, key, key_len) ?:
	       rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}

static int generic_gcmaes_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
	u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
	__be32 counter = cpu_to_be32(1);

	memcpy(iv, req->iv, 12);
	*((__be32 *)(iv+12)) = counter;

	return gcmaes_encrypt(req, req->assoclen, ctx->hash_subkey, iv,
			      aes_ctx);
}

static int generic_gcmaes_decrypt(struct aead_request *req)
{
	__be32 counter = cpu_to_be32(1);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
	void *aes_ctx = &(ctx->aes_key_expanded);
	u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
	u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);

	memcpy(iv, req->iv, 12);
	*((__be32 *)(iv+12)) = counter;

	return gcmaes_decrypt(req, req->assoclen, ctx->hash_subkey, iv,
			      aes_ctx);
}

static struct aead_alg aesni_aeads[] = { {
	.setkey = common_rfc4106_set_key,
	.setauthsize = common_rfc4106_set_authsize,
	.encrypt = helper_rfc4106_encrypt,
	.decrypt = helper_rfc4106_decrypt,
	.ivsize = GCM_RFC4106_IV_SIZE,
	.maxauthsize = 16,
	.base = {
		.cra_name = "__rfc4106(gcm(aes))",
		.cra_driver_name = "__rfc4106-gcm-aesni",
		.cra_priority = 400,
		.cra_flags = CRYPTO_ALG_INTERNAL,
		.cra_blocksize = 1,
		.cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
}, {
	.setkey = generic_gcmaes_set_key,
	.setauthsize = generic_gcmaes_set_authsize,
	.encrypt = generic_gcmaes_encrypt,
	.decrypt = generic_gcmaes_decrypt,
	.ivsize = GCM_AES_IV_SIZE,
	.maxauthsize = 16,
	.base = {
		.cra_name = "__gcm(aes)",
		.cra_driver_name = "__generic-gcm-aesni",
		.cra_priority = 400,
		.cra_flags = CRYPTO_ALG_INTERNAL,
		.cra_blocksize = 1,
		.cra_ctxsize = sizeof(struct generic_gcmaes_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
	},
} };
#else
static struct aead_alg aesni_aeads[0];
#endif

static struct simd_aead_alg *aesni_simd_aeads[ARRAY_SIZE(aesni_aeads)];

static const struct x86_cpu_id aesni_cpu_id[] = {
	X86_MATCH_FEATURE(X86_FEATURE_AES, NULL),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);

static int __init aesni_init(void)
{
	int err;

	if (!x86_match_cpu(aesni_cpu_id))
		return -ENODEV;
#ifdef CONFIG_X86_64
	if (boot_cpu_has(X86_FEATURE_AVX2)) {
		pr_info("AVX2 version of gcm_enc/dec engaged.\n");
		static_branch_enable(&gcm_use_avx);
		static_branch_enable(&gcm_use_avx2);
	} else
	if (boot_cpu_has(X86_FEATURE_AVX)) {
		pr_info("AVX version of gcm_enc/dec engaged.\n");
		static_branch_enable(&gcm_use_avx);
	} else {
		pr_info("SSE version of gcm_enc/dec engaged.\n");
	}
	if (boot_cpu_has(X86_FEATURE_AVX)) {
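		/* Use the AVX "by8" CTR implementation for better throughput. */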
		static_call_update(aesni_ctr_enc_tfm, aesni_ctr_enc_avx_tfm);
		pr_info("AES CTR mode by8 optimization enabled\n");
	}
#endif

	err = crypto_register_alg(&aesni_cipher_alg);
	if (err)
		return err;

	err = simd_register_skciphers_compat(aesni_skciphers,
					     ARRAY_SIZE(aesni_skciphers),
					     aesni_simd_skciphers);
	if (err)
		goto unregister_cipher;

	err = simd_register_aeads_compat(aesni_aeads, ARRAY_SIZE(aesni_aeads),
					 aesni_simd_aeads);
	if (err)
		goto unregister_skciphers;

#ifdef CONFIG_X86_64
	if (boot_cpu_has(X86_FEATURE_AVX))
		err = simd_register_skciphers_compat(&aesni_xctr, 1,
						     &aesni_simd_xctr);
	if (err)
		goto unregister_aeads;
#endif

	return 0;

#ifdef CONFIG_X86_64
unregister_aeads:
	simd_unregister_aeads(aesni_aeads, ARRAY_SIZE(aesni_aeads),
			      aesni_simd_aeads);
#endif

unregister_skciphers:
	simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
				  aesni_simd_skciphers);
unregister_cipher:
	crypto_unregister_alg(&aesni_cipher_alg);
	return err;
}

static void __exit aesni_exit(void)
{
	simd_unregister_aeads(aesni_aeads, ARRAY_SIZE(aesni_aeads),
			      aesni_simd_aeads);
	simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
				  aesni_simd_skciphers);
	crypto_unregister_alg(&aesni_cipher_alg);
#ifdef CONFIG_X86_64
	if (boot_cpu_has(X86_FEATURE_AVX))
		simd_unregister_skciphers(&aesni_xctr, 1, &aesni_simd_xctr);
#endif
}

late_initcall(aesni_init);
module_exit(aesni_exit);

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("aes");