/*
 * Cryptographic API.
 *
 * s390 implementation of the AES Cipher Algorithm, using the CPACF
 * instructions (KM, KMC, KMCTR, KMA) with a software fallback for key
 * sizes and request shapes the hardware cannot handle.
 */
#define KMSG_COMPONENT "aes_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/ghash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fips.h>
#include <linux/string.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>

static u8 *ctrblk;
static DEFINE_MUTEX(ctrblk_lock);

static cpacf_mask_t km_functions, kmc_functions, kmctr_functions,
		    kma_functions;

struct s390_aes_ctx {
	u8 key[AES_MAX_KEY_SIZE];
	int key_len;
	unsigned long fc;
	union {
		struct crypto_skcipher *skcipher;
		struct crypto_cipher *cip;
	} fallback;
};

struct s390_xts_ctx {
	u8 key[32];
	u8 pcc_key[32];
	int key_len;
	unsigned long fc;
	struct crypto_skcipher *fallback;
};

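/*
 * State for walking the source/destination scatterlists of a GCM request.
 * A small bounce buffer (buf) collects bytes that straddle scatterlist
 * entries so that the CPACF KMA instruction can always be fed contiguous
 * chunks; ptr/nbytes describe the chunk currently handed to the caller.
 */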
struct gcm_sg_walk {
	struct scatter_walk walk;
	unsigned int walk_bytes;
	u8 *walk_ptr;
	unsigned int walk_bytes_remain;
	u8 buf[AES_BLOCK_SIZE];
	unsigned int buf_bytes;
	u8 *ptr;
	unsigned int nbytes;
};

static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
			       unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
					       CRYPTO_TFM_REQ_MASK);

	return crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KM_AES_128 :
	     (key_len == 24) ? CPACF_KM_AES_192 :
	     (key_len == 32) ? CPACF_KM_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_cip(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

static void crypto_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(!sctx->fc)) {
		crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
		return;
	}
	cpacf_km(sctx->fc, &sctx->key, out, in, AES_BLOCK_SIZE);
}

static void crypto_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(!sctx->fc)) {
		crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
		return;
	}
	cpacf_km(sctx->fc | CPACF_DECRYPT,
		 &sctx->key, out, in, AES_BLOCK_SIZE);
}

static int fallback_init_cip(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.cip = crypto_alloc_cipher(name, 0,
						 CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(sctx->fallback.cip)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.cip);
	}

	return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(sctx->fallback.cip);
	sctx->fallback.cip = NULL;
}

static struct crypto_alg aes_alg = {
	.cra_name = "aes",
	.cra_driver_name = "aes-s390",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER |
		     CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct s390_aes_ctx),
	.cra_module = THIS_MODULE,
	.cra_init = fallback_init_cip,
	.cra_exit = fallback_exit_cip,
	.cra_u = {
		.cipher = {
			.cia_min_keysize = AES_MIN_KEY_SIZE,
			.cia_max_keysize = AES_MAX_KEY_SIZE,
			.cia_setkey = aes_set_key,
			.cia_encrypt = crypto_aes_encrypt,
			.cia_decrypt = crypto_aes_decrypt,
		}
	}
};

static int setkey_fallback_skcipher(struct crypto_skcipher *tfm, const u8 *key,
				    unsigned int len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_clear_flags(sctx->fallback.skcipher,
				    CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(sctx->fallback.skcipher,
				  crypto_skcipher_get_flags(tfm) &
				  CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(sctx->fallback.skcipher, key, len);
}

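/*
 * Forward a request to the software fallback skcipher.  The subrequest
 * lives in the request context (fallback_init_skcipher() sizes the
 * reqsize accordingly), so no extra allocation is needed here.
 */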
static int fallback_skcipher_crypt(struct s390_aes_ctx *sctx,
				   struct skcipher_request *req,
				   unsigned long modifier)
{
	struct skcipher_request *subreq = skcipher_request_ctx(req);

	*subreq = *req;
	skcipher_request_set_tfm(subreq, sctx->fallback.skcipher);
	return (modifier & CPACF_DECRYPT) ?
		crypto_skcipher_decrypt(subreq) :
		crypto_skcipher_encrypt(subreq);
}

static int ecb_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KM_AES_128 :
	     (key_len == 24) ? CPACF_KM_AES_192 :
	     (key_len == 32) ? CPACF_KM_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_skcipher(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

static int ecb_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes, n;
	int ret;

	if (unlikely(!sctx->fc))
		return fallback_skcipher_crypt(sctx, req, modifier);

	ret = skcipher_walk_virt(&walk, req, false);
	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(sctx->fc | modifier, sctx->key,
			 walk.dst.virt.addr, walk.src.virt.addr, n);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	return ret;
}

static int ecb_aes_encrypt(struct skcipher_request *req)
{
	return ecb_aes_crypt(req, 0);
}

static int ecb_aes_decrypt(struct skcipher_request *req)
{
	return ecb_aes_crypt(req, CPACF_DECRYPT);
}

static int fallback_init_skcipher(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(&tfm->base);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);

	sctx->fallback.skcipher = crypto_alloc_skcipher(name, 0,
				CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);

	if (IS_ERR(sctx->fallback.skcipher)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.skcipher);
	}

	crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
				    crypto_skcipher_reqsize(sctx->fallback.skcipher));
	return 0;
}

static void fallback_exit_skcipher(struct crypto_skcipher *tfm)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(sctx->fallback.skcipher);
}

static struct skcipher_alg ecb_aes_alg = {
	.base.cra_name = "ecb(aes)",
	.base.cra_driver_name = "ecb-aes-s390",
	.base.cra_priority = 401,
	.base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct s390_aes_ctx),
	.base.cra_module = THIS_MODULE,
	.init = fallback_init_skcipher,
	.exit = fallback_exit_skcipher,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.setkey = ecb_aes_set_key,
	.encrypt = ecb_aes_encrypt,
	.decrypt = ecb_aes_decrypt,
};

static int cbc_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KMC_AES_128 :
	     (key_len == 24) ? CPACF_KMC_AES_192 :
	     (key_len == 32) ? CPACF_KMC_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_skcipher(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

static int cbc_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes, n;
	int ret;
	struct {
		u8 iv[AES_BLOCK_SIZE];
		u8 key[AES_MAX_KEY_SIZE];
	} param;

	if (unlikely(!sctx->fc))
		return fallback_skcipher_crypt(sctx, req, modifier);

	ret = skcipher_walk_virt(&walk, req, false);
	if (ret)
		return ret;
	memcpy(param.iv, walk.iv, AES_BLOCK_SIZE);
	memcpy(param.key, sctx->key, sctx->key_len);
	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_kmc(sctx->fc | modifier, &param,
			  walk.dst.virt.addr, walk.src.virt.addr, n);
		memcpy(walk.iv, param.iv, AES_BLOCK_SIZE);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	memzero_explicit(&param, sizeof(param));
	return ret;
}

static int cbc_aes_encrypt(struct skcipher_request *req)
{
	return cbc_aes_crypt(req, 0);
}

static int cbc_aes_decrypt(struct skcipher_request *req)
{
	return cbc_aes_crypt(req, CPACF_DECRYPT);
}

static struct skcipher_alg cbc_aes_alg = {
	.base.cra_name = "cbc(aes)",
	.base.cra_driver_name = "cbc-aes-s390",
	.base.cra_priority = 402,
	.base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct s390_aes_ctx),
	.base.cra_module = THIS_MODULE,
	.init = fallback_init_skcipher,
	.exit = fallback_exit_skcipher,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
	.setkey = cbc_aes_set_key,
	.encrypt = cbc_aes_encrypt,
	.decrypt = cbc_aes_decrypt,
};

static int xts_fallback_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       unsigned int len)
{
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_clear_flags(xts_ctx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(xts_ctx->fallback,
				  crypto_skcipher_get_flags(tfm) &
				  CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(xts_ctx->fallback, key, len);
}

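/*
 * The fallback key is always set first: requests that CPACF cannot handle
 * (e.g. a 48-byte combined key for AES-192-XTS, or lengths that are not a
 * multiple of the block size) are forwarded to the fallback in
 * xts_aes_crypt().  The second half of the key material is kept separately
 * as the PCC (tweak) key.
 */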
static int xts_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;
	int err;

	err = xts_fallback_setkey(tfm, in_key, key_len);
	if (err)
		return err;

	/* In fips mode only 128 bit or 256 bit keys are valid */
	if (fips_enabled && key_len != 32 && key_len != 64)
		return -EINVAL;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 32) ? CPACF_KM_XTS_128 :
	     (key_len == 64) ? CPACF_KM_XTS_256 : 0;

	/* Check if the function code is available */
	xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!xts_ctx->fc)
		return 0;

	/* Split the XTS key into the two subkeys */
	key_len = key_len / 2;
	xts_ctx->key_len = key_len;
	memcpy(xts_ctx->key, in_key, key_len);
	memcpy(xts_ctx->pcc_key, in_key + key_len, key_len);
	return 0;
}

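/*
 * The PCC instruction derives the initial XTS parameter from the tweak
 * (IV) and the second subkey; the result (pcc_param.xts) seeds the KM
 * parameter block used for the data itself.  For AES-128 the 32-byte key
 * fields are only half used, so 'offset' skips the unused first 16 bytes
 * when the parameter blocks are passed to PCC and KM.
 */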
static int xts_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int offset, nbytes, n;
	int ret;
	struct {
		u8 key[32];
		u8 tweak[16];
		u8 block[16];
		u8 bit[16];
		u8 xts[16];
	} pcc_param;
	struct {
		u8 key[32];
		u8 init[16];
	} xts_param;

	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;

	if (unlikely(!xts_ctx->fc || (req->cryptlen % AES_BLOCK_SIZE) != 0)) {
		struct skcipher_request *subreq = skcipher_request_ctx(req);

		*subreq = *req;
		skcipher_request_set_tfm(subreq, xts_ctx->fallback);
		return (modifier & CPACF_DECRYPT) ?
			crypto_skcipher_decrypt(subreq) :
			crypto_skcipher_encrypt(subreq);
	}

	ret = skcipher_walk_virt(&walk, req, false);
	if (ret)
		return ret;
	offset = xts_ctx->key_len & 0x10;
	memset(pcc_param.block, 0, sizeof(pcc_param.block));
	memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
	memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
	memcpy(pcc_param.tweak, walk.iv, sizeof(pcc_param.tweak));
	memcpy(pcc_param.key + offset, xts_ctx->pcc_key, xts_ctx->key_len);
	cpacf_pcc(xts_ctx->fc, pcc_param.key + offset);

	memcpy(xts_param.key + offset, xts_ctx->key, xts_ctx->key_len);
	memcpy(xts_param.init, pcc_param.xts, 16);

	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(xts_ctx->fc | modifier, xts_param.key + offset,
			 walk.dst.virt.addr, walk.src.virt.addr, n);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	memzero_explicit(&pcc_param, sizeof(pcc_param));
	memzero_explicit(&xts_param, sizeof(xts_param));
	return ret;
}

static int xts_aes_encrypt(struct skcipher_request *req)
{
	return xts_aes_crypt(req, 0);
}

static int xts_aes_decrypt(struct skcipher_request *req)
{
	return xts_aes_crypt(req, CPACF_DECRYPT);
}

static int xts_fallback_init(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(&tfm->base);
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);

	xts_ctx->fallback = crypto_alloc_skcipher(name, 0,
				CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);

	if (IS_ERR(xts_ctx->fallback)) {
		pr_err("Allocating XTS fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(xts_ctx->fallback);
	}
	crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
				    crypto_skcipher_reqsize(xts_ctx->fallback));
	return 0;
}

static void xts_fallback_exit(struct crypto_skcipher *tfm)
{
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(xts_ctx->fallback);
}

static struct skcipher_alg xts_aes_alg = {
	.base.cra_name = "xts(aes)",
	.base.cra_driver_name = "xts-aes-s390",
	.base.cra_priority = 402,
	.base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct s390_xts_ctx),
	.base.cra_module = THIS_MODULE,
	.init = xts_fallback_init,
	.exit = xts_fallback_exit,
	.min_keysize = 2 * AES_MIN_KEY_SIZE,
	.max_keysize = 2 * AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
	.setkey = xts_aes_set_key,
	.encrypt = xts_aes_encrypt,
	.decrypt = xts_aes_decrypt,
};

static int ctr_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KMCTR_AES_128 :
	     (key_len == 24) ? CPACF_KMCTR_AES_192 :
	     (key_len == 32) ? CPACF_KMCTR_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_skcipher(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

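/*
 * Fill the shared ctrblk page with consecutive counter values derived from
 * the IV, so that a single KMCTR invocation can process many blocks at
 * once.  Returns the number of bytes (a multiple of AES_BLOCK_SIZE, at
 * most PAGE_SIZE) for which counters were prepared.
 */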
static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
	unsigned int i, n;

	/* only use complete blocks, max. PAGE_SIZE */
	memcpy(ctrptr, iv, AES_BLOCK_SIZE);
	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
	for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
		memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
		crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
		ctrptr += AES_BLOCK_SIZE;
	}
	return n;
}

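/*
 * CTR mode: try to grab the shared ctrblk page so that several counter
 * blocks can be fed to KMCTR per call; if the mutex is contended, fall
 * back to processing one block at a time with walk.iv as the counter.
 * A final partial block is handled through a stack buffer.
 */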
static int ctr_aes_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	u8 buf[AES_BLOCK_SIZE], *ctrptr;
	struct skcipher_walk walk;
	unsigned int n, nbytes;
	int ret, locked;

	if (unlikely(!sctx->fc))
		return fallback_skcipher_crypt(sctx, req, 0);

	locked = mutex_trylock(&ctrblk_lock);

	ret = skcipher_walk_virt(&walk, req, false);
	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		n = AES_BLOCK_SIZE;

		if (nbytes >= 2*AES_BLOCK_SIZE && locked)
			n = __ctrblk_init(ctrblk, walk.iv, nbytes);
		ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk.iv;
		cpacf_kmctr(sctx->fc, sctx->key, walk.dst.virt.addr,
			    walk.src.virt.addr, n, ctrptr);
		if (ctrptr == ctrblk)
			memcpy(walk.iv, ctrptr + n - AES_BLOCK_SIZE,
			       AES_BLOCK_SIZE);
		crypto_inc(walk.iv, AES_BLOCK_SIZE);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	if (locked)
		mutex_unlock(&ctrblk_lock);
	/*
	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
	 */
	if (nbytes) {
		cpacf_kmctr(sctx->fc, sctx->key, buf, walk.src.virt.addr,
			    AES_BLOCK_SIZE, walk.iv);
		memcpy(walk.dst.virt.addr, buf, nbytes);
		crypto_inc(walk.iv, AES_BLOCK_SIZE);
		ret = skcipher_walk_done(&walk, 0);
	}

	return ret;
}

static struct skcipher_alg ctr_aes_alg = {
	.base.cra_name = "ctr(aes)",
	.base.cra_driver_name = "ctr-aes-s390",
	.base.cra_priority = 402,
	.base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize = 1,
	.base.cra_ctxsize = sizeof(struct s390_aes_ctx),
	.base.cra_module = THIS_MODULE,
	.init = fallback_init_skcipher,
	.exit = fallback_exit_skcipher,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
	.setkey = ctr_aes_set_key,
	.encrypt = ctr_aes_crypt,
	.decrypt = ctr_aes_crypt,
	.chunksize = AES_BLOCK_SIZE,
};

static int gcm_aes_setkey(struct crypto_aead *tfm, const u8 *key,
			  unsigned int keylen)
{
	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->fc = CPACF_KMA_GCM_AES_128;
		break;
	case AES_KEYSIZE_192:
		ctx->fc = CPACF_KMA_GCM_AES_192;
		break;
	case AES_KEYSIZE_256:
		ctx->fc = CPACF_KMA_GCM_AES_256;
		break;
	default:
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->key_len = keylen;
	return 0;
}

static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 8:
	case 12:
	case 13:
	case 14:
	case 15:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void gcm_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
			   unsigned int len)
{
	memset(gw, 0, sizeof(*gw));
	gw->walk_bytes_remain = len;
	scatterwalk_start(&gw->walk, sg);
}

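/*
 * Map the current scatterlist entry, skipping over zero-length entries.
 * Returns the number of mappable bytes, or 0 when the end of the
 * scatterlist has been reached.
 */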
static inline unsigned int _gcm_sg_clamp_and_map(struct gcm_sg_walk *gw)
{
	struct scatterlist *nextsg;

	gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
	while (!gw->walk_bytes) {
		nextsg = sg_next(gw->walk.sg);
		if (!nextsg)
			return 0;
		scatterwalk_start(&gw->walk, nextsg);
		gw->walk_bytes = scatterwalk_clamp(&gw->walk,
						   gw->walk_bytes_remain);
	}
	gw->walk_ptr = scatterwalk_map(&gw->walk);
	return gw->walk_bytes;
}

static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw,
					     unsigned int nbytes)
{
	gw->walk_bytes_remain -= nbytes;
	scatterwalk_unmap(gw->walk_ptr);
	scatterwalk_advance(&gw->walk, nbytes);
	scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
	gw->walk_ptr = NULL;
}

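/*
 * Provide at least minbytesneeded contiguous input bytes.  If the current
 * scatterlist entry is large enough, its mapping is returned directly;
 * otherwise bytes are gathered into gw->buf across entries.  On return,
 * gw->ptr/gw->nbytes describe the chunk (ptr == NULL means no more data).
 */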
static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
	int n;

	if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) {
		gw->ptr = gw->buf;
		gw->nbytes = gw->buf_bytes;
		goto out;
	}

	if (gw->walk_bytes_remain == 0) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!_gcm_sg_clamp_and_map(gw)) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
		gw->ptr = gw->walk_ptr;
		gw->nbytes = gw->walk_bytes;
		goto out;
	}

	while (1) {
		n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
		memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n);
		gw->buf_bytes += n;
		_gcm_sg_unmap_and_advance(gw, n);
		if (gw->buf_bytes >= minbytesneeded) {
			gw->ptr = gw->buf;
			gw->nbytes = gw->buf_bytes;
			goto out;
		}
		if (!_gcm_sg_clamp_and_map(gw)) {
			gw->ptr = NULL;
			gw->nbytes = 0;
			goto out;
		}
	}

out:
	return gw->nbytes;
}

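/*
 * Output-side counterpart of gcm_in_walk_go(): if the current destination
 * entry cannot hold minbytesneeded bytes, hand out gw->buf instead and let
 * gcm_out_walk_done() scatter the buffered result back later.
 */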
static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
	if (gw->walk_bytes_remain == 0) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!_gcm_sg_clamp_and_map(gw)) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (gw->walk_bytes >= minbytesneeded) {
		gw->ptr = gw->walk_ptr;
		gw->nbytes = gw->walk_bytes;
		goto out;
	}

	scatterwalk_unmap(gw->walk_ptr);
	gw->walk_ptr = NULL;

	gw->ptr = gw->buf;
	gw->nbytes = sizeof(gw->buf);

out:
	return gw->nbytes;
}

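/*
 * Mark bytesdone bytes of the current chunk as consumed.  For buffered
 * input the remainder is shifted to the front of gw->buf; on the output
 * side, buffered bytes are copied back into the destination scatterlist.
 */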
static int gcm_in_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
	if (gw->ptr == NULL)
		return 0;

	if (gw->ptr == gw->buf) {
		int n = gw->buf_bytes - bytesdone;
		if (n > 0) {
			memmove(gw->buf, gw->buf + bytesdone, n);
			gw->buf_bytes = n;
		} else
			gw->buf_bytes = 0;
	} else
		_gcm_sg_unmap_and_advance(gw, bytesdone);

	return bytesdone;
}

static int gcm_out_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
	int i, n;

	if (gw->ptr == NULL)
		return 0;

	if (gw->ptr == gw->buf) {
		for (i = 0; i < bytesdone; i += n) {
			if (!_gcm_sg_clamp_and_map(gw))
				return i;
			n = min(gw->walk_bytes, bytesdone - i);
			memcpy(gw->walk_ptr, gw->buf + i, n);
			_gcm_sg_unmap_and_advance(gw, n);
		}
	} else
		_gcm_sg_unmap_and_advance(gw, bytesdone);

	return bytesdone;
}

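/*
 * Process an entire GCM request with a single KMA loop.  The parameter
 * block mirrors the layout expected by the CPACF KMA instruction (counter
 * value, tag, hash subkey, total aad/payload bit lengths, J0 and the key);
 * aad and payload chunks are fed in as the scatterlist walks allow.
 */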
static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned int ivsize = crypto_aead_ivsize(tfm);
	unsigned int taglen = crypto_aead_authsize(tfm);
	unsigned int aadlen = req->assoclen;
	unsigned int pclen = req->cryptlen;
	int ret = 0;

	unsigned int n, len, in_bytes, out_bytes,
		     min_bytes, bytes, aad_bytes, pc_bytes;
	struct gcm_sg_walk gw_in, gw_out;
	u8 tag[GHASH_DIGEST_SIZE];

	struct {
		u32 _[3];		/* reserved */
		u32 cv;			/* counter value */
		u8 t[GHASH_DIGEST_SIZE];	/* tag */
		u8 h[AES_BLOCK_SIZE];	/* hash subkey */
		u64 taadl;		/* total aad length */
		u64 tpcl;		/* total plain-/ciphertext length */
		u8 j0[GHASH_BLOCK_SIZE];	/* initial counter value */
		u8 k[AES_MAX_KEY_SIZE];	/* key */
	} param;

	/*
	 * encrypt
	 *   req->src: aad||plaintext
	 *   req->dst: aad||ciphertext||tag
	 * decrypt
	 *   req->src: aad||ciphertext||tag
	 *   req->dst: aad||plaintext, return 0 or -EBADMSG
	 * aad, plaintext and ciphertext may be empty.
	 */
	if (flags & CPACF_DECRYPT)
		pclen -= taglen;
	len = aadlen + pclen;

	memset(&param, 0, sizeof(param));
	param.cv = 1;
	param.taadl = aadlen * 8;
	param.tpcl = pclen * 8;
	memcpy(param.j0, req->iv, ivsize);
	*(u32 *)(param.j0 + ivsize) = 1;
	memcpy(param.k, ctx->key, ctx->key_len);

	gcm_walk_start(&gw_in, req->src, len);
	gcm_walk_start(&gw_out, req->dst, len);

	do {
		min_bytes = min_t(unsigned int,
				  aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE);
		in_bytes = gcm_in_walk_go(&gw_in, min_bytes);
		out_bytes = gcm_out_walk_go(&gw_out, min_bytes);
		bytes = min(in_bytes, out_bytes);

		if (aadlen + pclen <= bytes) {
			/* last round */
			aad_bytes = aadlen;
			pc_bytes = pclen;
			flags |= CPACF_KMA_LAAD | CPACF_KMA_LPC;
		} else {
			if (aadlen <= bytes) {
				aad_bytes = aadlen;
				pc_bytes = (bytes - aadlen) &
					   ~(AES_BLOCK_SIZE - 1);
				flags |= CPACF_KMA_LAAD;
			} else {
				aad_bytes = bytes & ~(AES_BLOCK_SIZE - 1);
				pc_bytes = 0;
			}
		}

		if (aad_bytes > 0)
			memcpy(gw_out.ptr, gw_in.ptr, aad_bytes);

		cpacf_kma(ctx->fc | flags, &param,
			  gw_out.ptr + aad_bytes,
			  gw_in.ptr + aad_bytes, pc_bytes,
			  gw_in.ptr, aad_bytes);

		n = aad_bytes + pc_bytes;
		if (gcm_in_walk_done(&gw_in, n) != n)
			return -ENOMEM;
		if (gcm_out_walk_done(&gw_out, n) != n)
			return -ENOMEM;
		aadlen -= aad_bytes;
		pclen -= pc_bytes;
	} while (aadlen + pclen > 0);

	if (flags & CPACF_DECRYPT) {
		scatterwalk_map_and_copy(tag, req->src, len, taglen, 0);
		if (crypto_memneq(tag, param.t, taglen))
			ret = -EBADMSG;
	} else
		scatterwalk_map_and_copy(param.t, req->dst, len, taglen, 1);

	memzero_explicit(&param, sizeof(param));
	return ret;
}

static int gcm_aes_encrypt(struct aead_request *req)
{
	return gcm_aes_crypt(req, CPACF_ENCRYPT);
}

static int gcm_aes_decrypt(struct aead_request *req)
{
	return gcm_aes_crypt(req, CPACF_DECRYPT);
}

static struct aead_alg gcm_aes_aead = {
	.setkey = gcm_aes_setkey,
	.setauthsize = gcm_aes_setauthsize,
	.encrypt = gcm_aes_encrypt,
	.decrypt = gcm_aes_decrypt,

	.ivsize = GHASH_BLOCK_SIZE - sizeof(u32),
	.maxauthsize = GHASH_DIGEST_SIZE,
	.chunksize = AES_BLOCK_SIZE,

	.base = {
		.cra_blocksize = 1,
		.cra_ctxsize = sizeof(struct s390_aes_ctx),
		.cra_priority = 900,
		.cra_name = "gcm(aes)",
		.cra_driver_name = "gcm-aes-s390",
		.cra_module = THIS_MODULE,
	},
};

static struct crypto_alg *aes_s390_alg;
static struct skcipher_alg *aes_s390_skcipher_algs[4];
static int aes_s390_skciphers_num;
static struct aead_alg *aes_s390_aead_alg;

static int aes_s390_register_skcipher(struct skcipher_alg *alg)
{
	int ret;

	ret = crypto_register_skcipher(alg);
	if (!ret)
		aes_s390_skcipher_algs[aes_s390_skciphers_num++] = alg;
	return ret;
}

static void aes_s390_fini(void)
{
	if (aes_s390_alg)
		crypto_unregister_alg(aes_s390_alg);
	while (aes_s390_skciphers_num--)
		crypto_unregister_skcipher(aes_s390_skcipher_algs[aes_s390_skciphers_num]);
	if (ctrblk)
		free_page((unsigned long) ctrblk);

	if (aes_s390_aead_alg)
		crypto_unregister_aead(aes_s390_aead_alg);
}

static int __init aes_s390_init(void)
{
	int ret;

	/* Query available functions for KM, KMC, KMCTR and KMA */
	cpacf_query(CPACF_KM, &km_functions);
	cpacf_query(CPACF_KMC, &kmc_functions);
	cpacf_query(CPACF_KMCTR, &kmctr_functions);
	cpacf_query(CPACF_KMA, &kma_functions);

	if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
	    cpacf_test_func(&km_functions, CPACF_KM_AES_256)) {
		ret = crypto_register_alg(&aes_alg);
		if (ret)
			goto out_err;
		aes_s390_alg = &aes_alg;
		ret = aes_s390_register_skcipher(&ecb_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmc_functions, CPACF_KMC_AES_128) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_192) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_256)) {
		ret = aes_s390_register_skcipher(&cbc_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) {
		ret = aes_s390_register_skcipher(&xts_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_128) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_192) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_256)) {
		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
		if (!ctrblk) {
			ret = -ENOMEM;
			goto out_err;
		}
		ret = aes_s390_register_skcipher(&ctr_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_128) ||
	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_192) ||
	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_256)) {
		ret = crypto_register_aead(&gcm_aes_aead);
		if (ret)
			goto out_err;
		aes_s390_aead_alg = &gcm_aes_aead;
	}

	return 0;
out_err:
	aes_s390_fini();
	return ret;
}

module_cpu_feature_match(S390_CPU_FEATURE_MSA, aes_s390_init);
module_exit(aes_s390_fini);

MODULE_ALIAS_CRYPTO("aes-all");

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);