// SPDX-License-Identifier: GPL-2.0-only
/*
 * AES CCM routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2012 International Business Machines Inc.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */
#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"

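/*
 * The NX unit keeps two coprocessor parameter blocks per tfm: csbcpb
 * drives the AES-CCM encrypt/decrypt operation proper, while
 * csbcpb_aead drives the AES-CCA operation used to MAC large runs of
 * associated data. Both blocks get the same key; only AES-128 is
 * supported by this driver.
 */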
static int ccm_aes_nx_set_key(struct crypto_aead *tfm,
			      const u8 *in_key,
			      unsigned int key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;

	nx_ctx_init(nx_ctx, HCOP_FC_AES);

	switch (key_len) {
	case AES_KEYSIZE_128:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
		break;
	default:
		return -EINVAL;
	}

	csbcpb->cpb.hdr.mode = NX_MODE_AES_CCM;
	memcpy(csbcpb->cpb.aes_ccm.key, in_key, key_len);

	csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_CCA;
	memcpy(csbcpb_aead->cpb.aes_cca.key, in_key, key_len);

	return 0;
}

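/*
 * RFC 4309 transports the CCM nonce implicitly: the last three bytes
 * of the key material are the salt, and only the remainder is the
 * actual AES key. Stash the salt in the tfm context and hand the rest
 * to the plain CCM setkey above.
 */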
static int ccm4309_aes_nx_set_key(struct crypto_aead *tfm,
				  const u8 *in_key,
				  unsigned int key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);

	if (key_len < 3)
		return -EINVAL;

	key_len -= 3;

	memcpy(nx_ctx->priv.ccm.nonce, in_key + key_len, 3);

	return ccm_aes_nx_set_key(tfm, in_key, key_len);
}

static int ccm_aes_nx_setauthsize(struct crypto_aead *tfm,
				  unsigned int authsize)
{
	/* CCM permits any even tag length from 4 to 16 bytes */
	switch (authsize) {
	case 4:
	case 6:
	case 8:
	case 10:
	case 12:
	case 14:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int ccm4309_aes_nx_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	/* RFC 4309 only allows 8, 12 or 16 byte ICVs */
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

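/*
 * CCM's B0 block ends with the message length encoded big-endian in
 * its last L ("csize") bytes. The length must fit in those bytes;
 * any field of 4 or more bytes trivially holds a 32-bit length, so it
 * is clamped to 4 and copied from the tail of a __be32.
 */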
static int set_msg_len(u8 *block, unsigned int msglen, int csize)
{
	__be32 data;

	memset(block, 0, csize);
	block += csize;

	if (csize >= 4)
		csize = 4;
	else if (msglen > (unsigned int)(1 << (8 * csize)))
		return -EOVERFLOW;

	data = cpu_to_be32(msglen);
	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);

	return 0;
}

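/*
 * iv[0] carries L' = L - 1. RFC 3610 requires 2 <= L <= 8, so any
 * flags byte outside 1..7 marks a malformed IV.
 */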
static inline int crypto_ccm_check_iv(const u8 *iv)
{
	if (1 > iv[0] || iv[0] > 7)
		return -EINVAL;

	return 0;
}

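/*
 * Build the B0 block from the IV. B0 shares the nonce and L' with the
 * IV and layers the remaining CCM parameters into the flags byte:
 *
 *	bit  6      Adata: set when associated data is present
 *	bits 5..3   M' = (authsize - 2) / 2
 *	bits 2..0   L' = L - 1 (already present in iv[0])
 *
 * The last L bytes hold the message length, big-endian.
 */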
static int generate_b0(u8 *iv, unsigned int assoclen, unsigned int authsize,
		       unsigned int cryptlen, u8 *b0)
{
	unsigned int l, lp, m = authsize;
	int rc;

	memcpy(b0, iv, 16);

	lp = b0[0];
	l = lp + 1;

	/* set m, bits 3-5 */
	*b0 |= (8 * ((m - 2) / 2));

	/* set adata, bit 6, if associated data is used */
	if (assoclen)
		*b0 |= 64;

	rc = set_msg_len(b0 + 16 - l, cryptlen, l);

	return rc;
}

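/*
 * Generate the B0 block (and, when there is associated data, the B1
 * header block), then run the hardware over the AAD so that "out"
 * holds the partial authentication tag (PAT) that seeds the main CCM
 * pass over the payload.
 */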
static int generate_pat(u8 *iv,
			struct aead_request *req,
			struct nx_crypto_ctx *nx_ctx,
			unsigned int authsize,
			unsigned int nbytes,
			unsigned int assoclen,
			u8 *out)
{
	struct nx_sg *nx_insg = nx_ctx->in_sg;
	struct nx_sg *nx_outsg = nx_ctx->out_sg;
	unsigned int iauth_len = 0;
	u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL;
	int rc;
	unsigned int max_sg_len;

	/* zero the ctr value */
	memset(iv + 15 - iv[0], 0, iv[0] + 1);

	/* The hardware handles the associated data in one of three ways,
	 * depending on its length:
	 *
	 *   0 bytes:     B0 alone seeds the PAT; no MAC pass is needed.
	 *   <= 14 bytes: the AAD fits in B1 alongside its length header,
	 *                so one CCM operation over the B0/B1 pair in the
	 *                csbcpb computes the PAT directly.
	 *   > 14 bytes:  the AAD is fed to a separate CCA (MAC-only)
	 *                operation via csbcpb_aead; B1 then carries only
	 *                the length header plus the leading chunk of AAD
	 *                (14 bytes with the 2-byte length encoding used up
	 *                to 65280 bytes, 10 bytes with the 6-byte 0xfffe
	 *                encoding above that, per RFC 3610).
	 */
	if (!assoclen) {
		b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
	} else if (assoclen <= 14) {
		/* if associated data is 14 bytes or less, we do 1 operation
		 * on 2 AES blocks, B0 (stored in the csbcpb) and B1,
		 * which is fed in through the source buffers here */
		b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
		b1 = nx_ctx->priv.ccm.iauth_tag;
		iauth_len = assoclen;
	} else if (assoclen <= 65280) {
		/* if associated data is less than (2^16 - 2^8), we construct
		 * B1 differently and feed in the associated data to a CCA
		 * operation */
		b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
		b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
		iauth_len = 14;
	} else {
		b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
		b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
		iauth_len = 10;
	}

	/* generate B0 */
	rc = generate_b0(iv, assoclen, authsize, nbytes, b0);
	if (rc)
		return rc;

	/* generate B1:
	 * add control info for associated data
	 * RFC 3610 and NIST Special Publication 800-38C
	 */
	if (b1) {
		memset(b1, 0, 16);
		if (assoclen <= 65280) {
			*(u16 *)b1 = assoclen;
			scatterwalk_map_and_copy(b1 + 2, req->src, 0,
						 iauth_len, SCATTERWALK_FROM_SG);
		} else {
			*(u16 *)b1 = (u16)(0xfffe);
			*(u32 *)&b1[2] = assoclen;
			scatterwalk_map_and_copy(b1 + 6, req->src, 0,
						 iauth_len, SCATTERWALK_FROM_SG);
		}
	}

	/* MAC the associated data: B0 always goes through the hardware;
	 * short AAD rides along inside B1 via the sg lists */
	if (!assoclen) {
		return rc;
	} else if (assoclen <= 14) {
		unsigned int len = 16;

		nx_insg = nx_build_sg_list(nx_insg, b1, &len, nx_ctx->ap->sglen);

		if (len != 16)
			return -EINVAL;

		nx_outsg = nx_build_sg_list(nx_outsg, tmp, &len,
					    nx_ctx->ap->sglen);

		if (len != 16)
			return -EINVAL;

		/* nx_build_sg_list returns one past the last entry used,
		 * so the descriptor byte counts are (end - start) */
		nx_ctx->op.inlen = (nx_insg - nx_ctx->in_sg) *
					sizeof(struct nx_sg);
		nx_ctx->op.outlen = (nx_outsg - nx_ctx->out_sg) *
					sizeof(struct nx_sg);

		NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_ENDE_ENCRYPT;
		NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_INTERMEDIATE;

		result = nx_ctx->csbcpb->cpb.aes_ccm.out_pat_or_mac;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			return rc;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(assoclen, &nx_ctx->stats->aes_bytes);

	} else {
		unsigned int processed = 0, to_process;

		processed += iauth_len;

		/* bound the sg list by both the device limits and the
		 * size of one sg list page */
		max_sg_len = min_t(u64, nx_ctx->ap->sglen,
				   nx_driver.of.max_sg_len/sizeof(struct nx_sg));
		max_sg_len = min_t(u64, max_sg_len,
				   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

		do {
			to_process = min_t(u32, assoclen - processed,
					   nx_ctx->ap->databytelen);

			nx_insg = nx_walk_and_build(nx_ctx->in_sg,
						    max_sg_len,
						    req->src, processed,
						    &to_process);

			if ((to_process + processed) < assoclen) {
				NX_CPB_FDM(nx_ctx->csbcpb_aead) |=
					NX_FDM_INTERMEDIATE;
			} else {
				NX_CPB_FDM(nx_ctx->csbcpb_aead) &=
					~NX_FDM_INTERMEDIATE;
			}

			nx_ctx->op_aead.inlen = (nx_insg - nx_ctx->in_sg) *
						sizeof(struct nx_sg);

			result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;

			rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
					   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
			if (rc)
				return rc;

			/* chain the running MAC into the next iteration */
			memcpy(nx_ctx->csbcpb_aead->cpb.aes_cca.b0,
			       nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0,
			       AES_BLOCK_SIZE);

			NX_CPB_FDM(nx_ctx->csbcpb_aead) |= NX_FDM_CONTINUATION;

			atomic_inc(&(nx_ctx->stats->aes_ops));
			atomic64_add(assoclen, &nx_ctx->stats->aes_bytes);

			processed += to_process;
		} while (processed < assoclen);

		result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
	}

	memcpy(out, result, AES_BLOCK_SIZE);

	return rc;
}

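/*
 * Decrypt path: save the received tag before it can be clobbered, MAC
 * the AAD into the PAT, then run the payload through the hardware in
 * bounded chunks, chaining the counter, PAT and S0 between calls. The
 * computed MAC is compared against the saved tag with crypto_memneq()
 * to keep the check constant-time.
 */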
static int ccm_nx_decrypt(struct aead_request *req,
			  u8 *iv,
			  unsigned int assoclen)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned int nbytes = req->cryptlen;
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
	struct nx_ccm_priv *priv = &nx_ctx->priv.ccm;
	unsigned long irq_flags;
	unsigned int processed = 0, to_process;
	int rc = -1;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	nbytes -= authsize;

	/* copy out the auth tag to compare with later */
	scatterwalk_map_and_copy(priv->oauth_tag,
				 req->src, nbytes + req->assoclen, authsize,
				 SCATTERWALK_FROM_SG);

	rc = generate_pat(iv, req, nx_ctx, authsize, nbytes, assoclen,
			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
	if (rc)
		goto out;

	do {
		/* to_process: the AES_BLOCK_SIZE data chunk to process in
		 * this update. This value is bound by sg list limits.
		 */
		to_process = nbytes - processed;

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

		rc = nx_build_sg_lists(nx_ctx, iv, req->dst, req->src,
				       &to_process, processed + req->assoclen,
				       csbcpb->cpb.aes_ccm.iv_or_ctr);
		if (rc)
			goto out;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		/* for partial completion, copy following for next
		 * entry into loop...
		 */
		memcpy(iv, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
		       csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_s0,
		       csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		/* update stats */
		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(be32_to_cpu(csbcpb->csb.processed_byte_count),
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

	rc = crypto_memneq(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag,
			   authsize) ? -EBADMSG : 0;
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

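/*
 * Encrypt path: mirrors the decrypt loop above, but with ENDE_ENCRYPT
 * set; once the loop finishes, the MAC left in out_pat_or_mac is
 * appended to the destination scatterlist after the ciphertext.
 */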
static int ccm_nx_encrypt(struct aead_request *req,
			  u8 *iv,
			  unsigned int assoclen)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned int nbytes = req->cryptlen;
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
	unsigned long irq_flags;
	unsigned int processed = 0, to_process;
	int rc = -1;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	rc = generate_pat(iv, req, nx_ctx, authsize, nbytes, assoclen,
			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
	if (rc)
		goto out;

	do {
		/* to_process: the AES_BLOCK_SIZE data chunk to process in
		 * this update. This value is bound by sg list limits.
		 */
		to_process = nbytes - processed;

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;

		rc = nx_build_sg_lists(nx_ctx, iv, req->dst, req->src,
				       &to_process, processed + req->assoclen,
				       csbcpb->cpb.aes_ccm.iv_or_ctr);
		if (rc)
			goto out;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		/* for partial completion, copy following for next
		 * entry into loop...
		 */
		memcpy(iv, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
		       csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_s0,
		       csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		/* update stats */
		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(be32_to_cpu(csbcpb->csb.processed_byte_count),
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;

	} while (processed < nbytes);

	/* copy out the auth tag */
	scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac,
				 req->dst, nbytes + req->assoclen, authsize,
				 SCATTERWALK_TO_SG);

out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

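/*
 * RFC 4309 request wrappers: rebuild the full 16-byte CCM IV from the
 * fixed pieces (iv[0] = 3 selects L' = 3, i.e. a 4-byte length field),
 * the 3-byte salt saved at setkey time, and the 8-byte explicit IV
 * from the request. The explicit IV is carried at the tail of the
 * associated data, so 8 bytes are trimmed from assoclen before the
 * CCM core sees it.
 */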
static int ccm4309_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	u8 *iv = rctx->iv;

	iv[0] = 3;
	memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
	memcpy(iv + 4, req->iv, 8);

	return ccm_nx_encrypt(req, iv, req->assoclen - 8);
}

static int ccm_aes_nx_encrypt(struct aead_request *req)
{
	int rc;

	rc = crypto_ccm_check_iv(req->iv);
	if (rc)
		return rc;

	return ccm_nx_encrypt(req, req->iv, req->assoclen);
}

static int ccm4309_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	u8 *iv = rctx->iv;

	iv[0] = 3;
	memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
	memcpy(iv + 4, req->iv, 8);

	return ccm_nx_decrypt(req, iv, req->assoclen - 8);
}

static int ccm_aes_nx_decrypt(struct aead_request *req)
{
	int rc;

	rc = crypto_ccm_check_iv(req->iv);
	if (rc)
		return rc;

	return ccm_nx_decrypt(req, req->iv, req->assoclen);
}

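/*
 * AEAD algorithm definitions registered by the NX driver core. The
 * cra_priority of 300 ranks these above the generic software ccm(aes)
 * implementation, and CRYPTO_ALG_NEED_FALLBACK lets requests the
 * hardware cannot handle fall back to it.
 */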
struct aead_alg nx_ccm_aes_alg = {
	.base = {
		.cra_name        = "ccm(aes)",
		.cra_driver_name = "ccm-aes-nx",
		.cra_priority    = 300,
		.cra_flags       = CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize   = 1,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_module      = THIS_MODULE,
	},
	.init        = nx_crypto_ctx_aes_ccm_init,
	.exit        = nx_crypto_ctx_aead_exit,
	.ivsize      = AES_BLOCK_SIZE,
	.maxauthsize = AES_BLOCK_SIZE,
	.setkey      = ccm_aes_nx_set_key,
	.setauthsize = ccm_aes_nx_setauthsize,
	.encrypt     = ccm_aes_nx_encrypt,
	.decrypt     = ccm_aes_nx_decrypt,
};

struct aead_alg nx_ccm4309_aes_alg = {
	.base = {
		.cra_name        = "rfc4309(ccm(aes))",
		.cra_driver_name = "rfc4309-ccm-aes-nx",
		.cra_priority    = 300,
		.cra_flags       = CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize   = 1,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_module      = THIS_MODULE,
	},
	.init        = nx_crypto_ctx_aes_ccm_init,
	.exit        = nx_crypto_ctx_aead_exit,
	.ivsize      = 8,
	.maxauthsize = AES_BLOCK_SIZE,
	.setkey      = ccm4309_aes_nx_set_key,
	.setauthsize = ccm4309_aes_nx_setauthsize,
	.encrypt     = ccm4309_aes_nx_encrypt,
	.decrypt     = ccm4309_aes_nx_decrypt,
};