// SPDX-License-Identifier: GPL-2.0-only
/*
 * Routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2011-2012 International Business Machines Inc.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/aes.h>
#include <crypto/sha2.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/of.h>
#include <asm/hvcall.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"

/**
 * nx_hcall_sync - make an H_COP_OP hcall for the passed in op structure
 *
 * @nx_ctx: the crypto context handle
 * @op: PFO operation struct to pass in
 * @may_sleep: flag indicating the request can sleep
 *
 * Make the hcall, retrying while the hardware is busy. If we cannot yield
 * the thread, limit the number of retries to 10 here.
 */
int nx_hcall_sync(struct nx_crypto_ctx *nx_ctx,
                  struct vio_pfo_op *op,
                  u32 may_sleep)
{
        int rc, retries = 10;
        struct vio_dev *viodev = nx_driver.viodev;

        atomic_inc(&(nx_ctx->stats->sync_ops));

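        /* retry while the coprocessor reports busy; if the caller cannot
         * sleep, bound the spin at 10 attempts */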
        do {
                rc = vio_h_cop_sync(viodev, op);
        } while (rc == -EBUSY && !may_sleep && retries--);

        if (rc) {
                dev_dbg(&viodev->dev, "vio_h_cop_sync failed: rc: %d hcall rc: %ld\n",
                        rc, op->hcall_err);
                atomic_inc(&(nx_ctx->stats->errors));
                atomic_set(&(nx_ctx->stats->last_error), op->hcall_err);
                atomic_set(&(nx_ctx->stats->last_error_pid), current->pid);
        }

        return rc;
}

/**
 * nx_build_sg_list - build an NX scatter list describing a single buffer
 *
 * @sg_head: pointer to the first scatter list element to build
 * @start_addr: pointer to the linear buffer
 * @len: length of the data at @start_addr
 * @sgmax: the largest number of scatter list elements we're allowed to create
 *
 * This function will start writing nx_sg elements at @sg_head and keep
 * writing them until all of the data from @start_addr is described or until
 * sgmax elements have been written. Scatter list elements will be created
 * such that none of the elements describes a buffer that crosses a 4K
 * boundary.
 */
struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
                               u8 *start_addr,
                               unsigned int *len,
                               u32 sgmax)
{
        unsigned int sg_len = 0;
        struct nx_sg *sg;
        u64 sg_addr = (u64)start_addr;
        u64 end_addr;

        /* translate the buffer's virtual address to a physical one: vmalloc
         * addresses need a page lookup, lowmem addresses are a linear offset */
        if (is_vmalloc_addr(start_addr))
                sg_addr = page_to_phys(vmalloc_to_page(start_addr))
                          + offset_in_page(sg_addr);
        else
                sg_addr = __pa(sg_addr);

        end_addr = sg_addr + *len;

        /* each iteration of this loop writes one struct nx_sg element.
         * Writing ends when we have described *len bytes or run out of
         * elements; each element is clipped so that it never crosses an
         * NX_PAGE_SIZE boundary. */
        for (sg = sg_head; sg_len < *len; sg++) {
                u64 next_page;

                sg->addr = sg_addr;
                sg_addr = min_t(u64, NX_PAGE_NUM(sg_addr + NX_PAGE_SIZE),
                                end_addr);

                next_page = (sg->addr & PAGE_MASK) + PAGE_SIZE;
                sg->len = min_t(u64, sg_addr, next_page) - sg->addr;
                sg_len += sg->len;

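                /* vmalloc buffers are not physically contiguous, so when we
                 * cross into the next page, look up its physical address */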
                if (sg_addr >= next_page &&
                    is_vmalloc_addr(start_addr + sg_len)) {
                        sg_addr = page_to_phys(vmalloc_to_page(
                                                start_addr + sg_len));
                        end_addr = sg_addr + *len - sg_len;
                }

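                /* bail out once we have filled every available element */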
                if ((sg - sg_head) == sgmax) {
                        pr_err("nx: scatter/gather list overflow, pid: %d\n",
                               current->pid);
                        sg++;
                        break;
                }
        }
        *len = sg_len;

        return sg;
}

/**
 * nx_walk_and_build - walk a linux scatterlist and build an nx scatterlist
 *
 * @nx_dst: pointer to the first nx_sg element to write
 * @sglen: max number of nx_sg entries we're allowed to write
 * @sg_src: pointer to the source linux scatterlist to walk
 * @start: number of bytes to fast-forward past at the beginning of @sg_src
 * @src_len: number of bytes to walk in @sg_src
 */
struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst,
                                unsigned int sglen,
                                struct scatterlist *sg_src,
                                unsigned int start,
                                unsigned int *src_len)
{
        struct scatter_walk walk;
        struct nx_sg *nx_sg = nx_dst;
        unsigned int n, offset = 0, len = *src_len;
        char *dst;

        /* we need to fast forward through @start bytes first */
        for (;;) {
                scatterwalk_start(&walk, sg_src);

                if (start < offset + sg_src->length)
                        break;

                offset += sg_src->length;
                sg_src = sg_next(sg_src);
        }

        /* start - offset is the number of bytes to advance in the
         * scatterlist element we're currently looking at */
        scatterwalk_advance(&walk, start - offset);

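        /* build NX elements from the walk until the data is fully described
         * or the destination list is full */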
        while (len && (nx_sg - nx_dst) < sglen) {
                n = scatterwalk_clamp(&walk, len);
                if (!n) {
                        /* the current element is exhausted; move on to the
                         * next one (sg_next() copes with chained lists) */
                        scatterwalk_start(&walk, sg_next(walk.sg));
                        n = scatterwalk_clamp(&walk, len);
                }
                dst = scatterwalk_map(&walk);

                nx_sg = nx_build_sg_list(nx_sg, dst, &n,
                                         sglen - (nx_sg - nx_dst));
                len -= n;

                scatterwalk_unmap(dst);
                scatterwalk_advance(&walk, n);
                scatterwalk_done(&walk, SCATTERWALK_FROM_SG, len);
        }

        /* tell the caller how many bytes we actually described */
        *src_len -= len;

        return nx_sg;
}

/**
 * trim_sg_list - ensure the described data is a multiple of AES_BLOCK_SIZE
 * @sg: sg list head
 * @end: sg list end
 * @delta: number of bytes to trim off the end of the list
 * @nbytes: length of data in the scatterlists, updated to reflect the trim
 *
 * Returns the length in bytes of the resulting sg list; this is negative,
 * which is how the hardware distinguishes scatter/gather operands.
 */
static long int trim_sg_list(struct nx_sg *sg,
                             struct nx_sg *end,
                             unsigned int delta,
                             unsigned int *nbytes)
{
        long int oplen;
        long int data_back;
        unsigned int is_delta = delta;

        while (delta && end > sg) {
                struct nx_sg *last = end - 1;

                if (last->len > delta) {
                        last->len -= delta;
                        delta = 0;
                } else {
                        end--;
                        delta -= last->len;
                }
        }

        /* There are cases where we need to crop the list in order to make it
         * a block size multiple, but we also need to align data. In order to
         * do that we need to calculate how much data we need to put back to
         * be processed */
        oplen = (sg - end) * sizeof(struct nx_sg);
        if (is_delta) {
                data_back = (abs(oplen) / AES_BLOCK_SIZE) * sg->len;
                data_back = *nbytes - (data_back & ~(AES_BLOCK_SIZE - 1));
                *nbytes -= data_back;
        }

        return oplen;
}

/**
 * nx_build_sg_lists - walk the input scatterlists and build arrays of NX
 *                     scatterlists based on them.
 *
 * @nx_ctx: NX crypto context for the lists we're building
 * @iv: iv data, if the algorithm requires it
 * @dst: destination scatterlist
 * @src: source scatterlist
 * @nbytes: length of data described in the scatterlists
 * @offset: number of bytes to fast-forward past at the beginning of
 *          scatterlists.
 * @oiv: destination for the iv data, if the algorithm requires it
 *
 * This is common code shared by all the AES algorithms. It uses the crypto
 * scatterlist walk routines to traverse input and output scatterlists,
 * building corresponding NX scatterlists.
 */
int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
                      const u8 *iv,
                      struct scatterlist *dst,
                      struct scatterlist *src,
                      unsigned int *nbytes,
                      unsigned int offset,
                      u8 *oiv)
{
        unsigned int delta = 0;
        unsigned int total = *nbytes;
        struct nx_sg *nx_insg = nx_ctx->in_sg;
        struct nx_sg *nx_outsg = nx_ctx->out_sg;
        unsigned int max_sg_len;

        max_sg_len = min_t(u64, nx_ctx->ap->sglen,
                           nx_driver.of.max_sg_len / sizeof(struct nx_sg));
        max_sg_len = min_t(u64, max_sg_len,
                           nx_ctx->ap->databytelen / NX_PAGE_SIZE);

        if (oiv)
                memcpy(oiv, iv, AES_BLOCK_SIZE);

        *nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen);

        nx_outsg = nx_walk_and_build(nx_outsg, max_sg_len, dst,
                                     offset, nbytes);
        nx_insg = nx_walk_and_build(nx_insg, max_sg_len, src,
                                    offset, nbytes);

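        /* if the hardware limits clipped the request, remember how many
         * trailing bytes fall short of a full AES block so they can be
         * trimmed off below */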
        if (*nbytes < total)
                delta = *nbytes - (*nbytes & ~(AES_BLOCK_SIZE - 1));

        /* these lengths should be negative, which will indicate to phyp that
         * the input and output parameters are scatterlists, not linear
         * buffers */
        nx_ctx->op.inlen = trim_sg_list(nx_ctx->in_sg, nx_insg, delta, nbytes);
        nx_ctx->op.outlen = trim_sg_list(nx_ctx->out_sg, nx_outsg, delta,
                                         nbytes);

        return 0;
}
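
/*
 * A sketch of how an algorithm file is expected to drive the helpers above
 * (hypothetical local names; the real callers live in the nx-aes-* files):
 *
 *	rc = nx_build_sg_lists(nx_ctx, iv, req->dst, req->src, &to_process,
 *			       processed, csbcpb->cpb.aes_cbc.iv);
 *	if (rc)
 *		goto out;
 *	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
 *			   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
 */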

/**
 * nx_ctx_init - initialize an nx_crypto_ctx's vio_pfo_op structs
 *
 * @nx_ctx: the nx context to initialize
 * @function: the function code for the op
 */
void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function)
{
        spin_lock_init(&nx_ctx->lock);
        memset(nx_ctx->kmem, 0, nx_ctx->kmem_len);
        nx_ctx->csbcpb->csb.valid |= NX_CSB_VALID_BIT;

        nx_ctx->op.flags = function;
        nx_ctx->op.csbcpb = __pa(nx_ctx->csbcpb);
        nx_ctx->op.in = __pa(nx_ctx->in_sg);
        nx_ctx->op.out = __pa(nx_ctx->out_sg);

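        /* CCM and GCM contexts carry a second CSB/CPB for the AEAD pass */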
        if (nx_ctx->csbcpb_aead) {
                nx_ctx->csbcpb_aead->csb.valid |= NX_CSB_VALID_BIT;

                nx_ctx->op_aead.flags = function;
                nx_ctx->op_aead.csbcpb = __pa(nx_ctx->csbcpb_aead);
                nx_ctx->op_aead.in = __pa(nx_ctx->in_sg);
                nx_ctx->op_aead.out = __pa(nx_ctx->out_sg);
        }
}

static void nx_of_update_status(struct device *dev,
                                struct property *p,
                                struct nx_of *props)
{
        if (!strncmp(p->value, "okay", p->length)) {
                props->status = NX_WAITING;
                props->flags |= NX_OF_FLAG_STATUS_SET;
        } else {
                dev_info(dev, "%s: status '%s' is not 'okay'\n", __func__,
                         (char *)p->value);
        }
}

static void nx_of_update_sglen(struct device *dev,
                               struct property *p,
                               struct nx_of *props)
{
        if (p->length != sizeof(props->max_sg_len)) {
                dev_err(dev, "%s: unexpected format for ibm,max-sg-len property\n",
                        __func__);
                dev_dbg(dev, "%s: ibm,max-sg-len is %d bytes long, expected %zd bytes\n",
                        __func__, p->length, sizeof(props->max_sg_len));
                return;
        }

        props->max_sg_len = *(u32 *)p->value;
        props->flags |= NX_OF_FLAG_MAXSGLEN_SET;
}

static void nx_of_update_msc(struct device *dev,
                             struct property *p,
                             struct nx_of *props)
{
        struct msc_triplet *trip;
        struct max_sync_cop *msc;
        unsigned int bytes_so_far, i, lenp;

        msc = (struct max_sync_cop *)p->value;
        lenp = p->length;

        /* You can't tell if the data read in for this property is sane by its
         * size alone. This is because there are sizes embedded in the data
         * structure. The best we can do is check lengths as we parse and bail
         * as soon as a length error is detected. */
        bytes_so_far = 0;

        while ((bytes_so_far + sizeof(struct max_sync_cop)) <= lenp) {
                bytes_so_far += sizeof(struct max_sync_cop);

                trip = msc->trip;

                for (i = 0;
                     ((bytes_so_far + sizeof(struct msc_triplet)) <= lenp) &&
                     i < msc->triplets;
                     i++) {
                        if (msc->fc >= NX_MAX_FC || msc->mode >= NX_MAX_MODE) {
                                dev_err(dev, "unknown function code/mode combo: %d/%d (ignored)\n",
                                        msc->fc, msc->mode);
                                goto next_loop;
                        }

                        if (!trip->sglen || trip->databytelen < NX_PAGE_SIZE) {
                                dev_warn(dev, "bogus sglen/databytelen: %u/%u (ignored)\n",
                                         trip->sglen, trip->databytelen);
                                goto next_loop;
                        }

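                        /* map the key/digest bit length onto one of the
                         * three per-fc/mode property slots */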
                        switch (trip->keybitlen) {
                        case 128:
                        case 160:
                                props->ap[msc->fc][msc->mode][0].databytelen =
                                        trip->databytelen;
                                props->ap[msc->fc][msc->mode][0].sglen =
                                        trip->sglen;
                                break;
                        case 192:
                                props->ap[msc->fc][msc->mode][1].databytelen =
                                        trip->databytelen;
                                props->ap[msc->fc][msc->mode][1].sglen =
                                        trip->sglen;
                                break;
                        case 256:
                                if (msc->fc == NX_FC_AES) {
                                        props->ap[msc->fc][msc->mode][2].
                                                databytelen = trip->databytelen;
                                        props->ap[msc->fc][msc->mode][2].sglen =
                                                trip->sglen;
                                } else if (msc->fc == NX_FC_AES_HMAC ||
                                           msc->fc == NX_FC_SHA) {
                                        props->ap[msc->fc][msc->mode][1].
                                                databytelen = trip->databytelen;
                                        props->ap[msc->fc][msc->mode][1].sglen =
                                                trip->sglen;
                                } else {
                                        dev_warn(dev, "unknown function code/key bit len combo: (%u/256)\n",
                                                 msc->fc);
                                }
                                break;
                        case 512:
                                props->ap[msc->fc][msc->mode][2].databytelen =
                                        trip->databytelen;
                                props->ap[msc->fc][msc->mode][2].sglen =
                                        trip->sglen;
                                break;
                        default:
                                dev_warn(dev, "unknown function code/key bit len combo: (%u/%u)\n",
                                         msc->fc, trip->keybitlen);
                                break;
                        }
next_loop:
                        bytes_so_far += sizeof(struct msc_triplet);
                        trip++;
                }

                msc = (struct max_sync_cop *)trip;
        }

        props->flags |= NX_OF_FLAG_MAXSYNCCOP_SET;
}

/**
 * nx_of_init - read openFirmware values from the device tree
 *
 * @dev: device handle
 * @props: pointer to struct to hold the properties values
 *
 * Called once at driver probe time, this function will read out the
 * openFirmware properties we use at runtime. If all the OF properties are
 * acceptable, when we exit this function props->flags will indicate that
 * we're ready to register our crypto algorithms.
 */
static void nx_of_init(struct device *dev, struct nx_of *props)
{
        struct device_node *base_node = dev->of_node;
        struct property *p;

        p = of_find_property(base_node, "status", NULL);
        if (!p)
                dev_info(dev, "%s: property 'status' not found\n", __func__);
        else
                nx_of_update_status(dev, p, props);

        p = of_find_property(base_node, "ibm,max-sg-len", NULL);
        if (!p)
                dev_info(dev, "%s: property 'ibm,max-sg-len' not found\n",
                         __func__);
        else
                nx_of_update_sglen(dev, p, props);

        p = of_find_property(base_node, "ibm,max-sync-cop", NULL);
        if (!p)
                dev_info(dev, "%s: property 'ibm,max-sync-cop' not found\n",
                         __func__);
        else
                nx_of_update_msc(dev, p, props);
}

static bool nx_check_prop(struct device *dev, u32 fc, u32 mode, int slot)
{
        struct alg_props *props = &nx_driver.of.ap[fc][mode][slot];

        if (!props->sglen || props->databytelen < NX_PAGE_SIZE) {
                if (dev)
                        dev_warn(dev, "bogus sglen/databytelen for %u/%u/%u: %u/%u (ignored)\n",
                                 fc, mode, slot, props->sglen,
                                 props->databytelen);
                return false;
        }

        return true;
}

static bool nx_check_props(struct device *dev, u32 fc, u32 mode)
{
        int i;

        for (i = 0; i < 3; i++)
                if (!nx_check_prop(dev, fc, mode, i))
                        return false;

        return true;
}

static int nx_register_skcipher(struct skcipher_alg *alg, u32 fc, u32 mode)
{
        return nx_check_props(&nx_driver.viodev->dev, fc, mode) ?
               crypto_register_skcipher(alg) : 0;
}

static int nx_register_aead(struct aead_alg *alg, u32 fc, u32 mode)
{
        return nx_check_props(&nx_driver.viodev->dev, fc, mode) ?
               crypto_register_aead(alg) : 0;
}

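/* for hashes, a non-negative slot selects a single key/digest-size slot to
 * validate; a negative slot means "check every slot for this fc/mode" */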
static int nx_register_shash(struct shash_alg *alg, u32 fc, u32 mode, int slot)
{
        return (slot >= 0 ? nx_check_prop(&nx_driver.viodev->dev,
                                          fc, mode, slot) :
                            nx_check_props(&nx_driver.viodev->dev, fc, mode)) ?
               crypto_register_shash(alg) : 0;
}

static void nx_unregister_skcipher(struct skcipher_alg *alg, u32 fc, u32 mode)
{
        if (nx_check_props(NULL, fc, mode))
                crypto_unregister_skcipher(alg);
}

static void nx_unregister_aead(struct aead_alg *alg, u32 fc, u32 mode)
{
        if (nx_check_props(NULL, fc, mode))
                crypto_unregister_aead(alg);
}

static void nx_unregister_shash(struct shash_alg *alg, u32 fc, u32 mode,
                                int slot)
{
        if (slot >= 0 ? nx_check_prop(NULL, fc, mode, slot) :
                        nx_check_props(NULL, fc, mode))
                crypto_unregister_shash(alg);
}

/**
 * nx_register_algs - register algorithms with the crypto API
 *
 * Called from nx_probe()
 *
 * If all OF properties are in an acceptable state, the driver flags will
 * indicate that we're ready and we'll create our debugfs files and register
 * our crypto algorithms.
 */
static int nx_register_algs(void)
{
        int rc = -1;

        if (nx_driver.of.flags != NX_OF_FLAG_MASK_READY)
                goto out;

        memset(&nx_driver.stats, 0, sizeof(struct nx_stats));

        NX_DEBUGFS_INIT(&nx_driver);

        nx_driver.of.status = NX_OKAY;

        rc = nx_register_skcipher(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
        if (rc)
                goto out;

        rc = nx_register_skcipher(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
        if (rc)
                goto out_unreg_ecb;

        rc = nx_register_skcipher(&nx_ctr3686_aes_alg, NX_FC_AES,
                                  NX_MODE_AES_CTR);
        if (rc)
                goto out_unreg_cbc;

        rc = nx_register_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
        if (rc)
                goto out_unreg_ctr3686;

        rc = nx_register_aead(&nx_gcm4106_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
        if (rc)
                goto out_unreg_gcm;

        rc = nx_register_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
        if (rc)
                goto out_unreg_gcm4106;

        rc = nx_register_aead(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
        if (rc)
                goto out_unreg_ccm;

        rc = nx_register_shash(&nx_shash_sha256_alg, NX_FC_SHA, NX_MODE_SHA,
                               NX_PROPS_SHA256);
        if (rc)
                goto out_unreg_ccm4309;

        rc = nx_register_shash(&nx_shash_sha512_alg, NX_FC_SHA, NX_MODE_SHA,
                               NX_PROPS_SHA512);
        if (rc)
                goto out_unreg_s256;

        rc = nx_register_shash(&nx_shash_aes_xcbc_alg,
                               NX_FC_AES, NX_MODE_AES_XCBC_MAC, -1);
        if (rc)
                goto out_unreg_s512;

        goto out;

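/* unwind the registrations in reverse order on failure */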
out_unreg_s512:
        nx_unregister_shash(&nx_shash_sha512_alg, NX_FC_SHA, NX_MODE_SHA,
                            NX_PROPS_SHA512);
out_unreg_s256:
        nx_unregister_shash(&nx_shash_sha256_alg, NX_FC_SHA, NX_MODE_SHA,
                            NX_PROPS_SHA256);
out_unreg_ccm4309:
        nx_unregister_aead(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
out_unreg_ccm:
        nx_unregister_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
out_unreg_gcm4106:
        nx_unregister_aead(&nx_gcm4106_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
out_unreg_gcm:
        nx_unregister_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
out_unreg_ctr3686:
        nx_unregister_skcipher(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
out_unreg_cbc:
        nx_unregister_skcipher(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
out_unreg_ecb:
        nx_unregister_skcipher(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
out:
        return rc;
}

/**
 * nx_crypto_ctx_init - create and initialize a crypto api context
 *
 * @nx_ctx: the crypto api context
 * @fc: function code for the context
 * @mode: the function code specific mode for this context
 */
static int nx_crypto_ctx_init(struct nx_crypto_ctx *nx_ctx, u32 fc, u32 mode)
{
        if (nx_driver.of.status != NX_OKAY) {
                pr_err("Attempt to initialize NX crypto context while device is not available!\n");
                return -ENODEV;
        }

        /* we need an extra page for csbcpb_aead for these modes */
        if (mode == NX_MODE_AES_GCM || mode == NX_MODE_AES_CCM)
                nx_ctx->kmem_len = (5 * NX_PAGE_SIZE) +
                                   sizeof(struct nx_csbcpb);
        else
                nx_ctx->kmem_len = (4 * NX_PAGE_SIZE) +
                                   sizeof(struct nx_csbcpb);

        nx_ctx->kmem = kmalloc(nx_ctx->kmem_len, GFP_KERNEL);
        if (!nx_ctx->kmem)
                return -ENOMEM;

        /* the csbcpb and scatterlists must be 4K aligned pages */
        nx_ctx->csbcpb = (struct nx_csbcpb *)(round_up((u64)nx_ctx->kmem,
                                                       (u64)NX_PAGE_SIZE));
        nx_ctx->in_sg = (struct nx_sg *)((u8 *)nx_ctx->csbcpb + NX_PAGE_SIZE);
        nx_ctx->out_sg = (struct nx_sg *)((u8 *)nx_ctx->in_sg + NX_PAGE_SIZE);

        if (mode == NX_MODE_AES_GCM || mode == NX_MODE_AES_CCM)
                nx_ctx->csbcpb_aead =
                        (struct nx_csbcpb *)((u8 *)nx_ctx->out_sg +
                                             NX_PAGE_SIZE);

        /* give each context a pointer to global stats and their OF
         * properties */
        nx_ctx->stats = &nx_driver.stats;
        memcpy(nx_ctx->props, nx_driver.of.ap[fc][mode],
               sizeof(struct alg_props) * 3);

        return 0;
}

int nx_crypto_ctx_aes_ccm_init(struct crypto_aead *tfm)
{
        crypto_aead_set_reqsize(tfm, sizeof(struct nx_ccm_rctx));
        return nx_crypto_ctx_init(crypto_aead_ctx(tfm), NX_FC_AES,
                                  NX_MODE_AES_CCM);
}

int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm)
{
        crypto_aead_set_reqsize(tfm, sizeof(struct nx_gcm_rctx));
        return nx_crypto_ctx_init(crypto_aead_ctx(tfm), NX_FC_AES,
                                  NX_MODE_AES_GCM);
}

int nx_crypto_ctx_aes_ctr_init(struct crypto_skcipher *tfm)
{
        return nx_crypto_ctx_init(crypto_skcipher_ctx(tfm), NX_FC_AES,
                                  NX_MODE_AES_CTR);
}

int nx_crypto_ctx_aes_cbc_init(struct crypto_skcipher *tfm)
{
        return nx_crypto_ctx_init(crypto_skcipher_ctx(tfm), NX_FC_AES,
                                  NX_MODE_AES_CBC);
}

int nx_crypto_ctx_aes_ecb_init(struct crypto_skcipher *tfm)
{
        return nx_crypto_ctx_init(crypto_skcipher_ctx(tfm), NX_FC_AES,
                                  NX_MODE_AES_ECB);
}

int nx_crypto_ctx_sha_init(struct crypto_tfm *tfm)
{
        return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_SHA, NX_MODE_SHA);
}

int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm)
{
        return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
                                  NX_MODE_AES_XCBC_MAC);
}

/**
 * nx_crypto_ctx_exit - destroy a crypto api context
 *
 * @tfm: the crypto transform pointer for the context
 *
 * As crypto API contexts are destroyed, this exit hook is called to free the
 * memory associated with it.
 */
void nx_crypto_ctx_exit(struct crypto_tfm *tfm)
{
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);

        kfree_sensitive(nx_ctx->kmem);
        nx_ctx->csbcpb = NULL;
        nx_ctx->csbcpb_aead = NULL;
        nx_ctx->in_sg = NULL;
        nx_ctx->out_sg = NULL;
}

void nx_crypto_ctx_skcipher_exit(struct crypto_skcipher *tfm)
{
        nx_crypto_ctx_exit(crypto_skcipher_ctx(tfm));
}

void nx_crypto_ctx_aead_exit(struct crypto_aead *tfm)
{
        struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);

        kfree_sensitive(nx_ctx->kmem);
}

static int nx_probe(struct vio_dev *viodev, const struct vio_device_id *id)
{
        dev_dbg(&viodev->dev, "driver probed: %s resource id: 0x%x\n",
                viodev->name, viodev->resource_id);

        if (nx_driver.viodev) {
                dev_err(&viodev->dev, "%s: Attempt to register more than one instance of the hardware\n",
                        __func__);
                return -EINVAL;
        }

        nx_driver.viodev = viodev;

        nx_of_init(&viodev->dev, &nx_driver.of);

        return nx_register_algs();
}

static void nx_remove(struct vio_dev *viodev)
{
        dev_dbg(&viodev->dev, "entering nx_remove for UA 0x%x\n",
                viodev->unit_address);

        if (nx_driver.of.status == NX_OKAY) {
                NX_DEBUGFS_FINI(&nx_driver);

                nx_unregister_shash(&nx_shash_aes_xcbc_alg,
                                    NX_FC_AES, NX_MODE_AES_XCBC_MAC, -1);
                nx_unregister_shash(&nx_shash_sha512_alg,
                                    NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA512);
                nx_unregister_shash(&nx_shash_sha256_alg,
                                    NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA256);
                nx_unregister_aead(&nx_ccm4309_aes_alg,
                                   NX_FC_AES, NX_MODE_AES_CCM);
                nx_unregister_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
                nx_unregister_aead(&nx_gcm4106_aes_alg,
                                   NX_FC_AES, NX_MODE_AES_GCM);
                nx_unregister_aead(&nx_gcm_aes_alg,
                                   NX_FC_AES, NX_MODE_AES_GCM);
                nx_unregister_skcipher(&nx_ctr3686_aes_alg,
                                       NX_FC_AES, NX_MODE_AES_CTR);
                nx_unregister_skcipher(&nx_cbc_aes_alg, NX_FC_AES,
                                       NX_MODE_AES_CBC);
                nx_unregister_skcipher(&nx_ecb_aes_alg, NX_FC_AES,
                                       NX_MODE_AES_ECB);
        }
}

static int __init nx_init(void)
{
        return vio_register_driver(&nx_driver.viodriver);
}

static void __exit nx_fini(void)
{
        vio_unregister_driver(&nx_driver.viodriver);
}

static const struct vio_device_id nx_crypto_driver_ids[] = {
        { "ibm,sym-encryption-v1", "ibm,sym-encryption" },
        { "", "" }
};
MODULE_DEVICE_TABLE(vio, nx_crypto_driver_ids);

struct nx_crypto_driver nx_driver = {
        .viodriver = {
                .id_table = nx_crypto_driver_ids,
                .probe = nx_probe,
                .remove = nx_remove,
                .name = NX_NAME,
        },
};

module_init(nx_init);
module_exit(nx_fini);

MODULE_AUTHOR("Kent Yoder <yoder1@us.ibm.com>");
MODULE_DESCRIPTION(NX_STRING);
MODULE_LICENSE("GPL");
MODULE_VERSION(NX_VERSION);