// SPDX-License-Identifier: GPL-2.0+
/*
 * caam - Freescale FSL CAAM support for Public Key Cryptography
 *
 * Copyright 2016 Freescale Semiconductor, Inc.
 * Copyright 2018-2019 NXP
 *
 * There is no Shared Descriptor for PKC so that the Job Descriptor must carry
 * the operation/protocol info, instead of pointing to pre-initialized Shared
 * Descriptor.
 */
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "error.h"
#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "caampkc.h"

/*
 * A PKC job descriptor is a HEADER and an OPERATION command (2 * CAAM_CMD_SZ)
 * wrapped around the protocol data block (PDB) for the given key form.
 */
#define DESC_RSA_PUB_LEN	(2 * CAAM_CMD_SZ + SIZEOF_RSA_PUB_PDB)
#define DESC_RSA_PRIV_F1_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F1_PDB)
#define DESC_RSA_PRIV_F2_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F2_PDB)
#define DESC_RSA_PRIV_F3_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F3_PDB)
#define CAAM_RSA_MAX_INPUT_SIZE	512	/* for a 4096-bit modulus */

/* buffer filled with zeros, used for padding inputs shorter than the modulus */
static u8 *zero_buffer;

/*
 * variable used to avoid double free of resources in case
 * algorithm registration was unsuccessful
 */
static bool init_done;

struct caam_akcipher_alg {
	struct akcipher_alg akcipher;
	bool registered;
};

static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
			 struct akcipher_request *req)
{
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

	dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
	dma_unmap_sg(dev, req_ctx->fixup_src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
			  struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}
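
/*
 * Each rsa_*_unmap() helper must mirror the DMA mappings made by the
 * corresponding set_rsa_*_pdb() routine; they run from both the job
 * completion callbacks and the error paths of akcipher_enqueue_req().
 */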

/* RSA Job Completion handler */
static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
{
	struct akcipher_request *req = context;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct rsa_edesc *edesc;
	int ecode = 0;
	bool has_bklog;

	if (err)
		ecode = caam_jr_strstatus(dev, err);

	edesc = req_ctx->edesc;
	has_bklog = edesc->bklog;

	rsa_pub_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
	if (!has_bklog)
		akcipher_request_complete(req, ecode);
	else
		crypto_finalize_akcipher_request(jrp->engine, req, ecode);
}

static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err,
			    void *context)
{
	struct akcipher_request *req = context;
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct rsa_edesc *edesc;
	int ecode = 0;
	bool has_bklog;

	if (err)
		ecode = caam_jr_strstatus(dev, err);

	edesc = req_ctx->edesc;
	has_bklog = edesc->bklog;

	switch (key->priv_form) {
	case FORM1:
		rsa_priv_f1_unmap(dev, edesc, req);
		break;
	case FORM2:
		rsa_priv_f2_unmap(dev, edesc, req);
		break;
	case FORM3:
		rsa_priv_f3_unmap(dev, edesc, req);
	}

	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
	if (!has_bklog)
		akcipher_request_complete(req, ecode);
	else
		crypto_finalize_akcipher_request(jrp->engine, req, ecode);
}

/**
 * caam_rsa_count_leading_zeros - Count the leading zero bytes of a
 *                                scatterlist so that they can be stripped
 *
 * @sgl   : scatterlist to count zeros from
 * @nbytes: number of bytes, at most, to strip
 * @flags : operation flags
 */
static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
					unsigned int nbytes,
					unsigned int flags)
{
	struct sg_mapping_iter miter;
	int lzeros, ents;
	unsigned int len;
	unsigned int tbytes = nbytes;
	const u8 *buff;

	ents = sg_nents_for_len(sgl, nbytes);
	if (ents < 0)
		return ents;

	sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags);

	lzeros = 0;
	len = 0;
	while (nbytes > 0) {
		/* do not strip more than given bytes */
		while (len && !*buff && lzeros < nbytes) {
			lzeros++;
			len--;
			buff++;
		}

		if (len && *buff)
			break;

		sg_miter_next(&miter);
		buff = miter.addr;
		len = miter.length;

		nbytes -= lzeros;
		lzeros = 0;
	}

	miter.consumed = lzeros;
	sg_miter_stop(&miter);
	nbytes -= lzeros;

	return tbytes - nbytes;
}
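
/*
 * The hardware expects the RSA input to fit in key->n_sz bytes. A src_len
 * larger than n_sz is only acceptable when the extra leading bytes are
 * zeros, which do not change the integer value; rsa_edesc_alloc() strips
 * them before mapping the source.
 */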

static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
					 size_t desclen)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_edesc *edesc;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	int src_nents, dst_nents;
	int mapped_src_nents, mapped_dst_nents;
	unsigned int diff_size = 0;
	int lzeros;

	if (req->src_len > key->n_sz) {
		/*
		 * strip leading zeros and
		 * set fixed up src length
		 */
		lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len -
						      key->n_sz, sg_flags);
		if (lzeros < 0)
			return ERR_PTR(lzeros);

		req_ctx->fixup_src = scatterwalk_ffwd(req_ctx->src, req->src,
						      lzeros);
		req_ctx->fixup_src_len = req->src_len - lzeros;
	} else {
		/*
		 * input src is less than n key modulus,
		 * so there will be zero padding
		 */
		diff_size = key->n_sz - req->src_len;
		req_ctx->fixup_src = req->src;
		req_ctx->fixup_src_len = req->src_len;
	}

	src_nents = sg_nents_for_len(req_ctx->fixup_src,
				     req_ctx->fixup_src_len);
	dst_nents = sg_nents_for_len(req->dst, req->dst_len);

	mapped_src_nents = dma_map_sg(dev, req_ctx->fixup_src, src_nents,
				      DMA_TO_DEVICE);
	if (unlikely(!mapped_src_nents)) {
		dev_err(dev, "unable to map source\n");
		return ERR_PTR(-ENOMEM);
	}
	mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
				      DMA_FROM_DEVICE);
	if (unlikely(!mapped_dst_nents)) {
		dev_err(dev, "unable to map destination\n");
		goto src_fail;
	}

	if (!diff_size && mapped_src_nents == 1)
		sec4_sg_len = 0; /* no need for an input hw s/g table */
	else
		sec4_sg_len = mapped_src_nents + !!diff_size;
	sec4_sg_index = sec4_sg_len;

	if (mapped_dst_nents > 1)
		sec4_sg_len += pad_sg_nents(mapped_dst_nents);
	else
		sec4_sg_len = pad_sg_nents(sec4_sg_len);

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc, hw desc commands and link tables */
	edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc)
		goto dst_fail;

	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;
	if (diff_size)
		dma_to_sec4_sg_one(edesc->sec4_sg, ctx->padding_dma, diff_size,
				   0);

	if (sec4_sg_index)
		sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len,
				   edesc->sec4_sg + !!diff_size, 0);

	if (mapped_dst_nents > 1)
		sg_to_sec4_sg_last(req->dst, req->dst_len,
				   edesc->sec4_sg + sec4_sg_index, 0);

	/* Save nents for later use in Job Descriptor */
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;

	req_ctx->edesc = edesc;

	if (!sec4_sg_bytes)
		return edesc;

	edesc->mapped_src_nents = mapped_src_nents;
	edesc->mapped_dst_nents = mapped_dst_nents;

	edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		goto sec4_sg_fail;
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	print_hex_dump_debug("caampkc sec4_sg@" __stringify(__LINE__) ": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
			     edesc->sec4_sg_bytes, 1);

	return edesc;

sec4_sg_fail:
	kfree(edesc);
dst_fail:
	dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
src_fail:
	dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
	return ERR_PTR(-ENOMEM);
}
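
/*
 * The extended descriptor is one allocation laid out as:
 *   struct rsa_edesc | hw job descriptor (desclen) | sec4 S/G table,
 * so a single kfree() releases everything once the DMA mappings are undone.
 */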

static int akcipher_do_one_req(struct crypto_engine *engine, void *areq)
{
	struct akcipher_request *req = container_of(areq,
						    struct akcipher_request,
						    base);
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	u32 *desc = req_ctx->edesc->hw_desc;
	int ret;

	req_ctx->edesc->bklog = true;

	ret = caam_jr_enqueue(jrdev, desc, req_ctx->akcipher_op_done, req);

	if (ret == -ENOSPC && engine->retry_support)
		return ret;

	if (ret != -EINPROGRESS) {
		rsa_pub_unmap(jrdev, req_ctx->edesc, req);
		rsa_io_unmap(jrdev, req_ctx->edesc, req);
		kfree(req_ctx->edesc);
	} else {
		ret = 0;
	}

	return ret;
}
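
/*
 * Returning -ENOSPC when the engine supports retries tells crypto-engine
 * that the Job Ring is full and the request should be re-queued rather
 * than completed with an error.
 */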

static int set_rsa_pub_pdb(struct akcipher_request *req,
			   struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map RSA modulus memory\n");
		return -ENOMEM;
	}

	pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->e_dma)) {
		dev_err(dev, "Unable to map RSA public exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		pdb->f_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->g_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
	pdb->f_len = req_ctx->fixup_src_len;

	return 0;
}
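
/*
 * Note: besides the S/G flag bits, the PDB "sgf" word carries the exponent
 * and modulus lengths in separate bit-fields, which is why the sizes are
 * OR-ed in above rather than stored in dedicated length members.
 */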

static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map modulus memory\n");
		return -ENOMEM;
	}

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;

	return 0;
}

static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		return -ENOMEM;
	}

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		goto unmap_d;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_q;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
unmap_d:
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		return -ENOMEM;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dp_dma)) {
		dev_err(dev, "Unable to map RSA exponent dp memory\n");
		goto unmap_q;
	}

	pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dq_dma)) {
		dev_err(dev, "Unable to map RSA exponent dq memory\n");
		goto unmap_dp;
	}

	pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->c_dma)) {
		dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
		goto unmap_dq;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_qinv;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_qinv:
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
unmap_dq:
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
unmap_dp:
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

static int akcipher_enqueue_req(struct device *jrdev,
				void (*cbk)(struct device *jrdev, u32 *desc,
					    u32 err, void *context),
				struct akcipher_request *req)
{
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct rsa_edesc *edesc = req_ctx->edesc;
	u32 *desc = edesc->hw_desc;
	int ret;

	req_ctx->akcipher_op_done = cbk;
	/*
	 * Only the backlog request are sent to crypto-engine since the others
	 * can be handled by CAAM, if free, especially since JR has up to 1024
	 * entries (more than the 10 entries from crypto-engine).
	 */
	if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		ret = crypto_transfer_akcipher_request_to_engine(jrpriv->engine,
								 req);
	else
		ret = caam_jr_enqueue(jrdev, desc, cbk, req);

	if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
		switch (key->priv_form) {
		case FORM1:
			rsa_priv_f1_unmap(jrdev, edesc, req);
			break;
		case FORM2:
			rsa_priv_f2_unmap(jrdev, edesc, req);
			break;
		case FORM3:
			rsa_priv_f3_unmap(jrdev, edesc, req);
			break;
		default:
			rsa_pub_unmap(jrdev, edesc, req);
		}
		rsa_io_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
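
/*
 * Any status other than -EINPROGRESS or -EBUSY means the request will never
 * reach the completion callback, so the DMA mappings are rolled back and the
 * extended descriptor freed right here.
 */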

static int caam_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	if (unlikely(!key->n || !key->e))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(jrdev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Encrypt Protocol Data Block */
	ret = set_rsa_pub_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);

	return akcipher_enqueue_req(jrdev, rsa_pub_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */
	ret = set_rsa_priv_f1_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);

	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
	ret = set_rsa_priv_f2_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);

	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
	ret = set_rsa_priv_f3_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);

	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	int ret;

	if (unlikely(!key->n || !key->d))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(ctx->dev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	if (key->priv_form == FORM3)
		ret = caam_rsa_dec_priv_f3(req);
	else if (key->priv_form == FORM2)
		ret = caam_rsa_dec_priv_f2(req);
	else
		ret = caam_rsa_dec_priv_f1(req);

	return ret;
}

static void caam_rsa_free_key(struct caam_rsa_key *key)
{
	kfree_sensitive(key->d);
	kfree_sensitive(key->p);
	kfree_sensitive(key->q);
	kfree_sensitive(key->dp);
	kfree_sensitive(key->dq);
	kfree_sensitive(key->qinv);
	kfree_sensitive(key->tmp1);
	kfree_sensitive(key->tmp2);
	kfree(key->e);
	kfree(key->n);
	memset(key, 0, sizeof(*key));
}

static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
{
	while (!**ptr && *nbytes) {
		(*ptr)++;
		(*nbytes)--;
	}
}
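
/*
 * Example: for the BER integer { 0x00, 0x00, 0x5a, 0x01 } the pointer is
 * advanced by two and nbytes drops from 4 to 2, leaving { 0x5a, 0x01 }.
 */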

/**
 * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
 * dP, dQ and qInv could decode to less than the corresponding p, q length, as
 * the BER-encoding requires that the minimum number of bytes be used to encode
 * the integer. dP, dQ, qInv decoded values have to be zero-padded to the
 * appropriate length.
 *
 * @ptr   : pointer to {dP, dQ, qInv} CRT member
 * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
 * @dstlen: length in bytes of corresponding p or q prime factor
 */
static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
{
	u8 *dst;

	caam_rsa_drop_leading_zeros(&ptr, &nbytes);
	if (!nbytes)
		return NULL;

	dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL);
	if (!dst)
		return NULL;

	memcpy(dst + (dstlen - nbytes), ptr, nbytes);

	return dst;
}

/**
 * caam_read_raw_data - Read a raw byte stream as a positive integer.
 * The function skips the buffer's leading zeros, copies the remaining data
 * to a buffer allocated in the GFP_DMA | GFP_KERNEL zone and returns
 * the kmalloc'ed buffer.
 *
 * @buf   : The data to read
 * @nbytes: The amount of data to read
 */
static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
{
	caam_rsa_drop_leading_zeros(&buf, nbytes);
	if (!*nbytes)
		return NULL;

	return kmemdup(buf, *nbytes, GFP_DMA | GFP_KERNEL);
}

static int caam_rsa_check_key_length(unsigned int len)
{
	if (len > 4096)
		return -EINVAL;
	return 0;
}

static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
				unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_pub_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	return 0;
err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
				       struct rsa_key *raw_key)
{
	struct caam_rsa_key *rsa_key = &ctx->key;
	size_t p_sz = raw_key->p_sz;
	size_t q_sz = raw_key->q_sz;

	rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
	if (!rsa_key->p)
		return;
	rsa_key->p_sz = p_sz;

	rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
	if (!rsa_key->q)
		goto free_p;
	rsa_key->q_sz = q_sz;

	rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp1)
		goto free_q;

	rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp2)
		goto free_tmp1;

	rsa_key->priv_form = FORM2;

	rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
	if (!rsa_key->dp)
		goto free_tmp2;

	rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
	if (!rsa_key->dq)
		goto free_dp;

	/*
	 * qInv = q^(-1) mod p is bounded by p, so it must be padded to p_sz
	 * bytes - the same size set_rsa_priv_f3_pdb() maps for c_dma.
	 */
	rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
					  p_sz);
	if (!rsa_key->qinv)
		goto free_dq;

	rsa_key->priv_form = FORM3;

	return;

free_dq:
	kfree_sensitive(rsa_key->dq);
free_dp:
	kfree_sensitive(rsa_key->dp);
free_tmp2:
	kfree_sensitive(rsa_key->tmp2);
free_tmp1:
	kfree_sensitive(rsa_key->tmp1);
free_q:
	kfree_sensitive(rsa_key->q);
free_p:
	kfree_sensitive(rsa_key->p);
}
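
/*
 * Failures above are not fatal: the key simply stays in the last form
 * reached (FORM1 if even p/q could not be set up) and caam_rsa_dec() falls
 * back to the matching, slower private-key operation.
 */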

static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
				 unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_priv_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->d = kmemdup(raw_key.d, raw_key.d_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->d)
		goto err;

	rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->d_sz = raw_key.d_sz;
	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	caam_rsa_set_priv_key_form(ctx, &raw_key);

	return 0;

err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	return ctx->key.n_sz;
}

/* Per session pkc's driver context creation function */
static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	ctx->dev = caam_jr_alloc();

	if (IS_ERR(ctx->dev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->dev);
	}

	ctx->padding_dma = dma_map_single(ctx->dev, zero_buffer,
					  CAAM_RSA_MAX_INPUT_SIZE - 1,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, ctx->padding_dma)) {
		dev_err(ctx->dev, "unable to map padding\n");
		caam_jr_free(ctx->dev);
		return -ENOMEM;
	}

	ctx->enginectx.op.do_one_request = akcipher_do_one_req;

	return 0;
}
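
/*
 * padding_dma maps the shared zero_buffer once per transform, so
 * rsa_edesc_alloc() can prepend up to CAAM_RSA_MAX_INPUT_SIZE - 1 zero
 * bytes to short inputs without any per-request allocation.
 */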

/* Per session pkc's driver context cleanup function */
static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;

	dma_unmap_single(ctx->dev, ctx->padding_dma, CAAM_RSA_MAX_INPUT_SIZE -
			 1, DMA_TO_DEVICE);
	caam_rsa_free_key(key);
	caam_jr_free(ctx->dev);
}

static struct caam_akcipher_alg caam_rsa = {
	.akcipher = {
		.encrypt = caam_rsa_enc,
		.decrypt = caam_rsa_dec,
		.set_pub_key = caam_rsa_set_pub_key,
		.set_priv_key = caam_rsa_set_priv_key,
		.max_size = caam_rsa_max_size,
		.init = caam_rsa_init_tfm,
		.exit = caam_rsa_exit_tfm,
		.reqsize = sizeof(struct caam_rsa_req_ctx),
		.base = {
			.cra_name = "rsa",
			.cra_driver_name = "rsa-caam",
			.cra_priority = 3000,
			.cra_module = THIS_MODULE,
			.cra_ctxsize = sizeof(struct caam_rsa_ctx),
		},
	}
};
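
/*
 * cra_priority of 3000 ranks this implementation well above the generic
 * software "rsa", so the CAAM-backed transform is picked whenever the
 * hardware is available.
 */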

/* Public Key Cryptography module initialization handler */
int caam_pkc_init(struct device *ctrldev)
{
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	u32 pk_inst, pkha;
	int err;
	init_done = false;

	/* Determine public key hardware accelerator presence. */
	if (priv->era < 10) {
		pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
			   CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
	} else {
		pkha = rd_reg32(&priv->ctrl->vreg.pkha);
		pk_inst = pkha & CHA_VER_NUM_MASK;

		/*
		 * Newer CAAMs support partially disabled functionality. If this
		 * is the case, the number is non-zero, but this bit is set to
		 * indicate that no encryption or decryption is supported. Only
		 * signing and verifying is supported.
		 */
		if (pkha & CHA_VER_MISC_PKHA_NO_CRYPT)
			pk_inst = 0;
	}

	/* Do not register algorithms if PKHA is not present. */
	if (!pk_inst)
		return 0;

	/* allocate zero buffer, used for padding input */
	zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_DMA |
			      GFP_KERNEL);
	if (!zero_buffer)
		return -ENOMEM;

	err = crypto_register_akcipher(&caam_rsa.akcipher);

	if (err) {
		kfree(zero_buffer);
		dev_warn(ctrldev, "%s alg registration failed\n",
			 caam_rsa.akcipher.base.cra_driver_name);
	} else {
		init_done = true;
		caam_rsa.registered = true;
		dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");
	}

	return err;
}

void caam_pkc_exit(void)
{
	if (!init_done)
		return;

	if (caam_rsa.registered)
		crypto_unregister_akcipher(&caam_rsa.akcipher);

	kfree(zero_buffer);
}