#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>

#include "cc_buffer_mgr.h"
#include "cc_lli_defs.h"
#include "cc_cipher.h"
#include "cc_hash.h"
#include "cc_aead.h"

union buffer_array_entry {
	struct scatterlist *sgl;
	dma_addr_t buffer_dma;
};

struct buffer_array {
	unsigned int num_of_buffers;
	union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI];
	unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
	int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
	int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
	bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
	u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
};

static inline char *cc_dma_buf_type(enum cc_req_dma_buf_type type)
{
	switch (type) {
	case CC_DMA_BUF_NULL:
		return "BUF_NULL";
	case CC_DMA_BUF_DLLI:
		return "BUF_DLLI";
	case CC_DMA_BUF_MLLI:
		return "BUF_MLLI";
	default:
		return "BUF_INVALID";
	}
}

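/*
 * Copy the authentication tag (ICV) of an AEAD request between the tail of
 * req->src (the last req_authsize bytes ending at assoclen + cryptlen) and
 * the areq_ctx->backup_mac buffer, in the direction given by @dir.
 */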
static void cc_copy_mac(struct device *dev, struct aead_request *req,
			enum cc_sg_cpy_direct dir)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	u32 skip = req->assoclen + req->cryptlen;

	cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
			   (skip - areq_ctx->req_authsize), skip, dir);
}

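/**
 * cc_get_sgl_nents() - Get scatterlist number of entries.
 *
 * @dev: Device object
 * @sg_list: SG list
 * @nbytes: [IN] Total SGL data bytes
 * @lbytes: [OUT] Returns the amount of bytes at the last entry
 *
 * Return:
 * Number of entries in the scatterlist needed to cover @nbytes
 */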
static unsigned int cc_get_sgl_nents(struct device *dev,
				     struct scatterlist *sg_list,
				     unsigned int nbytes, u32 *lbytes)
{
	unsigned int nents = 0;

	*lbytes = 0;

	while (nbytes && sg_list) {
		nents++;
		/* get the number of bytes in the last entry */
		*lbytes = nbytes;
		nbytes -= (sg_list->length > nbytes) ?
				nbytes : sg_list->length;
		sg_list = sg_next(sg_list);
	}

	dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
	return nents;
}

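/**
 * cc_copy_sg_portion() - Copy scatterlist data, from to_skip to end,
 * to dest and vice versa.
 *
 * @dev: Device object
 * @dest: Buffer to copy to/from
 * @sg: SG list
 * @to_skip: Number of bytes to skip before copying
 * @end: Offset of last byte to copy
 * @direct: Transfer direction (CC_SG_TO_BUF == copy from SG list to buffer,
 *	    otherwise copy from buffer to SG list)
 */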
void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
			u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
{
	u32 nents;

	nents = sg_nents_for_len(sg, end);
	sg_copy_buffer(sg, nents, dest, (end - to_skip + 1), to_skip,
		       (direct == CC_SG_TO_BUF));
}

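/*
 * Render a single contiguous DMA buffer into MLLI table entries, splitting
 * it into chunks of at most CC_MAX_MLLI_ENTRY_SIZE bytes each.
 */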
static int cc_render_buff_to_mlli(struct device *dev, dma_addr_t buff_dma,
				  u32 buff_size, u32 *curr_nents,
				  u32 **mlli_entry_pp)
{
	u32 *mlli_entry_p = *mlli_entry_pp;
	u32 new_nents;

	/* Verify there is no table overflow */
	new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
	if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES) {
		dev_err(dev, "Too many mlli entries. current %d max %d\n",
			new_nents, MAX_NUM_OF_TOTAL_MLLI_ENTRIES);
		return -ENOMEM;
	}

	/* Handle a buffer longer than a single MLLI entry can describe */
	while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
		cc_lli_set_addr(mlli_entry_p, buff_dma);
		cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
		dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
			*curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
			mlli_entry_p[LLI_WORD1_OFFSET]);
		buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
		buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
		mlli_entry_p = mlli_entry_p + 2;
		(*curr_nents)++;
	}
	/* Last entry */
	cc_lli_set_addr(mlli_entry_p, buff_dma);
	cc_lli_set_size(mlli_entry_p, buff_size);
	dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
		*curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
		mlli_entry_p[LLI_WORD1_OFFSET]);
	mlli_entry_p = mlli_entry_p + 2;
	*mlli_entry_pp = mlli_entry_p;
	(*curr_nents)++;
	return 0;
}

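/*
 * Render a DMA-mapped scatterlist into MLLI entries, consuming up to
 * @sgl_data_len bytes starting at @sgl_offset within the first entry.
 */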
static int cc_render_sg_to_mlli(struct device *dev, struct scatterlist *sgl,
				u32 sgl_data_len, u32 sgl_offset,
				u32 *curr_nents, u32 **mlli_entry_pp)
{
	struct scatterlist *curr_sgl = sgl;
	u32 *mlli_entry_p = *mlli_entry_pp;
	s32 rc = 0;

	for ( ; (curr_sgl && sgl_data_len);
	      curr_sgl = sg_next(curr_sgl)) {
		u32 entry_data_len =
			(sgl_data_len > sg_dma_len(curr_sgl) - sgl_offset) ?
				sg_dma_len(curr_sgl) - sgl_offset :
				sgl_data_len;
		sgl_data_len -= entry_data_len;
		rc = cc_render_buff_to_mlli(dev, sg_dma_address(curr_sgl) +
					    sgl_offset, entry_data_len,
					    curr_nents, &mlli_entry_p);
		if (rc)
			return rc;

		sgl_offset = 0;
	}
	*mlli_entry_pp = mlli_entry_p;
	return 0;
}

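/*
 * Build the MLLI table for all buffers collected in @sg_data: allocate a
 * table from the MLLI DMA pool and render every SG entry into it, updating
 * the per-buffer mlli_nents counters as the table grows.
 */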
static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data,
			    struct mlli_params *mlli_params, gfp_t flags)
{
	u32 *mlli_p;
	u32 total_nents = 0, prev_total_nents = 0;
	int rc = 0, i;

	dev_dbg(dev, "NUM of SG's = %d\n", sg_data->num_of_buffers);

	/* Allocate the table from the pointed pool */
	mlli_params->mlli_virt_addr =
		dma_pool_alloc(mlli_params->curr_pool, flags,
			       &mlli_params->mlli_dma_addr);
	if (!mlli_params->mlli_virt_addr) {
		dev_err(dev, "dma_pool_alloc() failed\n");
		rc = -ENOMEM;
		goto build_mlli_exit;
	}
	/* Point to the start of the MLLI table */
	mlli_p = mlli_params->mlli_virt_addr;
	/* Go over all SG entries and link them into one MLLI table */
	for (i = 0; i < sg_data->num_of_buffers; i++) {
		union buffer_array_entry *entry = &sg_data->entry[i];
		u32 tot_len = sg_data->total_data_len[i];
		u32 offset = sg_data->offset[i];

		rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len, offset,
					  &total_nents, &mlli_p);
		if (rc)
			return rc;

		if (sg_data->mlli_nents[i]) {
			/* Report how many MLLI entries were added on behalf
			 * of this buffer back to the caller's counter
			 */
			*sg_data->mlli_nents[i] +=
				(total_nents - prev_total_nents);
			prev_total_nents = total_nents;
		}
	}

	/* Total MLLI table size in bytes */
	mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);

	dev_dbg(dev, "MLLI params: virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n",
		mlli_params->mlli_virt_addr, &mlli_params->mlli_dma_addr,
		mlli_params->mlli_len);

build_mlli_exit:
	return rc;
}

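/*
 * Queue a scatterlist (or config buffer) in the buffer_array so that it is
 * included in the MLLI table later built by cc_generate_mlli().
 */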
static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
			    unsigned int nents, struct scatterlist *sgl,
			    unsigned int data_len, unsigned int data_offset,
			    bool is_last_table, u32 *mlli_nents)
{
	unsigned int index = sgl_data->num_of_buffers;

	dev_dbg(dev, "index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
		index, nents, sgl, data_len, is_last_table);
	sgl_data->nents[index] = nents;
	sgl_data->entry[index].sgl = sgl;
	sgl_data->offset[index] = data_offset;
	sgl_data->total_data_len[index] = data_len;
	sgl_data->is_last[index] = is_last_table;
	sgl_data->mlli_nents[index] = mlli_nents;
	if (sgl_data->mlli_nents[index])
		*sgl_data->mlli_nents[index] = 0;
	sgl_data->num_of_buffers++;
}

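/*
 * Count and DMA-map the scatterlist entries needed to cover @nbytes.
 * Returns the logical entry count in @nents, the number of bytes in the
 * last entry in @lbytes and the number of DMA-mapped entries in
 * @mapped_nents.
 */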
static int cc_map_sg(struct device *dev, struct scatterlist *sg,
		     unsigned int nbytes, int direction, u32 *nents,
		     u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
{
	int ret = 0;

	if (!nbytes) {
		*mapped_nents = 0;
		*lbytes = 0;
		*nents = 0;
		return 0;
	}

	*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
	if (*nents > max_sg_nents) {
		*nents = 0;
		dev_err(dev, "Too many fragments. current %d max %d\n",
			*nents, max_sg_nents);
		return -ENOMEM;
	}

	/* dma_map_sg() returns 0 on failure, not a DMA address, so it must
	 * not be checked with dma_mapping_error().
	 */
	ret = dma_map_sg(dev, sg, *nents, direction);
	if (!ret) {
		*nents = 0;
		dev_err(dev, "dma_map_sg() sg buffer failed %d\n", ret);
		return -ENOMEM;
	}

	*mapped_nents = ret;

	return 0;
}

static int
cc_set_aead_conf_buf(struct device *dev, struct aead_req_ctx *areq_ctx,
		     u8 *config_data, struct buffer_array *sg_data,
		     unsigned int assoclen)
{
	dev_dbg(dev, " handle additional data config set to DLLI\n");
	/* create a single-entry sg for the CCM config buffer */
	sg_init_one(&areq_ctx->ccm_adata_sg, config_data,
		    AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
	if (dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE) != 1) {
		dev_err(dev, "dma_map_sg() config buffer failed\n");
		return -ENOMEM;
	}
	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
		&sg_dma_address(&areq_ctx->ccm_adata_sg),
		sg_page(&areq_ctx->ccm_adata_sg),
		sg_virt(&areq_ctx->ccm_adata_sg),
		areq_ctx->ccm_adata_sg.offset, areq_ctx->ccm_adata_sg.length);
	/* prepare for the MLLI case */
	if (assoclen > 0) {
		cc_add_sg_entry(dev, sg_data, 1, &areq_ctx->ccm_adata_sg,
				(AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
				0, false, NULL);
	}
	return 0;
}

static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx,
			   u8 *curr_buff, u32 curr_buff_cnt,
			   struct buffer_array *sg_data)
{
	dev_dbg(dev, " handle curr buff %x set to DLLI\n", curr_buff_cnt);
	/* create a single-entry sg for the staging buffer */
	sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
	if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) {
		dev_err(dev, "dma_map_sg() src buffer failed\n");
		return -ENOMEM;
	}
	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
		&sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
		sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
		areq_ctx->buff_sg->length);
	areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
	areq_ctx->curr_sg = areq_ctx->buff_sg;
	areq_ctx->in_nents = 0;
	/* prepare for the MLLI case */
	cc_add_sg_entry(dev, sg_data, 1, areq_ctx->buff_sg, curr_buff_cnt, 0,
			false, NULL);
	return 0;
}

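/*
 * Release all DMA resources taken by cc_map_cipher_request(): the IV
 * mapping, the MLLI table (if one was built) and the src/dst scatterlists.
 */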
void cc_unmap_cipher_request(struct device *dev, void *ctx,
			     unsigned int ivsize, struct scatterlist *src,
			     struct scatterlist *dst)
{
	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;

	if (req_ctx->gen_ctx.iv_dma_addr) {
		dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
			&req_ctx->gen_ctx.iv_dma_addr, ivsize);
		dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
				 ivsize, DMA_BIDIRECTIONAL);
	}

	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
	    req_ctx->mlli_params.mlli_virt_addr) {
		dma_pool_free(req_ctx->mlli_params.curr_pool,
			      req_ctx->mlli_params.mlli_virt_addr,
			      req_ctx->mlli_params.mlli_dma_addr);
	}

	if (src != dst) {
		dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_TO_DEVICE);
		dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_FROM_DEVICE);
		dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
		dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
	} else {
		dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
	}
}

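/**
 * cc_map_cipher_request() - Map a cipher request for DMA.
 *
 * @drvdata: Driver's private context
 * @ctx: Cipher request context (struct cipher_req_ctx)
 * @ivsize: IV size in bytes
 * @nbytes: Data size in bytes
 * @info: Pointer to the IV buffer
 * @src: Source scatterlist
 * @dst: Destination scatterlist
 * @flags: GFP allocation flags for the MLLI table
 *
 * Maps the IV and the src/dst scatterlists and, when more than one DMA
 * entry is needed, builds an MLLI table for the HW.
 *
 * Return: 0 on success, -ENOMEM on mapping failure.
 */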
int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
			  unsigned int ivsize, unsigned int nbytes,
			  void *info, struct scatterlist *src,
			  struct scatterlist *dst, gfp_t flags)
{
	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
	struct mlli_params *mlli_params = &req_ctx->mlli_params;
	struct device *dev = drvdata_to_dev(drvdata);
	struct buffer_array sg_data;
	u32 dummy = 0;
	int rc = 0;
	u32 mapped_nents = 0;
	int src_direction = (src != dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);

	req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;

	/* Map IV buffer */
	if (ivsize) {
		dump_byte_array("iv", info, ivsize);
		req_ctx->gen_ctx.iv_dma_addr =
			dma_map_single(dev, info, ivsize, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
			dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
				ivsize, info);
			return -ENOMEM;
		}
		dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
			ivsize, info, &req_ctx->gen_ctx.iv_dma_addr);
	} else {
		req_ctx->gen_ctx.iv_dma_addr = 0;
	}

	/* Map the src SGL */
	rc = cc_map_sg(dev, src, nbytes, src_direction, &req_ctx->in_nents,
		       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
	if (rc)
		goto cipher_exit;
	if (mapped_nents > 1)
		req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;

	if (src == dst) {
		/* Handle inplace operation */
		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
			req_ctx->out_nents = 0;
			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
					nbytes, 0, true,
					&req_ctx->in_mlli_nents);
		}
	} else {
		/* Map the dst SGL */
		rc = cc_map_sg(dev, dst, nbytes, DMA_FROM_DEVICE,
			       &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
			       &dummy, &mapped_nents);
		if (rc)
			goto cipher_exit;
		if (mapped_nents > 1)
			req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;

		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
					nbytes, 0, true,
					&req_ctx->in_mlli_nents);
			cc_add_sg_entry(dev, &sg_data, req_ctx->out_nents, dst,
					nbytes, 0, true,
					&req_ctx->out_mlli_nents);
		}
	}

	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
		if (rc)
			goto cipher_exit;
	}

	dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n",
		cc_dma_buf_type(req_ctx->dma_buf_type));

	return 0;

cipher_exit:
	cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
	return rc;
}

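/*
 * Release all DMA resources taken by cc_map_aead_request(): the MAC, GCM
 * and CCM helper buffers, the IV, the MLLI table and the src/dst
 * scatterlists, and restore the backed-up MAC for in-place decrypt on
 * coherent platforms.
 */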
void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
	struct cc_drvdata *drvdata = dev_get_drvdata(dev);
	int src_direction = (req->src != req->dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);

	if (areq_ctx->mac_buf_dma_addr) {
		dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
				 MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
	}

	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
		if (areq_ctx->hkey_dma_addr) {
			dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
					 AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
		}

		if (areq_ctx->gcm_block_len_dma_addr) {
			dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		if (areq_ctx->gcm_iv_inc1_dma_addr) {
			dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		if (areq_ctx->gcm_iv_inc2_dma_addr) {
			dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}
	}

	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		if (areq_ctx->ccm_iv0_dma_addr) {
			dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
	}
	if (areq_ctx->gen_ctx.iv_dma_addr) {
		dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
				 hw_iv_size, DMA_BIDIRECTIONAL);
		kfree_sensitive(areq_ctx->gen_ctx.iv);
	}

	/* Release the MLLI table */
	if ((areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
	     areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) &&
	    (areq_ctx->mlli_params.mlli_virt_addr)) {
		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
			&areq_ctx->mlli_params.mlli_dma_addr,
			areq_ctx->mlli_params.mlli_virt_addr);
		dma_pool_free(areq_ctx->mlli_params.curr_pool,
			      areq_ctx->mlli_params.mlli_virt_addr,
			      areq_ctx->mlli_params.mlli_dma_addr);
	}

	dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
		sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
		areq_ctx->assoclen, req->cryptlen);

	dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents, src_direction);
	if (req->src != req->dst) {
		dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
			sg_virt(req->dst));
		dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents, DMA_FROM_DEVICE);
	}
	if (drvdata->coherent &&
	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
	    req->src == req->dst) {
		/* Copy the MAC back from its temporary location, where it was
		 * stashed by cc_map_aead_request() to survive the in-place
		 * operation on coherent platforms.
		 */
		cc_copy_mac(dev, req, CC_SG_FROM_BUF);
	}
}

static bool cc_is_icv_frag(unsigned int sgl_nents, unsigned int authsize,
			   u32 last_entry_data_size)
{
	return ((sgl_nents > 1) && (last_entry_data_size < authsize));
}

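/*
 * Make a driver-owned copy of the request IV (kmemdup) and DMA-map it for
 * the HW; the copy is freed again in cc_unmap_aead_request().
 */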
static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
			    struct aead_request *req,
			    struct buffer_array *sg_data,
			    bool is_last, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
	struct device *dev = drvdata_to_dev(drvdata);
	gfp_t flags = cc_gfp_flags(&req->base);
	int rc = 0;

	if (!req->iv) {
		areq_ctx->gen_ctx.iv_dma_addr = 0;
		areq_ctx->gen_ctx.iv = NULL;
		goto chain_iv_exit;
	}

	areq_ctx->gen_ctx.iv = kmemdup(req->iv, hw_iv_size, flags);
	if (!areq_ctx->gen_ctx.iv)
		return -ENOMEM;

	areq_ctx->gen_ctx.iv_dma_addr =
		dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size,
			       DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
		dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
			hw_iv_size, req->iv);
		kfree_sensitive(areq_ctx->gen_ctx.iv);
		areq_ctx->gen_ctx.iv = NULL;
		rc = -ENOMEM;
		goto chain_iv_exit;
	}

	dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
		hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);

chain_iv_exit:
	return rc;
}

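/*
 * Classify the associated data as NULL/DLLI/MLLI and, when an MLLI table is
 * required (or chaining was requested), queue the assoc scatterlist in
 * @sg_data.
 */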
static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
			       struct aead_request *req,
			       struct buffer_array *sg_data,
			       bool is_last, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	int rc = 0;
	int mapped_nents = 0;
	struct device *dev = drvdata_to_dev(drvdata);

	if (!sg_data) {
		rc = -EINVAL;
		goto chain_assoc_exit;
	}

	if (areq_ctx->assoclen == 0) {
		areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
		areq_ctx->assoc.nents = 0;
		areq_ctx->assoc.mlli_nents = 0;
		dev_dbg(dev, "Chain assoc of length 0: buff_type=%s nents=%u\n",
			cc_dma_buf_type(areq_ctx->assoc_buff_type),
			areq_ctx->assoc.nents);
		goto chain_assoc_exit;
	}

	mapped_nents = sg_nents_for_len(req->src, areq_ctx->assoclen);
	if (mapped_nents < 0)
		return mapped_nents;

	if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
		return -ENOMEM;
	}
	areq_ctx->assoc.nents = mapped_nents;

	/* In the CCM case there is an additional entry for the
	 * CCM header configuration block
	 */
	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		if ((mapped_nents + 1) > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
			dev_err(dev, "CCM case. Too many fragments. Current %d max %d\n",
				(areq_ctx->assoc.nents + 1),
				LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
			rc = -ENOMEM;
			goto chain_assoc_exit;
		}
	}

	if (mapped_nents == 1 && areq_ctx->ccm_hdr_size == ccm_header_size_null)
		areq_ctx->assoc_buff_type = CC_DMA_BUF_DLLI;
	else
		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;

	if (do_chain || areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
		dev_dbg(dev, "Chain assoc: buff_type=%s nents=%u\n",
			cc_dma_buf_type(areq_ctx->assoc_buff_type),
			areq_ctx->assoc.nents);
		cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
				areq_ctx->assoclen, 0, is_last,
				&areq_ctx->assoc.mlli_nents);
		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
	}

chain_assoc_exit:
	return rc;
}

static void cc_prepare_aead_data_dlli(struct aead_request *req,
				      u32 *src_last_bytes, u32 *dst_last_bytes)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;
	struct scatterlist *sg;
	ssize_t offset;

	areq_ctx->is_icv_fragmented = false;

	if ((req->src == req->dst) || direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
		sg = areq_ctx->src_sgl;
		offset = *src_last_bytes - authsize;
	} else {
		sg = areq_ctx->dst_sgl;
		offset = *dst_last_bytes - authsize;
	}

	areq_ctx->icv_dma_addr = sg_dma_address(sg) + offset;
	areq_ctx->icv_virt_addr = sg_virt(sg) + offset;
}

static void cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
				      struct aead_request *req,
				      struct buffer_array *sg_data,
				      u32 *src_last_bytes, u32 *dst_last_bytes,
				      bool is_last_table)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;
	struct device *dev = drvdata_to_dev(drvdata);
	struct scatterlist *sg;

	if (req->src == req->dst) {
		/* INPLACE */
		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
				areq_ctx->src_sgl, areq_ctx->cryptlen,
				areq_ctx->src_offset, is_last_table,
				&areq_ctx->src.mlli_nents);

		areq_ctx->is_icv_fragmented =
			cc_is_icv_frag(areq_ctx->src.nents, authsize,
				       *src_last_bytes);

		if (areq_ctx->is_icv_fragmented) {
			/* The ICV straddles SG entries: point at the
			 * contiguous driver-owned MAC buffers instead of the
			 * scatterlist tail.
			 */
			if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
				/* On coherent platforms the MAC was already
				 * backed up in cc_map_aead_request() for the
				 * in-place decrypt case, so do not copy it
				 * twice.
				 */
				if (!drvdata->coherent)
					cc_copy_mac(dev, req, CC_SG_TO_BUF);

				areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
			} else {
				areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
				areq_ctx->icv_dma_addr =
					areq_ctx->mac_buf_dma_addr;
			}
		} else { /* Contiguous ICV */
			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
				(*src_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(sg) +
				(*src_last_bytes - authsize);
		}

	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
		/* NON-INPLACE and DECRYPT */
		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
				areq_ctx->src_sgl, areq_ctx->cryptlen,
				areq_ctx->src_offset, is_last_table,
				&areq_ctx->src.mlli_nents);
		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
				areq_ctx->dst_sgl, areq_ctx->cryptlen,
				areq_ctx->dst_offset, is_last_table,
				&areq_ctx->dst.mlli_nents);

		areq_ctx->is_icv_fragmented =
			cc_is_icv_frag(areq_ctx->src.nents, authsize,
				       *src_last_bytes);
		/* When the ICV is fragmented, back it up to the contiguous
		 * backup_mac buffer and use that instead.
		 */
		if (areq_ctx->is_icv_fragmented) {
			cc_copy_mac(dev, req, CC_SG_TO_BUF);
			areq_ctx->icv_virt_addr = areq_ctx->backup_mac;

		} else { /* Contiguous ICV */
			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
				(*src_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(sg) +
				(*src_last_bytes - authsize);
		}

	} else {
		/* NON-INPLACE and ENCRYPT */
		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
				areq_ctx->dst_sgl, areq_ctx->cryptlen,
				areq_ctx->dst_offset, is_last_table,
				&areq_ctx->dst.mlli_nents);
		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
				areq_ctx->src_sgl, areq_ctx->cryptlen,
				areq_ctx->src_offset, is_last_table,
				&areq_ctx->src.mlli_nents);

		areq_ctx->is_icv_fragmented =
			cc_is_icv_frag(areq_ctx->dst.nents, authsize,
				       *dst_last_bytes);

		if (!areq_ctx->is_icv_fragmented) {
			/* Contiguous ICV */
			sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1];
			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
				(*dst_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(sg) +
				(*dst_last_bytes - authsize);
		} else {
			areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
			areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
		}
	}
}

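/*
 * Walk the src/dst scatterlists past the associated data, map the dst list
 * when it differs from src, and decide whether the data portion of the
 * request can be described as DLLI or needs an MLLI table.
 */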
static int cc_aead_chain_data(struct cc_drvdata *drvdata,
			      struct aead_request *req,
			      struct buffer_array *sg_data,
			      bool is_last_table, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct device *dev = drvdata_to_dev(drvdata);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;
	unsigned int src_last_bytes = 0, dst_last_bytes = 0;
	int rc = 0;
	u32 src_mapped_nents = 0, dst_mapped_nents = 0;
	u32 offset = 0;

	unsigned int size_for_map = req->assoclen + req->cryptlen;
	u32 sg_index = 0;
	u32 size_to_skip = req->assoclen;
	struct scatterlist *sgl;

	offset = size_to_skip;

	if (!sg_data)
		return -EINVAL;

	areq_ctx->src_sgl = req->src;
	areq_ctx->dst_sgl = req->dst;

	size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
			authsize : 0;
	src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
					    &src_last_bytes);
	sg_index = areq_ctx->src_sgl->length;
	/* check where the data starts */
	while (src_mapped_nents && (sg_index <= size_to_skip)) {
		src_mapped_nents--;
		offset -= areq_ctx->src_sgl->length;
		sgl = sg_next(areq_ctx->src_sgl);
		if (!sgl)
			break;
		areq_ctx->src_sgl = sgl;
		sg_index += areq_ctx->src_sgl->length;
	}
	if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
		return -ENOMEM;
	}

	areq_ctx->src.nents = src_mapped_nents;

	areq_ctx->src_offset = offset;

	if (req->src != req->dst) {
		size_for_map = req->assoclen + req->cryptlen;

		if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT)
			size_for_map += authsize;
		else
			size_for_map -= authsize;

		rc = cc_map_sg(dev, req->dst, size_for_map, DMA_FROM_DEVICE,
			       &areq_ctx->dst.mapped_nents,
			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
			       &dst_mapped_nents);
		if (rc)
			goto chain_data_exit;
	}

	dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
					    &dst_last_bytes);
	sg_index = areq_ctx->dst_sgl->length;
	offset = size_to_skip;

	/* check where the data starts */
	while (dst_mapped_nents && sg_index <= size_to_skip) {
		dst_mapped_nents--;
		offset -= areq_ctx->dst_sgl->length;
		sgl = sg_next(areq_ctx->dst_sgl);
		if (!sgl)
			break;
		areq_ctx->dst_sgl = sgl;
		sg_index += areq_ctx->dst_sgl->length;
	}
	if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
		return -ENOMEM;
	}
	areq_ctx->dst.nents = dst_mapped_nents;
	areq_ctx->dst_offset = offset;
	if (src_mapped_nents > 1 ||
	    dst_mapped_nents > 1 ||
	    do_chain) {
		areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
		cc_prepare_aead_data_mlli(drvdata, req, sg_data,
					  &src_last_bytes, &dst_last_bytes,
					  is_last_table);
	} else {
		areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
		cc_prepare_aead_data_dlli(req, &src_last_bytes,
					  &dst_last_bytes);
	}

chain_data_exit:
	return rc;
}

0894
0895 static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata,
0896 struct aead_request *req)
0897 {
0898 struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
0899 u32 curr_mlli_size = 0;
0900
0901 if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
0902 areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
0903 curr_mlli_size = areq_ctx->assoc.mlli_nents *
0904 LLI_ENTRY_BYTE_SIZE;
0905 }
0906
0907 if (areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
0908
0909 if (req->src == req->dst) {
0910 areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
0911 areq_ctx->src.sram_addr = drvdata->mlli_sram_addr +
0912 curr_mlli_size;
0913 areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr;
0914 if (!areq_ctx->is_single_pass)
0915 areq_ctx->assoc.mlli_nents +=
0916 areq_ctx->src.mlli_nents;
0917 } else {
0918 if (areq_ctx->gen_ctx.op_type ==
0919 DRV_CRYPTO_DIRECTION_DECRYPT) {
0920 areq_ctx->src.sram_addr =
0921 drvdata->mlli_sram_addr +
0922 curr_mlli_size;
0923 areq_ctx->dst.sram_addr =
0924 areq_ctx->src.sram_addr +
0925 areq_ctx->src.mlli_nents *
0926 LLI_ENTRY_BYTE_SIZE;
0927 if (!areq_ctx->is_single_pass)
0928 areq_ctx->assoc.mlli_nents +=
0929 areq_ctx->src.mlli_nents;
0930 } else {
0931 areq_ctx->dst.sram_addr =
0932 drvdata->mlli_sram_addr +
0933 curr_mlli_size;
0934 areq_ctx->src.sram_addr =
0935 areq_ctx->dst.sram_addr +
0936 areq_ctx->dst.mlli_nents *
0937 LLI_ENTRY_BYTE_SIZE;
0938 if (!areq_ctx->is_single_pass)
0939 areq_ctx->assoc.mlli_nents +=
0940 areq_ctx->dst.mlli_nents;
0941 }
0942 }
0943 }
0944 }
0945
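/*
 * Map an AEAD request for DMA: the MAC buffer, the CCM/GCM helper blocks,
 * the IV copy and the src/dst scatterlists, then build the MLLI table(s)
 * needed for the single-pass or double-pass flows.
 */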
int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	struct device *dev = drvdata_to_dev(drvdata);
	struct buffer_array sg_data;
	unsigned int authsize = areq_ctx->req_authsize;
	int rc = 0;
	dma_addr_t dma_addr;
	u32 mapped_nents = 0;
	u32 dummy = 0;
	u32 size_to_map;
	gfp_t flags = cc_gfp_flags(&req->base);

	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;

	/* Copy the MAC to a temporary location so that an in-place decrypt
	 * on a coherent platform cannot overwrite it.
	 */
	if (drvdata->coherent &&
	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
	    req->src == req->dst)
		cc_copy_mac(dev, req, CC_SG_TO_BUF);

	/* Cipher payload size: on decrypt the ICV is not part of the data */
	areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
			      DRV_CRYPTO_DIRECTION_ENCRYPT) ?
				req->cryptlen :
				(req->cryptlen - authsize);

	dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE,
				  DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
			MAX_MAC_SIZE, areq_ctx->mac_buf);
		rc = -ENOMEM;
		goto aead_map_failure;
	}
	areq_ctx->mac_buf_dma_addr = dma_addr;

	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		void *addr = areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;

		dma_addr = dma_map_single(dev, addr, AES_BLOCK_SIZE,
					  DMA_TO_DEVICE);

		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, addr);
			areq_ctx->ccm_iv0_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->ccm_iv0_dma_addr = dma_addr;

		rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
					  &sg_data, areq_ctx->assoclen);
		if (rc)
			goto aead_map_failure;
	}

	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
		dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE,
					  DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, areq_ctx->hkey);
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->hkey_dma_addr = dma_addr;

		dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block,
					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->gcm_block_len_dma_addr = dma_addr;

		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc1,
					  AES_BLOCK_SIZE, DMA_TO_DEVICE);

		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
			areq_ctx->gcm_iv_inc1_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->gcm_iv_inc1_dma_addr = dma_addr;

		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc2,
					  AES_BLOCK_SIZE, DMA_TO_DEVICE);

		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
			areq_ctx->gcm_iv_inc2_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
	}

	size_to_map = req->cryptlen + req->assoclen;
	/* If we do in-place encryption, we also need to map the auth tag */
	if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) &&
	    (req->src == req->dst)) {
		size_to_map += authsize;
	}

	rc = cc_map_sg(dev, req->src, size_to_map,
		       (req->src != req->dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL),
		       &areq_ctx->src.mapped_nents,
		       (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
			LLI_MAX_NUM_OF_DATA_ENTRIES),
		       &dummy, &mapped_nents);
	if (rc)
		goto aead_map_failure;

	if (areq_ctx->is_single_pass) {
		/* Single-pass flow: one chained MLLI table covering the
		 * assoc data and the src/dst SGLs; the IV is a contiguous
		 * buffer, not an SGL entry.
		 */
		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, true, false);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_iv(drvdata, req, &sg_data, true, false);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, false);
		if (rc)
			goto aead_map_failure;
	} else { /* Double-pass flow */
		/* Prepare MLLI table(s) in this order:
		 *   (1) assoc data
		 *   (2) IV
		 *   (3) data: src/dst for in-place, src then dst for
		 *	 decrypt, dst then src for encrypt (see
		 *	 cc_prepare_aead_data_mlli())
		 */
		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, false, true);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_iv(drvdata, req, &sg_data, false, true);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, true);
		if (rc)
			goto aead_map_failure;
	}

	/* MLLI support - build the MLLI table according to the above
	 * results
	 */
	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
	    areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
		if (rc)
			goto aead_map_failure;

		cc_update_aead_mlli_nents(drvdata, req);
		dev_dbg(dev, "assoc params mn %d\n",
			areq_ctx->assoc.mlli_nents);
		dev_dbg(dev, "src params mn %d\n", areq_ctx->src.mlli_nents);
		dev_dbg(dev, "dst params mn %d\n", areq_ctx->dst.mlli_nents);
	}
	return 0;

aead_map_failure:
	cc_unmap_aead_request(dev, req);
	return rc;
}

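/**
 * cc_map_hash_request_final() - Map a hash request for the final operation:
 * the buffered remainder (if any) and the data scatterlist.
 *
 * @drvdata: Driver's private context
 * @ctx: Hash request context (struct ahash_req_ctx)
 * @src: Data scatterlist
 * @nbytes: Number of data bytes in @src
 * @do_update: Whether @src should be processed as update data
 * @flags: GFP allocation flags for the MLLI table
 *
 * Return: 0 on success, negative error code otherwise.
 */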
int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
			      struct scatterlist *src, unsigned int nbytes,
			      bool do_update, gfp_t flags)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	struct device *dev = drvdata_to_dev(drvdata);
	u8 *curr_buff = cc_hash_buf(areq_ctx);
	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	struct buffer_array sg_data;
	int rc = 0;
	u32 dummy = 0;
	u32 mapped_nents = 0;

	dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
	/* Init the type of the DMA buffer */
	areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;
	areq_ctx->in_nents = 0;

	if (nbytes == 0 && *curr_buff_cnt == 0) {
		/* nothing to do */
		return 0;
	}

	/* map the previously buffered data */
	if (*curr_buff_cnt) {
		rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
				     &sg_data);
		if (rc)
			return rc;
	}

	if (src && nbytes > 0 && do_update) {
		rc = cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
			       &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
			       &dummy, &mapped_nents);
		if (rc)
			goto unmap_curr_buff;
		if (src && mapped_nents == 1 &&
		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
			/* only one entry in the SG and no previous data */
			memcpy(areq_ctx->buff_sg, src,
			       sizeof(struct scatterlist));
			areq_ctx->buff_sg->length = nbytes;
			areq_ctx->curr_sg = areq_ctx->buff_sg;
			areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
		} else {
			areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
		}
	}

	/* build the MLLI table */
	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
		/* add the src data to the sg_data */
		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
				0, true, &areq_ctx->mlli_nents);
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
		if (rc)
			goto fail_unmap_din;
	}
	/* switch to the other scratch buffer for the next operation */
	areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
	dev_dbg(dev, "areq_ctx->data_dma_buf_type = %s\n",
		cc_dma_buf_type(areq_ctx->data_dma_buf_type));
	return 0;

fail_unmap_din:
	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);

unmap_curr_buff:
	if (*curr_buff_cnt)
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);

	return rc;
}

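/*
 * Map a hash update request: buffer any residue smaller than a hash block,
 * carry the previously buffered residue into this update and DMA-map the
 * block-aligned part of the data, building an MLLI table when needed.
 *
 * Returns 0 when data was mapped for processing, 1 when all data was
 * buffered (less than one block), or a negative error code.
 */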
int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
			       struct scatterlist *src, unsigned int nbytes,
			       unsigned int block_size, gfp_t flags)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	struct device *dev = drvdata_to_dev(drvdata);
	u8 *curr_buff = cc_hash_buf(areq_ctx);
	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
	u8 *next_buff = cc_next_buf(areq_ctx);
	u32 *next_buff_cnt = cc_next_buf_cnt(areq_ctx);
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	unsigned int update_data_len;
	u32 total_in_len = nbytes + *curr_buff_cnt;
	struct buffer_array sg_data;
	unsigned int swap_index = 0;
	int rc = 0;
	u32 dummy = 0;
	u32 mapped_nents = 0;

	dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
	/* Init the type of the DMA buffer */
	areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
	mlli_params->curr_pool = NULL;
	areq_ctx->curr_sg = NULL;
	sg_data.num_of_buffers = 0;
	areq_ctx->in_nents = 0;

	if (total_in_len < block_size) {
		dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
			curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
		areq_ctx->in_nents = sg_nents_for_len(src, nbytes);
		sg_copy_to_buffer(src, areq_ctx->in_nents,
				  &curr_buff[*curr_buff_cnt], nbytes);
		*curr_buff_cnt += nbytes;
		return 1;
	}

	/* Calculate the residue size */
	*next_buff_cnt = total_in_len & (block_size - 1);
	/* update data length */
	update_data_len = total_in_len - *next_buff_cnt;

	dev_dbg(dev, " temp length : *next_buff_cnt=0x%X update_data_len=0x%X\n",
		*next_buff_cnt, update_data_len);

	/* Copy the new residue to the next buffer */
	if (*next_buff_cnt) {
		dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
			next_buff, (update_data_len - *curr_buff_cnt),
			*next_buff_cnt);
		cc_copy_sg_portion(dev, next_buff, src,
				   (update_data_len - *curr_buff_cnt),
				   nbytes, CC_SG_TO_BUF);
		/* change the buffer index for the next operation */
		swap_index = 1;
	}

	if (*curr_buff_cnt) {
		rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
				     &sg_data);
		if (rc)
			return rc;
		/* change the buffer index for the next operation */
		swap_index = 1;
	}

	if (update_data_len > *curr_buff_cnt) {
		rc = cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
			       DMA_TO_DEVICE, &areq_ctx->in_nents,
			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
			       &mapped_nents);
		if (rc)
			goto unmap_curr_buff;
		if (mapped_nents == 1 &&
		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
			/* only one entry in the SG and no previous data */
			memcpy(areq_ctx->buff_sg, src,
			       sizeof(struct scatterlist));
			areq_ctx->buff_sg->length = update_data_len;
			areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
			areq_ctx->curr_sg = areq_ctx->buff_sg;
		} else {
			areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
		}
	}

	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
		/* add the src data to the sg_data */
		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
				(update_data_len - *curr_buff_cnt), 0, true,
				&areq_ctx->mlli_nents);
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
		if (rc)
			goto fail_unmap_din;
	}
	areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);

	return 0;

fail_unmap_din:
	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);

unmap_curr_buff:
	if (*curr_buff_cnt)
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);

	return rc;
}

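/*
 * Release the DMA resources of a hash request: the MLLI table (if any), the
 * data scatterlist and the scratch buffer mapping. When @do_revert is set
 * the buffer index is rolled back instead of clearing the previous length.
 */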
void cc_unmap_hash_request(struct device *dev, void *ctx,
			   struct scatterlist *src, bool do_revert)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	u32 *prev_len = cc_next_buf_cnt(areq_ctx);

	/* If a pool was set, a table was allocated
	 * and should be released
	 */
	if (areq_ctx->mlli_params.curr_pool) {
		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
			&areq_ctx->mlli_params.mlli_dma_addr,
			areq_ctx->mlli_params.mlli_virt_addr);
		dma_pool_free(areq_ctx->mlli_params.curr_pool,
			      areq_ctx->mlli_params.mlli_virt_addr,
			      areq_ctx->mlli_params.mlli_dma_addr);
	}

	if (src && areq_ctx->in_nents) {
		dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
			sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
		dma_unmap_sg(dev, src,
			     areq_ctx->in_nents, DMA_TO_DEVICE);
	}

	if (*prev_len) {
		dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
			sg_virt(areq_ctx->buff_sg),
			&sg_dma_address(areq_ctx->buff_sg),
			sg_dma_len(areq_ctx->buff_sg));
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
		if (!do_revert) {
			/* clean the previous data length for the
			 * update operation
			 */
			*prev_len = 0;
		} else {
			areq_ctx->buff_index ^= 1;
		}
	}
}

int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
{
	struct device *dev = drvdata_to_dev(drvdata);

	drvdata->mlli_buffs_pool =
		dma_pool_create("dx_single_mlli_tables", dev,
				MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
				LLI_ENTRY_BYTE_SIZE,
				MLLI_TABLE_MIN_ALIGNMENT, 0);

	if (!drvdata->mlli_buffs_pool)
		return -ENOMEM;

	return 0;
}

int cc_buffer_mgr_fini(struct cc_drvdata *drvdata)
{
	dma_pool_destroy(drvdata->mlli_buffs_pool);
	return 0;
}