#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <crypto/scatterwalk.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <crypto/internal/skcipher.h>
#include "amlogic-gxl.h"

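/* Pick the next hardware flow in round-robin order. */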
static int get_engine_number(struct meson_dev *mc)
{
	return atomic_inc_return(&mc->flow) % MAXFLOW;
}

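/*
 * Check whether the request can be handled by the hardware or must go
 * through the software fallback (empty requests, or source/destination
 * scatterlists whose layout the DMA engine cannot handle).
 */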
static bool meson_cipher_need_fallback(struct skcipher_request *areq)
{
	struct scatterlist *src_sg = areq->src;
	struct scatterlist *dst_sg = areq->dst;

	if (areq->cryptlen == 0)
		return true;

	if (sg_nents(src_sg) != sg_nents(dst_sg))
		return true;

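	/* The key/IV setup uses up to 3 descriptors, keep room for them. */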
	if (sg_nents(src_sg) > MAXDESC - 3 || sg_nents(dst_sg) > MAXDESC - 3)
		return true;

	while (src_sg && dst_sg) {
		if ((src_sg->length % 16) != 0)
			return true;
		if ((dst_sg->length % 16) != 0)
			return true;
		if (src_sg->length != dst_sg->length)
			return true;
		if (!IS_ALIGNED(src_sg->offset, sizeof(u32)))
			return true;
		if (!IS_ALIGNED(dst_sg->offset, sizeof(u32)))
			return true;
		src_sg = sg_next(src_sg);
		dst_sg = sg_next(dst_sg);
	}

	return false;
}

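/* Hand the request over to the software fallback skcipher. */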
static int meson_cipher_do_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	int err;
#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct meson_alg_template *algt;

	algt = container_of(alg, struct meson_alg_template, alg.skcipher);
	algt->stat_fb++;
#endif
	skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
	skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
				      areq->base.complete, areq->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);

	if (rctx->op_dir == MESON_DECRYPT)
		err = crypto_skcipher_decrypt(&rctx->fallback_req);
	else
		err = crypto_skcipher_encrypt(&rctx->fallback_req);
	return err;
}

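/*
 * Run one request on the hardware: load the key/IV through MODE_KEY
 * descriptors, chain one cipher descriptor per scatterlist entry, then
 * start the DMA on the selected flow and wait for it to complete.
 */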
static int meson_cipher(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct meson_dev *mc = op->mc;
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct meson_alg_template *algt;
	int flow = rctx->flow;
	unsigned int todo, eat, len;
	struct scatterlist *src_sg = areq->src;
	struct scatterlist *dst_sg = areq->dst;
	struct meson_desc *desc;
	int nr_sgs, nr_sgd;
	int i, err = 0;
	unsigned int keyivlen, ivsize, offset, tloffset;
	dma_addr_t phykeyiv;
	void *backup_iv = NULL, *bkeyiv;
	u32 v;

	algt = container_of(alg, struct meson_alg_template, alg.skcipher);

	dev_dbg(mc->dev, "%s %s %u %x IV(%u) key=%u flow=%d\n", __func__,
		crypto_tfm_alg_name(areq->base.tfm),
		areq->cryptlen,
		rctx->op_dir, crypto_skcipher_ivsize(tfm),
		op->keylen, flow);

#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
	algt->stat_req++;
	mc->chanlist[flow].stat_req++;
#endif

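	/*
	 * The key/IV material is passed to the hardware in a single DMA
	 * buffer: the key at offset 0 (up to 32 bytes) and, when an IV is
	 * used, the IV at offset 32, for at most 48 bytes.
	 */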
	bkeyiv = kzalloc(48, GFP_KERNEL | GFP_DMA);
	if (!bkeyiv)
		return -ENOMEM;

	memcpy(bkeyiv, op->key, op->keylen);
	keyivlen = op->keylen;

	ivsize = crypto_skcipher_ivsize(tfm);
	if (areq->iv && ivsize > 0) {
		if (ivsize > areq->cryptlen) {
			dev_err(mc->dev, "invalid ivsize=%d vs len=%d\n", ivsize, areq->cryptlen);
			err = -EINVAL;
			goto theend;
		}
		memcpy(bkeyiv + 32, areq->iv, ivsize);
		keyivlen = 48;
		if (rctx->op_dir == MESON_DECRYPT) {
			backup_iv = kzalloc(ivsize, GFP_KERNEL);
			if (!backup_iv) {
				err = -ENOMEM;
				goto theend;
			}
			offset = areq->cryptlen - ivsize;
			scatterwalk_map_and_copy(backup_iv, areq->src, offset,
						 ivsize, 0);
		}
	}
	if (keyivlen == 24)
		keyivlen = 32;

	phykeyiv = dma_map_single(mc->dev, bkeyiv, keyivlen,
				  DMA_TO_DEVICE);
	err = dma_mapping_error(mc->dev, phykeyiv);
	if (err) {
		dev_err(mc->dev, "Cannot DMA MAP KEY IV\n");
		goto theend;
	}

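	/* Each MODE_KEY descriptor loads 16 bytes of the key/IV buffer. */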
	tloffset = 0;
	eat = 0;
	i = 0;
	while (keyivlen > eat) {
		desc = &mc->chanlist[flow].tl[tloffset];
		memset(desc, 0, sizeof(struct meson_desc));
		todo = min(keyivlen - eat, 16u);
		desc->t_src = cpu_to_le32(phykeyiv + i * 16);
		desc->t_dst = cpu_to_le32(i * 16);
		v = (MODE_KEY << 20) | DESC_OWN | 16;
		desc->t_status = cpu_to_le32(v);

		eat += todo;
		i++;
		tloffset++;
	}

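	/* Map the source and destination scatterlists for DMA. */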
	if (areq->src == areq->dst) {
		nr_sgs = dma_map_sg(mc->dev, areq->src, sg_nents(areq->src),
				    DMA_BIDIRECTIONAL);
		if (nr_sgs <= 0) {
			dev_err(mc->dev, "Invalid SG count %d\n", nr_sgs);
			err = -EINVAL;
			goto theend;
		}
		nr_sgd = nr_sgs;
	} else {
		nr_sgs = dma_map_sg(mc->dev, areq->src, sg_nents(areq->src),
				    DMA_TO_DEVICE);
		if (nr_sgs <= 0 || nr_sgs > MAXDESC - 3) {
			dev_err(mc->dev, "Invalid SG count %d\n", nr_sgs);
			err = -EINVAL;
			goto theend;
		}
		nr_sgd = dma_map_sg(mc->dev, areq->dst, sg_nents(areq->dst),
				    DMA_FROM_DEVICE);
		if (nr_sgd <= 0 || nr_sgd > MAXDESC - 3) {
			dev_err(mc->dev, "Invalid SG count %d\n", nr_sgd);
			err = -EINVAL;
			goto theend;
		}
	}

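	/* One cipher descriptor per scatterlist entry, the last one flagged DESC_LAST. */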
	src_sg = areq->src;
	dst_sg = areq->dst;
	len = areq->cryptlen;
	while (src_sg) {
		desc = &mc->chanlist[flow].tl[tloffset];
		memset(desc, 0, sizeof(struct meson_desc));

		desc->t_src = cpu_to_le32(sg_dma_address(src_sg));
		desc->t_dst = cpu_to_le32(sg_dma_address(dst_sg));
		todo = min(len, sg_dma_len(src_sg));
		v = (op->keymode << 20) | DESC_OWN | todo | (algt->blockmode << 26);
		if (rctx->op_dir)
			v |= DESC_ENCRYPTION;
		len -= todo;

		if (!sg_next(src_sg))
			v |= DESC_LAST;
		desc->t_status = cpu_to_le32(v);
		tloffset++;
		src_sg = sg_next(src_sg);
		dst_sg = sg_next(dst_sg);
	}

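	/* Start the transfer on this flow and wait (up to 500 ms) for it to complete. */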
	reinit_completion(&mc->chanlist[flow].complete);
	mc->chanlist[flow].status = 0;
	writel(mc->chanlist[flow].t_phy | 2, mc->base + (flow << 2));
	wait_for_completion_interruptible_timeout(&mc->chanlist[flow].complete,
						  msecs_to_jiffies(500));
	if (mc->chanlist[flow].status == 0) {
		dev_err(mc->dev, "DMA timeout for flow %d\n", flow);
		err = -EINVAL;
	}

	dma_unmap_single(mc->dev, phykeyiv, keyivlen, DMA_TO_DEVICE);

	if (areq->src == areq->dst) {
		dma_unmap_sg(mc->dev, areq->src, sg_nents(areq->src), DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(mc->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
		dma_unmap_sg(mc->dev, areq->dst, sg_nents(areq->dst), DMA_FROM_DEVICE);
	}

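	/*
	 * Return the last ciphertext block in areq->iv so chained requests
	 * work: for decryption it was saved from the source beforehand, for
	 * encryption it is read back from the destination.
	 */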
	if (areq->iv && ivsize > 0) {
		if (rctx->op_dir == MESON_DECRYPT) {
			memcpy(areq->iv, backup_iv, ivsize);
		} else {
			scatterwalk_map_and_copy(areq->iv, areq->dst,
						 areq->cryptlen - ivsize,
						 ivsize, 0);
		}
	}
theend:
	kfree_sensitive(bkeyiv);
	kfree_sensitive(backup_iv);

	return err;
}

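/* crypto_engine callback: process one queued request and report its result. */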
static int meson_handle_cipher_request(struct crypto_engine *engine,
				       void *areq)
{
	int err;
	struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);

	err = meson_cipher(breq);
	local_bh_disable();
	crypto_finalize_skcipher_request(engine, breq, err);
	local_bh_enable();

	return 0;
}

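/*
 * Queue a request on one of the hardware flows, or process it with the
 * software fallback when the hardware cannot handle it.
 */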
int meson_skdecrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;
	int e;

	rctx->op_dir = MESON_DECRYPT;
	if (meson_cipher_need_fallback(areq))
		return meson_cipher_do_fallback(areq);
	e = get_engine_number(op->mc);
	engine = op->mc->chanlist[e].engine;
	rctx->flow = e;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

int meson_skencrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct meson_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;
	int e;

	rctx->op_dir = MESON_ENCRYPT;
	if (meson_cipher_need_fallback(areq))
		return meson_cipher_do_fallback(areq);
	e = get_engine_number(op->mc);
	engine = op->mc->chanlist[e].engine;
	rctx->flow = e;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

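/* Allocate the software fallback and set up the crypto_engine callbacks. */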
int meson_cipher_init(struct crypto_tfm *tfm)
{
	struct meson_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct meson_alg_template *algt;
	const char *name = crypto_tfm_alg_name(tfm);
	struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(sktfm);

	memset(op, 0, sizeof(struct meson_cipher_tfm_ctx));

	algt = container_of(alg, struct meson_alg_template, alg.skcipher);
	op->mc = algt->mc;

	op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(op->mc->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
			name, PTR_ERR(op->fallback_tfm));
		return PTR_ERR(op->fallback_tfm);
	}

	sktfm->reqsize = sizeof(struct meson_cipher_req_ctx) +
			 crypto_skcipher_reqsize(op->fallback_tfm);

	op->enginectx.op.do_one_request = meson_handle_cipher_request;
	op->enginectx.op.prepare_request = NULL;
	op->enginectx.op.unprepare_request = NULL;

	return 0;
}

void meson_cipher_exit(struct crypto_tfm *tfm)
{
	struct meson_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);

	kfree_sensitive(op->key);
	crypto_free_skcipher(op->fallback_tfm);
}

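/* Record the AES key for descriptor setup and mirror it into the fallback tfm. */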
int meson_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
		     unsigned int keylen)
{
	struct meson_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct meson_dev *mc = op->mc;

	switch (keylen) {
	case 128 / 8:
		op->keymode = MODE_AES_128;
		break;
	case 192 / 8:
		op->keymode = MODE_AES_192;
		break;
	case 256 / 8:
		op->keymode = MODE_AES_256;
		break;
	default:
		dev_dbg(mc->dev, "ERROR: Invalid keylen %u\n", keylen);
		return -EINVAL;
	}
	kfree_sensitive(op->key);
	op->keylen = keylen;
	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
	if (!op->key)
		return -ENOMEM;

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}