Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 /*
0003  * sun4i-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC
0004  *
0005  * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
0006  *
0007  * This file adds support for the AES cipher with 128, 192 and 256-bit
0008  * keysizes in CBC and ECB mode.
0009  * It also adds support for DES and 3DES in CBC and ECB mode.
0010  *
0011  * You can find the datasheet in Documentation/arm/sunxi.rst
0012  */
0013 #include "sun4i-ss.h"
0014 
/*
 * sun4i_ss_opti_poll() - optimized CPU-driven (PIO) cipher path.
 *
 * Fast path selected by sun4i_ss_cipher_poll() when every source and
 * destination SG entry has a length and offset that is a multiple of 4,
 * so data can be moved to/from the device FIFOs in whole 32-bit words
 * with no bounce buffering.
 *
 * The whole request runs under ss->slock with IRQs disabled, polling
 * the FIFO space register (SS_FCSR) between bursts.
 *
 * Return: 0 on success, -EINVAL on NULL SGs or SG iteration failure.
 */
static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
{
    struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
    struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
    struct sun4i_ss_ctx *ss = op->ss;
    unsigned int ivsize = crypto_skcipher_ivsize(tfm);
    struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
    u32 mode = ctx->mode;
    /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
    u32 rx_cnt = SS_RX_DEFAULT;
    u32 tx_cnt = 0;
    u32 spaces;
    u32 v;
    int err = 0;
    unsigned int i;
    unsigned int ileft = areq->cryptlen;
    unsigned int oleft = areq->cryptlen;
    unsigned int todo;
    unsigned long pi = 0, po = 0; /* progress for in and out */
    bool miter_err;
    struct sg_mapping_iter mi, mo;
    unsigned int oi, oo; /* offset for in and out */
    unsigned long flags;
    struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
    struct sun4i_ss_alg_template *algt;

    if (!areq->cryptlen)
        return 0;

    if (!areq->src || !areq->dst) {
        dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
        return -EINVAL;
    }

    /*
     * For CBC decryption the chained IV is the last ciphertext block:
     * save it now, before the destination (which may alias the source)
     * gets overwritten.
     */
    if (areq->iv && ivsize > 0 && mode & SS_DECRYPTION) {
        scatterwalk_map_and_copy(ctx->backup_iv, areq->src,
                     areq->cryptlen - ivsize, ivsize, 0);
    }

    if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) {
        algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
        algt->stat_opti++;
        algt->stat_bytes += areq->cryptlen;
    }

    spin_lock_irqsave(&ss->slock, flags);

    /* Program the key, one 32-bit word per KEY register */
    for (i = 0; i < op->keylen / 4; i++)
        writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1);

    /* Program the IV (at most four 32-bit words) */
    if (areq->iv) {
        for (i = 0; i < 4 && i < ivsize / 4; i++) {
            v = *(u32 *)(areq->iv + i * 4);
            writesl(ss->base + SS_IV0 + i * 4, &v, 1);
        }
    }
    /* Writing the mode starts the engine */
    writel(mode, ss->base + SS_CTL);


    /* All lengths are 4-byte aligned on this path; count in words */
    ileft = areq->cryptlen / 4;
    oleft = areq->cryptlen / 4;
    oi = 0;
    oo = 0;
    do {
        if (ileft) {
            /*
             * The atomic miter cannot stay mapped across the TX
             * drain below, so restart it on every pass and skip
             * the bytes already consumed (pi).
             */
            sg_miter_start(&mi, areq->src, sg_nents(areq->src),
                    SG_MITER_FROM_SG | SG_MITER_ATOMIC);
            if (pi)
                sg_miter_skip(&mi, pi);
            miter_err = sg_miter_next(&mi);
            if (!miter_err || !mi.addr) {
                dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
                err = -EINVAL;
                goto release_ss;
            }
            /* Push at most rx_cnt words, bounded by the current SG */
            todo = min(rx_cnt, ileft);
            todo = min_t(size_t, todo, (mi.length - oi) / 4);
            if (todo) {
                ileft -= todo;
                writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
                oi += todo * 4;
            }
            if (oi == mi.length) {
                /* Current SG entry fully consumed, advance */
                pi += mi.length;
                oi = 0;
            }
            sg_miter_stop(&mi);
        }

        /* Re-read FIFO occupancy for the next RX burst and TX drain */
        spaces = readl(ss->base + SS_FCSR);
        rx_cnt = SS_RXFIFO_SPACES(spaces);
        tx_cnt = SS_TXFIFO_SPACES(spaces);

        sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
                   SG_MITER_TO_SG | SG_MITER_ATOMIC);
        if (po)
            sg_miter_skip(&mo, po);
        miter_err = sg_miter_next(&mo);
        if (!miter_err || !mo.addr) {
            dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
            err = -EINVAL;
            goto release_ss;
        }
        /* Pull at most tx_cnt available words, bounded by current SG */
        todo = min(tx_cnt, oleft);
        todo = min_t(size_t, todo, (mo.length - oo) / 4);
        if (todo) {
            oleft -= todo;
            readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
            oo += todo * 4;
        }
        if (oo == mo.length) {
            oo = 0;
            po += mo.length;
        }
        sg_miter_stop(&mo);
    } while (oleft);

    /*
     * Provide IV chaining: on decryption restore the ciphertext block
     * saved before processing; on encryption the next IV is the last
     * ciphertext block just written to the destination.
     */
    if (areq->iv) {
        if (mode & SS_DECRYPTION) {
            memcpy(areq->iv, ctx->backup_iv, ivsize);
            memzero_explicit(ctx->backup_iv, ivsize);
        } else {
            scatterwalk_map_and_copy(areq->iv, areq->dst, areq->cryptlen - ivsize,
                         ivsize, 0);
        }
    }

release_ss:
    /* Stop the engine before releasing the lock */
    writel(0, ss->base + SS_CTL);
    spin_unlock_irqrestore(&ss->slock, flags);
    return err;
}
0147 
0148 static int noinline_for_stack sun4i_ss_cipher_poll_fallback(struct skcipher_request *areq)
0149 {
0150     struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
0151     struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
0152     struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
0153     int err;
0154     struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
0155     struct sun4i_ss_alg_template *algt;
0156 
0157     if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) {
0158         algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
0159         algt->stat_fb++;
0160     }
0161 
0162     skcipher_request_set_tfm(&ctx->fallback_req, op->fallback_tfm);
0163     skcipher_request_set_callback(&ctx->fallback_req, areq->base.flags,
0164                       areq->base.complete, areq->base.data);
0165     skcipher_request_set_crypt(&ctx->fallback_req, areq->src, areq->dst,
0166                    areq->cryptlen, areq->iv);
0167     if (ctx->mode & SS_DECRYPTION)
0168         err = crypto_skcipher_decrypt(&ctx->fallback_req);
0169     else
0170         err = crypto_skcipher_encrypt(&ctx->fallback_req);
0171 
0172     return err;
0173 }
0174 
/*
 * sun4i_ss_cipher_poll() - generic PIO cipher path; supports SGs whose
 * size/offset is not a multiple of 4.
 *
 * Dispatches to sun4i_ss_opti_poll() when all SG entries are 4-byte
 * aligned, or to the software fallback when cryptlen is not a multiple
 * of the block size. Otherwise processes the request here, linearizing
 * unaligned input into ss->buf and unaligned output through ss->bufo.
 *
 * Runs under ss->slock with IRQs disabled, polling SS_FCSR for FIFO
 * space between bursts.
 *
 * Return: 0 on success, -EINVAL on NULL SGs or SG iteration failure,
 * or the fallback's error code.
 */
static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
{
    struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
    struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
    struct sun4i_ss_ctx *ss = op->ss;
    int no_chunk = 1;
    struct scatterlist *in_sg = areq->src;
    struct scatterlist *out_sg = areq->dst;
    unsigned int ivsize = crypto_skcipher_ivsize(tfm);
    struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
    struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
    struct sun4i_ss_alg_template *algt;
    u32 mode = ctx->mode;
    /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
    u32 rx_cnt = SS_RX_DEFAULT;
    u32 tx_cnt = 0;
    u32 v;
    u32 spaces;
    int err = 0;
    unsigned int i;
    unsigned int ileft = areq->cryptlen;
    unsigned int oleft = areq->cryptlen;
    unsigned int todo;
    struct sg_mapping_iter mi, mo;
    unsigned long pi = 0, po = 0; /* progress for in and out */
    bool miter_err;
    unsigned int oi, oo;    /* offset for in and out */
    unsigned int ob = 0;    /* offset in buf */
    unsigned int obo = 0;   /* offset in bufo*/
    unsigned int obl = 0;   /* length of data in bufo */
    unsigned long flags;
    bool need_fallback = false;

    if (!areq->cryptlen)
        return 0;

    if (!areq->src || !areq->dst) {
        dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
        return -EINVAL;
    }

    /* Hardware can only handle whole blocks; otherwise use fallback */
    algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
    if (areq->cryptlen % algt->alg.crypto.base.cra_blocksize)
        need_fallback = true;

    /*
     * if we have only SGs with size multiple of 4,
     * we can use the SS optimized function
     */
    while (in_sg && no_chunk == 1) {
        if ((in_sg->length | in_sg->offset) & 3u)
            no_chunk = 0;
        in_sg = sg_next(in_sg);
    }
    while (out_sg && no_chunk == 1) {
        if ((out_sg->length | out_sg->offset) & 3u)
            no_chunk = 0;
        out_sg = sg_next(out_sg);
    }

    if (no_chunk == 1 && !need_fallback)
        return sun4i_ss_opti_poll(areq);

    if (need_fallback)
        return sun4i_ss_cipher_poll_fallback(areq);

    /*
     * For CBC decryption the chained IV is the last ciphertext block:
     * save it before the destination (which may alias the source) is
     * overwritten.
     */
    if (areq->iv && ivsize > 0 && mode & SS_DECRYPTION) {
        scatterwalk_map_and_copy(ctx->backup_iv, areq->src,
                     areq->cryptlen - ivsize, ivsize, 0);
    }

    if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) {
        algt->stat_req++;
        algt->stat_bytes += areq->cryptlen;
    }

    spin_lock_irqsave(&ss->slock, flags);

    /* Program the key, one 32-bit word per KEY register */
    for (i = 0; i < op->keylen / 4; i++)
        writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1);

    /* Program the IV (at most four 32-bit words) */
    if (areq->iv) {
        for (i = 0; i < 4 && i < ivsize / 4; i++) {
            v = *(u32 *)(areq->iv + i * 4);
            writesl(ss->base + SS_IV0 + i * 4, &v, 1);
        }
    }
    /* Writing the mode starts the engine */
    writel(mode, ss->base + SS_CTL);

    /* Unlike the opti path, lengths here are counted in bytes */
    ileft = areq->cryptlen;
    oleft = areq->cryptlen;
    oi = 0;
    oo = 0;

    while (oleft) {
        if (ileft) {
            /*
             * The atomic miter cannot stay mapped across the TX
             * drain below, so restart it each pass and skip the
             * bytes already consumed (pi).
             */
            sg_miter_start(&mi, areq->src, sg_nents(areq->src),
                       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
            if (pi)
                sg_miter_skip(&mi, pi);
            miter_err = sg_miter_next(&mi);
            if (!miter_err || !mi.addr) {
                dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
                err = -EINVAL;
                goto release_ss;
            }
            /*
             * todo is the number of consecutive 4byte word that we
             * can read from current SG
             */
            todo = min(rx_cnt, ileft / 4);
            todo = min_t(size_t, todo, (mi.length - oi) / 4);
            if (todo && !ob) {
                /* Aligned case: stream words straight to the FIFO */
                writesl(ss->base + SS_RXFIFO, mi.addr + oi,
                    todo);
                ileft -= todo * 4;
                oi += todo * 4;
            } else {
                /*
                 * not enough consecutive bytes, so we need to
                 * linearize in buf. todo is in bytes
                 * After that copy, if we have a multiple of 4
                 * we need to be able to write all buf in one
                 * pass, so it is why we min() with rx_cnt
                 */
                todo = min(rx_cnt * 4 - ob, ileft);
                todo = min_t(size_t, todo, mi.length - oi);
                memcpy(ss->buf + ob, mi.addr + oi, todo);
                ileft -= todo;
                oi += todo;
                ob += todo;
                if (!(ob % 4)) {
                    /* Bounce buffer word-aligned: flush it */
                    writesl(ss->base + SS_RXFIFO, ss->buf,
                        ob / 4);
                    ob = 0;
                }
            }
            if (oi == mi.length) {
                /* Current SG entry fully consumed, advance */
                pi += mi.length;
                oi = 0;
            }
            sg_miter_stop(&mi);
        }

        /* Re-read FIFO occupancy for the next RX burst and TX drain */
        spaces = readl(ss->base + SS_FCSR);
        rx_cnt = SS_RXFIFO_SPACES(spaces);
        tx_cnt = SS_TXFIFO_SPACES(spaces);

        /* Nothing to read back yet, push more input first */
        if (!tx_cnt)
            continue;
        sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
                   SG_MITER_TO_SG | SG_MITER_ATOMIC);
        if (po)
            sg_miter_skip(&mo, po);
        miter_err = sg_miter_next(&mo);
        if (!miter_err || !mo.addr) {
            dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
            err = -EINVAL;
            goto release_ss;
        }
        /* todo in 4bytes word */
        todo = min(tx_cnt, oleft / 4);
        todo = min_t(size_t, todo, (mo.length - oo) / 4);

        if (todo) {
            /* Aligned case: stream words straight from the FIFO */
            readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
            oleft -= todo * 4;
            oo += todo * 4;
            if (oo == mo.length) {
                po += mo.length;
                oo = 0;
            }
        } else {
            /*
             * read obl bytes in bufo, we read at maximum for
             * emptying the device
             */
            readsl(ss->base + SS_TXFIFO, ss->bufo, tx_cnt);
            obl = tx_cnt * 4;
            obo = 0;
            do {
                /*
                 * how many bytes we can copy ?
                 * no more than remaining SG size
                 * no more than remaining buffer
                 * no need to test against oleft
                 */
                todo = min_t(size_t,
                         mo.length - oo, obl - obo);
                memcpy(mo.addr + oo, ss->bufo + obo, todo);
                oleft -= todo;
                obo += todo;
                oo += todo;
                if (oo == mo.length) {
                    po += mo.length;
                    /*
                     * NOTE(review): the return value of this
                     * sg_miter_next() is not checked, unlike
                     * every other call in this file — verify
                     * the dst SG list can never end here.
                     */
                    sg_miter_next(&mo);
                    oo = 0;
                }
            } while (obo < obl);
            /* bufo must be fully used here */
        }
        sg_miter_stop(&mo);
    }
    /*
     * Provide IV chaining: on decryption restore the ciphertext block
     * saved before processing; on encryption the next IV is the last
     * ciphertext block just written to the destination.
     */
    if (areq->iv) {
        if (mode & SS_DECRYPTION) {
            memcpy(areq->iv, ctx->backup_iv, ivsize);
            memzero_explicit(ctx->backup_iv, ivsize);
        } else {
            scatterwalk_map_and_copy(areq->iv, areq->dst, areq->cryptlen - ivsize,
                         ivsize, 0);
        }
    }

release_ss:
    /* Stop the engine before releasing the lock */
    writel(0, ss->base + SS_CTL);
    spin_unlock_irqrestore(&ss->slock, flags);

    return err;
}
0395 
0396 /* CBC AES */
0397 int sun4i_ss_cbc_aes_encrypt(struct skcipher_request *areq)
0398 {
0399     struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
0400     struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
0401     struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
0402 
0403     rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
0404         op->keymode;
0405     return sun4i_ss_cipher_poll(areq);
0406 }
0407 
0408 int sun4i_ss_cbc_aes_decrypt(struct skcipher_request *areq)
0409 {
0410     struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
0411     struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
0412     struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
0413 
0414     rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
0415         op->keymode;
0416     return sun4i_ss_cipher_poll(areq);
0417 }
0418 
0419 /* ECB AES */
0420 int sun4i_ss_ecb_aes_encrypt(struct skcipher_request *areq)
0421 {
0422     struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
0423     struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
0424     struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
0425 
0426     rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
0427         op->keymode;
0428     return sun4i_ss_cipher_poll(areq);
0429 }
0430 
0431 int sun4i_ss_ecb_aes_decrypt(struct skcipher_request *areq)
0432 {
0433     struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
0434     struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
0435     struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
0436 
0437     rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
0438         op->keymode;
0439     return sun4i_ss_cipher_poll(areq);
0440 }
0441 
0442 /* CBC DES */
0443 int sun4i_ss_cbc_des_encrypt(struct skcipher_request *areq)
0444 {
0445     struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
0446     struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
0447     struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
0448 
0449     rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
0450         op->keymode;
0451     return sun4i_ss_cipher_poll(areq);
0452 }
0453 
0454 int sun4i_ss_cbc_des_decrypt(struct skcipher_request *areq)
0455 {
0456     struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
0457     struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
0458     struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
0459 
0460     rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
0461         op->keymode;
0462     return sun4i_ss_cipher_poll(areq);
0463 }
0464 
0465 /* ECB DES */
0466 int sun4i_ss_ecb_des_encrypt(struct skcipher_request *areq)
0467 {
0468     struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
0469     struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
0470     struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
0471 
0472     rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
0473         op->keymode;
0474     return sun4i_ss_cipher_poll(areq);
0475 }
0476 
0477 int sun4i_ss_ecb_des_decrypt(struct skcipher_request *areq)
0478 {
0479     struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
0480     struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
0481     struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
0482 
0483     rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
0484         op->keymode;
0485     return sun4i_ss_cipher_poll(areq);
0486 }
0487 
0488 /* CBC 3DES */
0489 int sun4i_ss_cbc_des3_encrypt(struct skcipher_request *areq)
0490 {
0491     struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
0492     struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
0493     struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
0494 
0495     rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
0496         op->keymode;
0497     return sun4i_ss_cipher_poll(areq);
0498 }
0499 
0500 int sun4i_ss_cbc_des3_decrypt(struct skcipher_request *areq)
0501 {
0502     struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
0503     struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
0504     struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
0505 
0506     rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
0507         op->keymode;
0508     return sun4i_ss_cipher_poll(areq);
0509 }
0510 
0511 /* ECB 3DES */
0512 int sun4i_ss_ecb_des3_encrypt(struct skcipher_request *areq)
0513 {
0514     struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
0515     struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
0516     struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
0517 
0518     rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
0519         op->keymode;
0520     return sun4i_ss_cipher_poll(areq);
0521 }
0522 
0523 int sun4i_ss_ecb_des3_decrypt(struct skcipher_request *areq)
0524 {
0525     struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
0526     struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
0527     struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
0528 
0529     rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
0530         op->keymode;
0531     return sun4i_ss_cipher_poll(areq);
0532 }
0533 
0534 int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
0535 {
0536     struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
0537     struct sun4i_ss_alg_template *algt;
0538     const char *name = crypto_tfm_alg_name(tfm);
0539     int err;
0540 
0541     memset(op, 0, sizeof(struct sun4i_tfm_ctx));
0542 
0543     algt = container_of(tfm->__crt_alg, struct sun4i_ss_alg_template,
0544                 alg.crypto.base);
0545     op->ss = algt->ss;
0546 
0547     op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
0548     if (IS_ERR(op->fallback_tfm)) {
0549         dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
0550             name, PTR_ERR(op->fallback_tfm));
0551         return PTR_ERR(op->fallback_tfm);
0552     }
0553 
0554     crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
0555                     sizeof(struct sun4i_cipher_req_ctx) +
0556                     crypto_skcipher_reqsize(op->fallback_tfm));
0557 
0558     err = pm_runtime_resume_and_get(op->ss->dev);
0559     if (err < 0)
0560         goto error_pm;
0561 
0562     return 0;
0563 error_pm:
0564     crypto_free_skcipher(op->fallback_tfm);
0565     return err;
0566 }
0567 
0568 void sun4i_ss_cipher_exit(struct crypto_tfm *tfm)
0569 {
0570     struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
0571 
0572     crypto_free_skcipher(op->fallback_tfm);
0573     pm_runtime_put(op->ss->dev);
0574 }
0575 
0576 /* check and set the AES key, prepare the mode to be used */
0577 int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
0578             unsigned int keylen)
0579 {
0580     struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
0581     struct sun4i_ss_ctx *ss = op->ss;
0582 
0583     switch (keylen) {
0584     case 128 / 8:
0585         op->keymode = SS_AES_128BITS;
0586         break;
0587     case 192 / 8:
0588         op->keymode = SS_AES_192BITS;
0589         break;
0590     case 256 / 8:
0591         op->keymode = SS_AES_256BITS;
0592         break;
0593     default:
0594         dev_dbg(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
0595         return -EINVAL;
0596     }
0597     op->keylen = keylen;
0598     memcpy(op->key, key, keylen);
0599 
0600     crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
0601     crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
0602 
0603     return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
0604 }
0605 
0606 /* check and set the DES key, prepare the mode to be used */
0607 int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
0608             unsigned int keylen)
0609 {
0610     struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
0611     int err;
0612 
0613     err = verify_skcipher_des_key(tfm, key);
0614     if (err)
0615         return err;
0616 
0617     op->keylen = keylen;
0618     memcpy(op->key, key, keylen);
0619 
0620     crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
0621     crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
0622 
0623     return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
0624 }
0625 
0626 /* check and set the 3DES key, prepare the mode to be used */
0627 int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
0628              unsigned int keylen)
0629 {
0630     struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
0631     int err;
0632 
0633     err = verify_skcipher_des3_key(tfm, key);
0634     if (err)
0635         return err;
0636 
0637     op->keylen = keylen;
0638     memcpy(op->key, key, keylen);
0639 
0640     crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
0641     crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
0642 
0643     return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
0644 }