Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002  /* Algorithms supported by virtio crypto device
0003   *
0004   * Authors: Gonglei <arei.gonglei@huawei.com>
0005   *
0006   * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
0007   */
0008 
0009 #include <linux/scatterlist.h>
0010 #include <crypto/algapi.h>
0011 #include <crypto/internal/skcipher.h>
0012 #include <linux/err.h>
0013 #include <crypto/scatterwalk.h>
0014 #include <linux/atomic.h>
0015 
0016 #include <uapi/linux/virtio_crypto.h>
0017 #include "virtio_crypto_common.h"
0018 
0019 
/* Per-transform (tfm) context for the virtio AES-CBC skcipher. */
struct virtio_crypto_skcipher_ctx {
	struct crypto_engine_ctx enginectx;
	/* Backing device; NULL until the first successful setkey() */
	struct virtio_crypto *vcrypto;
	struct crypto_skcipher *tfm;

	/* Separate device sessions, one per direction */
	struct virtio_crypto_sym_session_info enc_sess_info;
	struct virtio_crypto_sym_session_info dec_sess_info;
};
0028 
/* Per-request state, carved out of the skcipher_request's reqsize area. */
struct virtio_crypto_sym_request {
	struct virtio_crypto_request base;

	/* Cipher or aead */
	uint32_t type;
	struct virtio_crypto_skcipher_ctx *skcipher_ctx;
	struct skcipher_request *skcipher_req;
	/* Heap copy of the IV (no DMA from the stack); freed on completion */
	uint8_t *iv;
	/* Encryption? */
	bool encrypt;
};
0040 
/* One entry of the driver's algorithm table (see virtio_crypto_algs[]). */
struct virtio_crypto_algo {
	uint32_t algonum;
	uint32_t service;
	/* Number of devices using this algo; registered while non-zero */
	unsigned int active_devs;
	struct skcipher_alg algo;
};
0047 
/*
 * The algs_lock protects the per-algorithm active_devs counters
 * below and crypto algorithm registration/unregistration.
 */
0052 static DEFINE_MUTEX(algs_lock);
0053 static void virtio_crypto_skcipher_finalize_req(
0054     struct virtio_crypto_sym_request *vc_sym_req,
0055     struct skcipher_request *req,
0056     int err);
0057 
0058 static void virtio_crypto_dataq_sym_callback
0059         (struct virtio_crypto_request *vc_req, int len)
0060 {
0061     struct virtio_crypto_sym_request *vc_sym_req =
0062         container_of(vc_req, struct virtio_crypto_sym_request, base);
0063     struct skcipher_request *ablk_req;
0064     int error;
0065 
0066     /* Finish the encrypt or decrypt process */
0067     if (vc_sym_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
0068         switch (vc_req->status) {
0069         case VIRTIO_CRYPTO_OK:
0070             error = 0;
0071             break;
0072         case VIRTIO_CRYPTO_INVSESS:
0073         case VIRTIO_CRYPTO_ERR:
0074             error = -EINVAL;
0075             break;
0076         case VIRTIO_CRYPTO_BADMSG:
0077             error = -EBADMSG;
0078             break;
0079         default:
0080             error = -EIO;
0081             break;
0082         }
0083         ablk_req = vc_sym_req->skcipher_req;
0084         virtio_crypto_skcipher_finalize_req(vc_sym_req,
0085                             ablk_req, error);
0086     }
0087 }
0088 
0089 static u64 virtio_crypto_alg_sg_nents_length(struct scatterlist *sg)
0090 {
0091     u64 total = 0;
0092 
0093     for (total = 0; sg; sg = sg_next(sg))
0094         total += sg->length;
0095 
0096     return total;
0097 }
0098 
0099 static int
0100 virtio_crypto_alg_validate_key(int key_len, uint32_t *alg)
0101 {
0102     switch (key_len) {
0103     case AES_KEYSIZE_128:
0104     case AES_KEYSIZE_192:
0105     case AES_KEYSIZE_256:
0106         *alg = VIRTIO_CRYPTO_CIPHER_AES_CBC;
0107         break;
0108     default:
0109         return -EINVAL;
0110     }
0111     return 0;
0112 }
0113 
/*
 * Create one cipher session (encrypt or decrypt direction) on the
 * device via the control virtqueue and record the returned session id
 * in @ctx.
 *
 * Returns 0 on success or a negative errno.
 */
static int virtio_crypto_alg_skcipher_init_session(
		struct virtio_crypto_skcipher_ctx *ctx,
		uint32_t alg, const uint8_t *key,
		unsigned int keylen,
		int encrypt)
{
	struct scatterlist outhdr, key_sg, inhdr, *sgs[3];
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	int op = encrypt ? VIRTIO_CRYPTO_OP_ENCRYPT : VIRTIO_CRYPTO_OP_DECRYPT;
	int err;
	unsigned int num_out = 0, num_in = 0;
	struct virtio_crypto_op_ctrl_req *ctrl;
	struct virtio_crypto_session_input *input;
	struct virtio_crypto_sym_create_session_req *sym_create_session;
	struct virtio_crypto_ctrl_request *vc_ctrl_req;

	/*
	 * Avoid to do DMA from the stack, switch to using
	 * dynamically-allocated for the key
	 */
	uint8_t *cipher_key = kmemdup(key, keylen, GFP_ATOMIC);

	if (!cipher_key)
		return -ENOMEM;

	vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
	if (!vc_ctrl_req) {
		err = -ENOMEM;
		goto out;
	}

	/* Pad ctrl header */
	ctrl = &vc_ctrl_req->ctrl;
	ctrl->header.opcode = cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
	ctrl->header.algo = cpu_to_le32(alg);
	/* Set the default dataqueue id to 0 */
	ctrl->header.queue_id = 0;

	/* Preset the device-writable status to error in case it is not set */
	input = &vc_ctrl_req->input;
	input->status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
	/* Pad cipher's parameters */
	sym_create_session = &ctrl->u.sym_create_session;
	sym_create_session->op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
	sym_create_session->u.cipher.para.algo = ctrl->header.algo;
	sym_create_session->u.cipher.para.keylen = cpu_to_le32(keylen);
	sym_create_session->u.cipher.para.op = cpu_to_le32(op);

	/* Descriptor chain: out = ctrl header + key, in = status/session id */
	sg_init_one(&outhdr, ctrl, sizeof(*ctrl));
	sgs[num_out++] = &outhdr;

	/* Set key */
	sg_init_one(&key_sg, cipher_key, keylen);
	sgs[num_out++] = &key_sg;

	/* Return status and session id back */
	sg_init_one(&inhdr, input, sizeof(*input));
	sgs[num_out + num_in++] = &inhdr;

	err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
	if (err < 0)
		goto out;

	if (le32_to_cpu(input->status) != VIRTIO_CRYPTO_OK) {
		pr_err("virtio_crypto: Create session failed status: %u\n",
			le32_to_cpu(input->status));
		err = -EINVAL;
		goto out;
	}

	/* Remember the session id for the requested direction */
	if (encrypt)
		ctx->enc_sess_info.session_id = le64_to_cpu(input->session_id);
	else
		ctx->dec_sess_info.session_id = le64_to_cpu(input->session_id);

	err = 0;
out:
	kfree(vc_ctrl_req);
	/* Key material: zeroized free */
	kfree_sensitive(cipher_key);
	return err;
}
0194 
0195 static int virtio_crypto_alg_skcipher_close_session(
0196         struct virtio_crypto_skcipher_ctx *ctx,
0197         int encrypt)
0198 {
0199     struct scatterlist outhdr, status_sg, *sgs[2];
0200     struct virtio_crypto_destroy_session_req *destroy_session;
0201     struct virtio_crypto *vcrypto = ctx->vcrypto;
0202     int err;
0203     unsigned int num_out = 0, num_in = 0;
0204     struct virtio_crypto_op_ctrl_req *ctrl;
0205     struct virtio_crypto_inhdr *ctrl_status;
0206     struct virtio_crypto_ctrl_request *vc_ctrl_req;
0207 
0208     vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
0209     if (!vc_ctrl_req)
0210         return -ENOMEM;
0211 
0212     ctrl_status = &vc_ctrl_req->ctrl_status;
0213     ctrl_status->status = VIRTIO_CRYPTO_ERR;
0214     /* Pad ctrl header */
0215     ctrl = &vc_ctrl_req->ctrl;
0216     ctrl->header.opcode = cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
0217     /* Set the default virtqueue id to 0 */
0218     ctrl->header.queue_id = 0;
0219 
0220     destroy_session = &ctrl->u.destroy_session;
0221 
0222     if (encrypt)
0223         destroy_session->session_id = cpu_to_le64(ctx->enc_sess_info.session_id);
0224     else
0225         destroy_session->session_id = cpu_to_le64(ctx->dec_sess_info.session_id);
0226 
0227     sg_init_one(&outhdr, ctrl, sizeof(*ctrl));
0228     sgs[num_out++] = &outhdr;
0229 
0230     /* Return status and session id back */
0231     sg_init_one(&status_sg, &ctrl_status->status, sizeof(ctrl_status->status));
0232     sgs[num_out + num_in++] = &status_sg;
0233 
0234     err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
0235     if (err < 0)
0236         goto out;
0237 
0238     if (ctrl_status->status != VIRTIO_CRYPTO_OK) {
0239         pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
0240             ctrl_status->status, destroy_session->session_id);
0241 
0242         return -EINVAL;
0243     }
0244 
0245     err = 0;
0246 out:
0247     kfree(vc_ctrl_req);
0248     return err;
0249 }
0250 
0251 static int virtio_crypto_alg_skcipher_init_sessions(
0252         struct virtio_crypto_skcipher_ctx *ctx,
0253         const uint8_t *key, unsigned int keylen)
0254 {
0255     uint32_t alg;
0256     int ret;
0257     struct virtio_crypto *vcrypto = ctx->vcrypto;
0258 
0259     if (keylen > vcrypto->max_cipher_key_len) {
0260         pr_err("virtio_crypto: the key is too long\n");
0261         return -EINVAL;
0262     }
0263 
0264     if (virtio_crypto_alg_validate_key(keylen, &alg))
0265         return -EINVAL;
0266 
0267     /* Create encryption session */
0268     ret = virtio_crypto_alg_skcipher_init_session(ctx,
0269             alg, key, keylen, 1);
0270     if (ret)
0271         return ret;
0272     /* Create decryption session */
0273     ret = virtio_crypto_alg_skcipher_init_session(ctx,
0274             alg, key, keylen, 0);
0275     if (ret) {
0276         virtio_crypto_alg_skcipher_close_session(ctx, 1);
0277         return ret;
0278     }
0279     return 0;
0280 }
0281 
/* Note: kernel crypto API realization */
/*
 * Set (or replace) the AES key for @tfm: bind a backing device on
 * first use, then create fresh encrypt/decrypt sessions on it.
 */
static int virtio_crypto_skcipher_setkey(struct crypto_skcipher *tfm,
					 const uint8_t *key,
					 unsigned int keylen)
{
	struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	uint32_t alg;
	int ret;

	/* Reject unsupported key sizes before touching any device */
	ret = virtio_crypto_alg_validate_key(keylen, &alg);
	if (ret)
		return ret;

	if (!ctx->vcrypto) {
		/* New key */
		int node = virtio_crypto_get_current_node();
		struct virtio_crypto *vcrypto =
				      virtcrypto_get_dev_node(node,
				      VIRTIO_CRYPTO_SERVICE_CIPHER, alg);
		if (!vcrypto) {
			pr_err("virtio_crypto: Could not find a virtio device in the system or unsupported algo\n");
			return -ENODEV;
		}

		ctx->vcrypto = vcrypto;
	} else {
		/* Rekeying, we should close the created sessions previously */
		virtio_crypto_alg_skcipher_close_session(ctx, 1);
		virtio_crypto_alg_skcipher_close_session(ctx, 0);
	}

	ret = virtio_crypto_alg_skcipher_init_sessions(ctx, key, keylen);
	if (ret) {
		/* Drop the device reference; a later setkey() starts over */
		virtcrypto_dev_put(ctx->vcrypto);
		ctx->vcrypto = NULL;

		return ret;
	}

	return 0;
}
0323 
/*
 * Build the virtio descriptor chain for one cipher request and add it
 * to @data_vq: out = op header + IV + source data, in = destination
 * data + status byte.
 *
 * Returns 0 on success or a negative errno; on failure all
 * intermediate allocations are released.
 */
static int
__virtio_crypto_skcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
		struct skcipher_request *req,
		struct data_queue *data_vq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct virtio_crypto_skcipher_ctx *ctx = vc_sym_req->skcipher_ctx;
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	struct virtio_crypto_op_data_req *req_data;
	int src_nents, dst_nents;
	int err;
	unsigned long flags;
	struct scatterlist outhdr, iv_sg, status_sg, **sgs;
	u64 dst_len;
	unsigned int num_out = 0, num_in = 0;
	int sg_total;
	uint8_t *iv;
	struct scatterlist *sg;

	src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (src_nents < 0) {
		pr_err("Invalid number of src SG.\n");
		return src_nents;
	}

	dst_nents = sg_nents(req->dst);

	pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
			src_nents, dst_nents);

	/* Why 3?  outhdr + iv + inhdr */
	sg_total = src_nents + dst_nents + 3;
	/* Allocate near the device's NUMA node */
	sgs = kcalloc_node(sg_total, sizeof(*sgs), GFP_KERNEL,
				dev_to_node(&vcrypto->vdev->dev));
	if (!sgs)
		return -ENOMEM;

	req_data = kzalloc_node(sizeof(*req_data), GFP_KERNEL,
				dev_to_node(&vcrypto->vdev->dev));
	if (!req_data) {
		kfree(sgs);
		return -ENOMEM;
	}

	vc_req->req_data = req_data;
	vc_sym_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
	/* Head of operation */
	if (vc_sym_req->encrypt) {
		req_data->header.session_id =
			cpu_to_le64(ctx->enc_sess_info.session_id);
		req_data->header.opcode =
			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_ENCRYPT);
	} else {
		req_data->header.session_id =
			cpu_to_le64(ctx->dec_sess_info.session_id);
		req_data->header.opcode =
			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT);
	}
	req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
	req_data->u.sym_req.u.cipher.para.iv_len = cpu_to_le32(ivsize);
	req_data->u.sym_req.u.cipher.para.src_data_len =
			cpu_to_le32(req->cryptlen);

	dst_len = virtio_crypto_alg_sg_nents_length(req->dst);
	if (unlikely(dst_len > U32_MAX)) {
		pr_err("virtio_crypto: The dst_len is beyond U32_MAX\n");
		err = -EINVAL;
		goto free;
	}

	/* The device produces at most cryptlen bytes of output */
	dst_len = min_t(unsigned int, req->cryptlen, dst_len);
	pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
			req->cryptlen, dst_len);

	if (unlikely(req->cryptlen + dst_len + ivsize +
		sizeof(vc_req->status) > vcrypto->max_size)) {
		pr_err("virtio_crypto: The length is too big\n");
		err = -EINVAL;
		goto free;
	}

	req_data->u.sym_req.u.cipher.para.dst_data_len =
			cpu_to_le32((uint32_t)dst_len);

	/* Outhdr */
	sg_init_one(&outhdr, req_data, sizeof(*req_data));
	sgs[num_out++] = &outhdr;

	/* IV */

	/*
	 * Avoid to do DMA from the stack, switch to using
	 * dynamically-allocated for the IV
	 */
	iv = kzalloc_node(ivsize, GFP_ATOMIC,
				dev_to_node(&vcrypto->vdev->dev));
	if (!iv) {
		err = -ENOMEM;
		goto free;
	}
	memcpy(iv, req->iv, ivsize);
	/*
	 * CBC decrypt: save the last ciphertext block into req->iv as
	 * the chaining IV before the device may overwrite the buffers.
	 */
	if (!vc_sym_req->encrypt)
		scatterwalk_map_and_copy(req->iv, req->src,
					 req->cryptlen - AES_BLOCK_SIZE,
					 AES_BLOCK_SIZE, 0);

	sg_init_one(&iv_sg, iv, ivsize);
	sgs[num_out++] = &iv_sg;
	/* Freed by virtio_crypto_skcipher_finalize_req() */
	vc_sym_req->iv = iv;

	/* Source data */
	for (sg = req->src; src_nents; sg = sg_next(sg), src_nents--)
		sgs[num_out++] = sg;

	/* Destination data */
	for (sg = req->dst; sg; sg = sg_next(sg))
		sgs[num_out + num_in++] = sg;

	/* Status */
	sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
	sgs[num_out + num_in++] = &status_sg;

	vc_req->sgs = sgs;

	/* Queue manipulation is serialized against the interrupt path */
	spin_lock_irqsave(&data_vq->lock, flags);
	err = virtqueue_add_sgs(data_vq->vq, sgs, num_out,
				num_in, vc_req, GFP_ATOMIC);
	virtqueue_kick(data_vq->vq);
	spin_unlock_irqrestore(&data_vq->lock, flags);
	if (unlikely(err < 0))
		goto free_iv;

	return 0;

free_iv:
	kfree_sensitive(iv);
free:
	kfree_sensitive(req_data);
	kfree(sgs);
	return err;
}
0467 
0468 static int virtio_crypto_skcipher_encrypt(struct skcipher_request *req)
0469 {
0470     struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(req);
0471     struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(atfm);
0472     struct virtio_crypto_sym_request *vc_sym_req =
0473                 skcipher_request_ctx(req);
0474     struct virtio_crypto_request *vc_req = &vc_sym_req->base;
0475     struct virtio_crypto *vcrypto = ctx->vcrypto;
0476     /* Use the first data virtqueue as default */
0477     struct data_queue *data_vq = &vcrypto->data_vq[0];
0478 
0479     if (!req->cryptlen)
0480         return 0;
0481     if (req->cryptlen % AES_BLOCK_SIZE)
0482         return -EINVAL;
0483 
0484     vc_req->dataq = data_vq;
0485     vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
0486     vc_sym_req->skcipher_ctx = ctx;
0487     vc_sym_req->skcipher_req = req;
0488     vc_sym_req->encrypt = true;
0489 
0490     return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req);
0491 }
0492 
0493 static int virtio_crypto_skcipher_decrypt(struct skcipher_request *req)
0494 {
0495     struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(req);
0496     struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(atfm);
0497     struct virtio_crypto_sym_request *vc_sym_req =
0498                 skcipher_request_ctx(req);
0499     struct virtio_crypto_request *vc_req = &vc_sym_req->base;
0500     struct virtio_crypto *vcrypto = ctx->vcrypto;
0501     /* Use the first data virtqueue as default */
0502     struct data_queue *data_vq = &vcrypto->data_vq[0];
0503 
0504     if (!req->cryptlen)
0505         return 0;
0506     if (req->cryptlen % AES_BLOCK_SIZE)
0507         return -EINVAL;
0508 
0509     vc_req->dataq = data_vq;
0510     vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
0511     vc_sym_req->skcipher_ctx = ctx;
0512     vc_sym_req->skcipher_req = req;
0513     vc_sym_req->encrypt = false;
0514 
0515     return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req);
0516 }
0517 
/*
 * tfm init: reserve per-request context space and wire this transform
 * up to the crypto engine.
 */
static int virtio_crypto_skcipher_init(struct crypto_skcipher *tfm)
{
	struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	/* Each skcipher_request carries a virtio_crypto_sym_request */
	crypto_skcipher_set_reqsize(tfm, sizeof(struct virtio_crypto_sym_request));
	ctx->tfm = tfm;

	/* No separate prepare/unprepare steps are needed */
	ctx->enginectx.op.do_one_request = virtio_crypto_skcipher_crypt_req;
	ctx->enginectx.op.prepare_request = NULL;
	ctx->enginectx.op.unprepare_request = NULL;
	return 0;
}
0530 
/*
 * tfm exit: close both device sessions and drop the device reference
 * taken by setkey(), if one exists.
 */
static void virtio_crypto_skcipher_exit(struct crypto_skcipher *tfm)
{
	struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	/* No device bound means setkey() never succeeded: nothing to undo */
	if (!ctx->vcrypto)
		return;

	virtio_crypto_alg_skcipher_close_session(ctx, 1);
	virtio_crypto_alg_skcipher_close_session(ctx, 0);
	virtcrypto_dev_put(ctx->vcrypto);
	ctx->vcrypto = NULL;
}
0543 
/*
 * Crypto-engine do_one_request hook: submit the request to the data
 * virtqueue and kick the device. Completion happens asynchronously
 * via virtio_crypto_dataq_sym_callback().
 */
int virtio_crypto_skcipher_crypt_req(
	struct crypto_engine *engine, void *vreq)
{
	struct skcipher_request *req = container_of(vreq, struct skcipher_request, base);
	struct virtio_crypto_sym_request *vc_sym_req =
				skcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct data_queue *data_vq = vc_req->dataq;
	int ret;

	ret = __virtio_crypto_skcipher_do_req(vc_sym_req, req, data_vq);
	if (ret < 0)
		return ret;

	virtqueue_kick(data_vq->vq);

	return 0;
}
0562 
/*
 * Completion path: for encryption, copy the last ciphertext block back
 * into req->iv (the chaining IV for a follow-up request — decryption
 * saved it before submission), free the IV copy and report the result
 * to the crypto engine.
 */
static void virtio_crypto_skcipher_finalize_req(
	struct virtio_crypto_sym_request *vc_sym_req,
	struct skcipher_request *req,
	int err)
{
	if (vc_sym_req->encrypt)
		scatterwalk_map_and_copy(req->iv, req->dst,
					 req->cryptlen - AES_BLOCK_SIZE,
					 AES_BLOCK_SIZE, 0);
	/* IV may contain sensitive state: zeroized free */
	kfree_sensitive(vc_sym_req->iv);
	virtcrypto_clear_request(&vc_sym_req->base);

	crypto_finalize_skcipher_request(vc_sym_req->base.dataq->engine,
					   req, err);
}
0578 
/* Algorithms this driver can expose; currently AES-CBC only. */
static struct virtio_crypto_algo virtio_crypto_algs[] = { {
	.algonum = VIRTIO_CRYPTO_CIPHER_AES_CBC,
	.service = VIRTIO_CRYPTO_SERVICE_CIPHER,
	.algo = {
		.base.cra_name		= "cbc(aes)",
		.base.cra_driver_name	= "virtio_crypto_aes_cbc",
		.base.cra_priority	= 150,
		.base.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_ALLOCATES_MEMORY,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct virtio_crypto_skcipher_ctx),
		.base.cra_module	= THIS_MODULE,
		.init			= virtio_crypto_skcipher_init,
		.exit			= virtio_crypto_skcipher_exit,
		.setkey			= virtio_crypto_skcipher_setkey,
		.decrypt		= virtio_crypto_skcipher_decrypt,
		.encrypt		= virtio_crypto_skcipher_encrypt,
		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.ivsize			= AES_BLOCK_SIZE,
	},
} };
0601 
0602 int virtio_crypto_skcipher_algs_register(struct virtio_crypto *vcrypto)
0603 {
0604     int ret = 0;
0605     int i = 0;
0606 
0607     mutex_lock(&algs_lock);
0608 
0609     for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {
0610 
0611         uint32_t service = virtio_crypto_algs[i].service;
0612         uint32_t algonum = virtio_crypto_algs[i].algonum;
0613 
0614         if (!virtcrypto_algo_is_supported(vcrypto, service, algonum))
0615             continue;
0616 
0617         if (virtio_crypto_algs[i].active_devs == 0) {
0618             ret = crypto_register_skcipher(&virtio_crypto_algs[i].algo);
0619             if (ret)
0620                 goto unlock;
0621         }
0622 
0623         virtio_crypto_algs[i].active_devs++;
0624         dev_info(&vcrypto->vdev->dev, "Registered algo %s\n",
0625              virtio_crypto_algs[i].algo.base.cra_name);
0626     }
0627 
0628 unlock:
0629     mutex_unlock(&algs_lock);
0630     return ret;
0631 }
0632 
0633 void virtio_crypto_skcipher_algs_unregister(struct virtio_crypto *vcrypto)
0634 {
0635     int i = 0;
0636 
0637     mutex_lock(&algs_lock);
0638 
0639     for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {
0640 
0641         uint32_t service = virtio_crypto_algs[i].service;
0642         uint32_t algonum = virtio_crypto_algs[i].algonum;
0643 
0644         if (virtio_crypto_algs[i].active_devs == 0 ||
0645             !virtcrypto_algo_is_supported(vcrypto, service, algonum))
0646             continue;
0647 
0648         if (virtio_crypto_algs[i].active_devs == 1)
0649             crypto_unregister_skcipher(&virtio_crypto_algs[i].algo);
0650 
0651         virtio_crypto_algs[i].active_devs--;
0652     }
0653 
0654     mutex_unlock(&algs_lock);
0655 }