Back to home page

LXR

 
 

    


0001 /*
0002  * algif_aead: User-space interface for AEAD algorithms
0003  *
0004  * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
0005  *
0006  * This file provides the user-space API for AEAD ciphers.
0007  *
0008  * This file is derived from algif_skcipher.c.
0009  *
0010  * This program is free software; you can redistribute it and/or modify it
0011  * under the terms of the GNU General Public License as published by the Free
0012  * Software Foundation; either version 2 of the License, or (at your option)
0013  * any later version.
0014  */
0015 
0016 #include <crypto/internal/aead.h>
0017 #include <crypto/scatterwalk.h>
0018 #include <crypto/if_alg.h>
0019 #include <linux/init.h>
0020 #include <linux/list.h>
0021 #include <linux/kernel.h>
0022 #include <linux/mm.h>
0023 #include <linux/module.h>
0024 #include <linux/net.h>
0025 #include <net/sock.h>
0026 
/*
 * Fixed-size table of pages queued on the socket for transmission to the
 * cipher (the "tx" side, filled by aead_sendmsg()/aead_sendpage()).
 */
struct aead_sg_list {
    unsigned int cur;                     /* number of sg slots in use */
    struct scatterlist sg[ALG_MAX_PAGES]; /* page table, ALG_MAX_PAGES max */
};
0031 
/* One receive-side scatterlist, linkable into a per-request list. */
struct aead_async_rsgl {
    struct af_alg_sgl sgl;  /* pinned user pages receiving the result */
    struct list_head list;  /* chained on aead_async_req/aead_ctx ->list */
};
0036 
/*
 * Per-request state for the AIO (async) recvmsg path.  Lives in the same
 * sock_kmalloc() allocation as the aead_request and the transform-private
 * context; see GET_REQ_SIZE()/GET_ASYM_REQ().
 */
struct aead_async_req {
    struct scatterlist *tsgl;          /* private copy of the tx sgl */
    struct aead_async_rsgl first_rsgl; /* first rx sgl, embedded to save an alloc */
    struct list_head list;             /* all rx sgls of this request */
    struct kiocb *iocb;                /* AIO completion target */
    unsigned int tsgls;                /* number of entries in tsgl */
    char iv[];                         /* per-request IV copy (ivsize bytes) */
};
0045 
/*
 * Per-socket state of one AEAD operation socket (stored in ask->private).
 */
struct aead_ctx {
    struct aead_sg_list tsgl;          /* tx data queued by sendmsg/sendpage */
    struct aead_async_rsgl first_rsgl; /* first rx sgl of the sync path */
    struct list_head list;             /* rx sgls of the sync path */

    void *iv;                          /* IV buffer, crypto_aead_ivsize() bytes */

    struct af_alg_completion completion; /* sync-path completion */

    unsigned long used;                /* bytes currently queued in tsgl */

    unsigned int len;                  /* size of this allocation (ctx + req ctx) */
    bool more;                         /* sender announced more data (MSG_MORE) */
    bool merge;                        /* next sendmsg may append to the last page */
    bool enc;                          /* true = encrypt, false = decrypt */

    size_t aead_assoclen;              /* length of the AAD prefix within tsgl */
    struct aead_request aead_req;      /* request of the sync path; the
                                        * tfm-private context follows it, hence
                                        * this member must stay last */
};
0065 
0066 static inline int aead_sndbuf(struct sock *sk)
0067 {
0068     struct alg_sock *ask = alg_sk(sk);
0069     struct aead_ctx *ctx = ask->private;
0070 
0071     return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
0072               ctx->used, 0);
0073 }
0074 
0075 static inline bool aead_writable(struct sock *sk)
0076 {
0077     return PAGE_SIZE <= aead_sndbuf(sk);
0078 }
0079 
0080 static inline bool aead_sufficient_data(struct aead_ctx *ctx)
0081 {
0082     unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
0083 
0084     /*
0085      * The minimum amount of memory needed for an AEAD cipher is
0086      * the AAD and in case of decryption the tag.
0087      */
0088     return ctx->used >= ctx->aead_assoclen + (ctx->enc ? 0 : as);
0089 }
0090 
0091 static void aead_reset_ctx(struct aead_ctx *ctx)
0092 {
0093     struct aead_sg_list *sgl = &ctx->tsgl;
0094 
0095     sg_init_table(sgl->sg, ALG_MAX_PAGES);
0096     sgl->cur = 0;
0097     ctx->used = 0;
0098     ctx->more = 0;
0099     ctx->merge = 0;
0100 }
0101 
0102 static void aead_put_sgl(struct sock *sk)
0103 {
0104     struct alg_sock *ask = alg_sk(sk);
0105     struct aead_ctx *ctx = ask->private;
0106     struct aead_sg_list *sgl = &ctx->tsgl;
0107     struct scatterlist *sg = sgl->sg;
0108     unsigned int i;
0109 
0110     for (i = 0; i < sgl->cur; i++) {
0111         if (!sg_page(sg + i))
0112             continue;
0113 
0114         put_page(sg_page(sg + i));
0115         sg_assign_page(sg + i, NULL);
0116     }
0117     aead_reset_ctx(ctx);
0118 }
0119 
0120 static void aead_wmem_wakeup(struct sock *sk)
0121 {
0122     struct socket_wq *wq;
0123 
0124     if (!aead_writable(sk))
0125         return;
0126 
0127     rcu_read_lock();
0128     wq = rcu_dereference(sk->sk_wq);
0129     if (skwq_has_sleeper(wq))
0130         wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
0131                                POLLRDNORM |
0132                                POLLRDBAND);
0133     sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
0134     rcu_read_unlock();
0135 }
0136 
/*
 * Sleep until the sender finished assembling the request (ctx->more
 * cleared) or a signal arrives.  Called with the socket lock held;
 * sk_wait_event() drops and re-acquires it while sleeping.
 *
 * Returns 0 once the data is complete, -EAGAIN for non-blocking
 * sockets (MSG_DONTWAIT) and -ERESTARTSYS when interrupted by a signal.
 */
static int aead_wait_for_data(struct sock *sk, unsigned flags)
{
    DEFINE_WAIT_FUNC(wait, woken_wake_function);
    struct alg_sock *ask = alg_sk(sk);
    struct aead_ctx *ctx = ask->private;
    long timeout;
    int err = -ERESTARTSYS;

    if (flags & MSG_DONTWAIT)
        return -EAGAIN;

    sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
    add_wait_queue(sk_sleep(sk), &wait);
    for (;;) {
        if (signal_pending(current))
            break;
        /* No deadline: wait indefinitely until woken or signalled. */
        timeout = MAX_SCHEDULE_TIMEOUT;
        if (sk_wait_event(sk, &timeout, !ctx->more, &wait)) {
            err = 0;
            break;
        }
    }
    remove_wait_queue(sk_sleep(sk), &wait);

    sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);

    return err;
}
0165 
0166 static void aead_data_wakeup(struct sock *sk)
0167 {
0168     struct alg_sock *ask = alg_sk(sk);
0169     struct aead_ctx *ctx = ask->private;
0170     struct socket_wq *wq;
0171 
0172     if (ctx->more)
0173         return;
0174     if (!ctx->used)
0175         return;
0176 
0177     rcu_read_lock();
0178     wq = rcu_dereference(sk->sk_wq);
0179     if (skwq_has_sleeper(wq))
0180         wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
0181                                POLLRDNORM |
0182                                POLLRDBAND);
0183     sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
0184     rcu_read_unlock();
0185 }
0186 
/*
 * sendmsg() implementation: copy user data (AAD followed by plain- or
 * ciphertext) into kernel pages queued on ctx->tsgl.  Control messages
 * may (re)initialize the operation: ALG_OP_ENCRYPT/ALG_OP_DECRYPT, the
 * IV and the AAD length.  MSG_MORE signals that more data follows before
 * recvmsg may process the request.
 *
 * Returns the number of bytes copied from user space or a negative errno.
 */
static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
    struct sock *sk = sock->sk;
    struct alg_sock *ask = alg_sk(sk);
    struct aead_ctx *ctx = ask->private;
    unsigned ivsize =
        crypto_aead_ivsize(crypto_aead_reqtfm(&ctx->aead_req));
    struct aead_sg_list *sgl = &ctx->tsgl;
    struct af_alg_control con = {};
    long copied = 0;
    bool enc = 0;
    bool init = 0;
    int err = -EINVAL;

    if (msg->msg_controllen) {
        err = af_alg_cmsg_send(msg, &con);
        if (err)
            return err;

        init = 1;
        switch (con.op) {
        case ALG_OP_ENCRYPT:
            enc = 1;
            break;
        case ALG_OP_DECRYPT:
            enc = 0;
            break;
        default:
            return -EINVAL;
        }

        /* A supplied IV must match the transform's IV size exactly. */
        if (con.iv && con.iv->ivlen != ivsize)
            return -EINVAL;
    }

    lock_sock(sk);
    /* A previous, fully assembled request must be consumed first. */
    if (!ctx->more && ctx->used)
        goto unlock;

    if (init) {
        ctx->enc = enc;
        if (con.iv)
            memcpy(ctx->iv, con.iv->iv, ivsize);

        ctx->aead_assoclen = con.aead_assoclen;
    }

    while (size) {
        size_t len = size;
        struct scatterlist *sg = NULL;

        /* use the existing memory in an allocated page */
        if (ctx->merge) {
            sg = sgl->sg + sgl->cur - 1;
            len = min_t(unsigned long, len,
                    PAGE_SIZE - sg->offset - sg->length);
            err = memcpy_from_msg(page_address(sg_page(sg)) +
                          sg->offset + sg->length,
                          msg, len);
            if (err)
                goto unlock;

            sg->length += len;
            /* Keep merging while the last page has room left. */
            ctx->merge = (sg->offset + sg->length) &
                     (PAGE_SIZE - 1);

            ctx->used += len;
            copied += len;
            size -= len;
            continue;
        }

        if (!aead_writable(sk)) {
            /* user space sent too much data */
            aead_put_sgl(sk);
            err = -EMSGSIZE;
            goto unlock;
        }

        /* allocate a new page */
        len = min_t(unsigned long, size, aead_sndbuf(sk));
        while (len) {
            size_t plen = 0;

            if (sgl->cur >= ALG_MAX_PAGES) {
                aead_put_sgl(sk);
                err = -E2BIG;
                goto unlock;
            }

            sg = sgl->sg + sgl->cur;
            plen = min_t(size_t, len, PAGE_SIZE);

            sg_assign_page(sg, alloc_page(GFP_KERNEL));
            err = -ENOMEM;
            if (!sg_page(sg))
                goto unlock;

            err = memcpy_from_msg(page_address(sg_page(sg)),
                          msg, plen);
            if (err) {
                __free_page(sg_page(sg));
                sg_assign_page(sg, NULL);
                goto unlock;
            }

            sg->offset = 0;
            sg->length = plen;
            len -= plen;
            ctx->used += plen;
            copied += plen;
            sgl->cur++;
            size -= plen;
            /* A partially filled page may be appended to later. */
            ctx->merge = plen & (PAGE_SIZE - 1);
        }
    }

    err = 0;

    ctx->more = msg->msg_flags & MSG_MORE;
    if (!ctx->more && !aead_sufficient_data(ctx)) {
        /* Request declared complete but lacks the AAD and/or tag. */
        aead_put_sgl(sk);
        err = -EMSGSIZE;
    }

unlock:
    aead_data_wakeup(sk);
    release_sock(sk);

    return err ?: copied;
}
0318 
/*
 * sendpage() implementation: splice a caller-supplied page into the tx
 * sgl without copying.  MSG_SENDPAGE_NOTLAST is mapped to MSG_MORE so
 * intermediate splice chunks do not terminate the request.
 *
 * Returns the number of bytes queued or a negative errno.
 */
static ssize_t aead_sendpage(struct socket *sock, struct page *page,
                 int offset, size_t size, int flags)
{
    struct sock *sk = sock->sk;
    struct alg_sock *ask = alg_sk(sk);
    struct aead_ctx *ctx = ask->private;
    struct aead_sg_list *sgl = &ctx->tsgl;
    int err = -EINVAL;

    if (flags & MSG_SENDPAGE_NOTLAST)
        flags |= MSG_MORE;

    if (sgl->cur >= ALG_MAX_PAGES)
        return -E2BIG;

    lock_sock(sk);
    /* A previous, fully assembled request must be consumed first. */
    if (!ctx->more && ctx->used)
        goto unlock;

    if (!size)
        goto done;

    if (!aead_writable(sk)) {
        /* user space sent too much data */
        aead_put_sgl(sk);
        err = -EMSGSIZE;
        goto unlock;
    }

    /* A zero-copy page must not be appended to by later sendmsg calls. */
    ctx->merge = 0;

    get_page(page);
    sg_set_page(sgl->sg + sgl->cur, page, size, offset);
    sgl->cur++;
    ctx->used += size;

    err = 0;

done:
    ctx->more = flags & MSG_MORE;
    if (!ctx->more && !aead_sufficient_data(ctx)) {
        /* Request declared complete but lacks the AAD and/or tag. */
        aead_put_sgl(sk);
        err = -EMSGSIZE;
    }

unlock:
    aead_data_wakeup(sk);
    release_sock(sk);

    return err ?: size;
}
0370 
/*
 * Locate the struct aead_async_req that trails the aead_request and the
 * transform-private context inside the single sock_kmalloc() region
 * (see GET_REQ_SIZE for the layout).  Fully parenthesized -- including
 * the macro arguments and the complete expansion -- so it is safe in any
 * expression context, unlike the previous unparenthesized cast.
 */
#define GET_ASYM_REQ(req, tfm) ((struct aead_async_req *) \
        ((char *)(req) + sizeof(struct aead_request) + \
         crypto_aead_reqsize(tfm)))
0374 
/*
 * Total allocation size for one async request: the aead_request plus the
 * transform-private request context, the bookkeeping structure and the
 * trailing IV copy.  The expansion is wrapped in parentheses -- the
 * previous definition expanded to a bare sum, which would misparse in
 * any surrounding expression (e.g. multiplication).  The stray leading
 * space before "#define" is dropped as well.
 */
#define GET_REQ_SIZE(tfm) (sizeof(struct aead_async_req) + \
    crypto_aead_reqsize(tfm) + crypto_aead_ivsize(tfm) + \
    sizeof(struct aead_request))
0378 
/*
 * Completion callback of the AIO recvmsg path: release all rx and tx
 * resources of the finished request and complete the user's kiocb.
 * Runs in the crypto driver's completion context.
 */
static void aead_async_cb(struct crypto_async_request *_req, int err)
{
    struct sock *sk = _req->data;
    struct alg_sock *ask = alg_sk(sk);
    struct aead_ctx *ctx = ask->private;
    struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
    struct aead_request *req = aead_request_cast(_req);
    struct aead_async_req *areq = GET_ASYM_REQ(req, tfm);
    struct scatterlist *sg = areq->tsgl;
    struct aead_async_rsgl *rsgl;
    struct kiocb *iocb = areq->iocb;
    unsigned int i, reqlen = GET_REQ_SIZE(tfm);

    /* Unpin the user pages that received the result. */
    list_for_each_entry(rsgl, &areq->list, list) {
        af_alg_free_sg(&rsgl->sgl);
        if (rsgl != &areq->first_rsgl)
            sock_kfree_s(sk, rsgl, sizeof(*rsgl));
    }

    /* Drop the page references held by the private tx sgl copy. */
    for (i = 0; i < areq->tsgls; i++)
        put_page(sg_page(sg + i));

    sock_kfree_s(sk, areq->tsgl, sizeof(*areq->tsgl) * areq->tsgls);
    sock_kfree_s(sk, req, reqlen);
    /* Matches the sock_hold() taken when the request was queued. */
    __sock_put(sk);
    iocb->ki_complete(iocb, err, err);
}
0406 
0407 static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
0408                   int flags)
0409 {
0410     struct sock *sk = sock->sk;
0411     struct alg_sock *ask = alg_sk(sk);
0412     struct aead_ctx *ctx = ask->private;
0413     struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
0414     struct aead_async_req *areq;
0415     struct aead_request *req = NULL;
0416     struct aead_sg_list *sgl = &ctx->tsgl;
0417     struct aead_async_rsgl *last_rsgl = NULL, *rsgl;
0418     unsigned int as = crypto_aead_authsize(tfm);
0419     unsigned int i, reqlen = GET_REQ_SIZE(tfm);
0420     int err = -ENOMEM;
0421     unsigned long used;
0422     size_t outlen = 0;
0423     size_t usedpages = 0;
0424 
0425     lock_sock(sk);
0426     if (ctx->more) {
0427         err = aead_wait_for_data(sk, flags);
0428         if (err)
0429             goto unlock;
0430     }
0431 
0432     if (!aead_sufficient_data(ctx))
0433         goto unlock;
0434 
0435     used = ctx->used;
0436     if (ctx->enc)
0437         outlen = used + as;
0438     else
0439         outlen = used - as;
0440 
0441     req = sock_kmalloc(sk, reqlen, GFP_KERNEL);
0442     if (unlikely(!req))
0443         goto unlock;
0444 
0445     areq = GET_ASYM_REQ(req, tfm);
0446     memset(&areq->first_rsgl, '\0', sizeof(areq->first_rsgl));
0447     INIT_LIST_HEAD(&areq->list);
0448     areq->iocb = msg->msg_iocb;
0449     memcpy(areq->iv, ctx->iv, crypto_aead_ivsize(tfm));
0450     aead_request_set_tfm(req, tfm);
0451     aead_request_set_ad(req, ctx->aead_assoclen);
0452     aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
0453                   aead_async_cb, sk);
0454     used -= ctx->aead_assoclen;
0455 
0456     /* take over all tx sgls from ctx */
0457     areq->tsgl = sock_kmalloc(sk,
0458                   sizeof(*areq->tsgl) * max_t(u32, sgl->cur, 1),
0459                   GFP_KERNEL);
0460     if (unlikely(!areq->tsgl))
0461         goto free;
0462 
0463     sg_init_table(areq->tsgl, max_t(u32, sgl->cur, 1));
0464     for (i = 0; i < sgl->cur; i++)
0465         sg_set_page(&areq->tsgl[i], sg_page(&sgl->sg[i]),
0466                 sgl->sg[i].length, sgl->sg[i].offset);
0467 
0468     areq->tsgls = sgl->cur;
0469 
0470     /* create rx sgls */
0471     while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
0472         size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
0473                       (outlen - usedpages));
0474 
0475         if (list_empty(&areq->list)) {
0476             rsgl = &areq->first_rsgl;
0477 
0478         } else {
0479             rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
0480             if (unlikely(!rsgl)) {
0481                 err = -ENOMEM;
0482                 goto free;
0483             }
0484         }
0485         rsgl->sgl.npages = 0;
0486         list_add_tail(&rsgl->list, &areq->list);
0487 
0488         /* make one iovec available as scatterlist */
0489         err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
0490         if (err < 0)
0491             goto free;
0492 
0493         usedpages += err;
0494 
0495         /* chain the new scatterlist with previous one */
0496         if (last_rsgl)
0497             af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);
0498 
0499         last_rsgl = rsgl;
0500 
0501         iov_iter_advance(&msg->msg_iter, err);
0502     }
0503 
0504     /* ensure output buffer is sufficiently large */
0505     if (usedpages < outlen) {
0506         err = -EINVAL;
0507         goto unlock;
0508     }
0509 
0510     aead_request_set_crypt(req, areq->tsgl, areq->first_rsgl.sgl.sg, used,
0511                    areq->iv);
0512     err = ctx->enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
0513     if (err) {
0514         if (err == -EINPROGRESS) {
0515             sock_hold(sk);
0516             err = -EIOCBQUEUED;
0517             aead_reset_ctx(ctx);
0518             goto unlock;
0519         } else if (err == -EBADMSG) {
0520             aead_put_sgl(sk);
0521         }
0522         goto free;
0523     }
0524     aead_put_sgl(sk);
0525 
0526 free:
0527     list_for_each_entry(rsgl, &areq->list, list) {
0528         af_alg_free_sg(&rsgl->sgl);
0529         if (rsgl != &areq->first_rsgl)
0530             sock_kfree_s(sk, rsgl, sizeof(*rsgl));
0531     }
0532     if (areq->tsgl)
0533         sock_kfree_s(sk, areq->tsgl, sizeof(*areq->tsgl) * areq->tsgls);
0534     if (req)
0535         sock_kfree_s(sk, req, reqlen);
0536 unlock:
0537     aead_wmem_wakeup(sk);
0538     release_sock(sk);
0539     return err ? err : outlen;
0540 }
0541 
/*
 * Synchronous receive path: convert the caller's rx iovecs into
 * scatterlists, run the cipher over the queued tx data and wait for
 * completion.  Returns the output length or a negative errno.
 */
static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
{
    struct sock *sk = sock->sk;
    struct alg_sock *ask = alg_sk(sk);
    struct aead_ctx *ctx = ask->private;
    unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
    struct aead_sg_list *sgl = &ctx->tsgl;
    struct aead_async_rsgl *last_rsgl = NULL;
    struct aead_async_rsgl *rsgl, *tmp;
    int err = -EINVAL;
    unsigned long used = 0;
    size_t outlen = 0;
    size_t usedpages = 0;

    lock_sock(sk);

    /*
     * Please see documentation of aead_request_set_crypt for the
     * description of the AEAD memory structure expected from the caller.
     */

    if (ctx->more) {
        err = aead_wait_for_data(sk, flags);
        if (err)
            goto unlock;
    }

    /* data length provided by caller via sendmsg/sendpage */
    used = ctx->used;

    /*
     * Make sure sufficient data is present -- note, the same check is
     * also present in sendmsg/sendpage. The checks in sendpage/sendmsg
     * shall provide an information to the data sender that something is
     * wrong, but they are irrelevant to maintain the kernel integrity.
     * We need this check here too in case user space decides to not honor
     * the error message in sendmsg/sendpage and still call recvmsg. This
     * check here protects the kernel integrity.
     */
    if (!aead_sufficient_data(ctx))
        goto unlock;

    /*
     * Calculate the minimum output buffer size holding the result of the
     * cipher operation. When encrypting data, the receiving buffer is
     * larger by the tag length compared to the input buffer as the
     * encryption operation generates the tag. For decryption, the input
     * buffer provides the tag which is consumed resulting in only the
     * plaintext without a buffer for the tag returned to the caller.
     */
    if (ctx->enc)
        outlen = used + as;
    else
        outlen = used - as;

    /*
     * The cipher operation input data is reduced by the associated data
     * length as this data is processed separately later on.
     */
    used -= ctx->aead_assoclen;

    /* convert iovecs of output buffers into scatterlists */
    while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
        size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
                      (outlen - usedpages));

        if (list_empty(&ctx->list)) {
            /* First segment uses the embedded rsgl, saving an alloc. */
            rsgl = &ctx->first_rsgl;
        } else {
            rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
            if (unlikely(!rsgl)) {
                err = -ENOMEM;
                goto unlock;
            }
        }
        rsgl->sgl.npages = 0;
        list_add_tail(&rsgl->list, &ctx->list);

        /* make one iovec available as scatterlist */
        err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
        if (err < 0)
            goto unlock;
        usedpages += err;
        /* chain the new scatterlist with previous one */
        if (last_rsgl)
            af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);

        last_rsgl = rsgl;

        iov_iter_advance(&msg->msg_iter, err);
    }

    /* ensure output buffer is sufficiently large */
    if (usedpages < outlen) {
        err = -EINVAL;
        goto unlock;
    }

    sg_mark_end(sgl->sg + sgl->cur - 1);
    aead_request_set_crypt(&ctx->aead_req, sgl->sg, ctx->first_rsgl.sgl.sg,
                   used, ctx->iv);
    aead_request_set_ad(&ctx->aead_req, ctx->aead_assoclen);

    err = af_alg_wait_for_completion(ctx->enc ?
                     crypto_aead_encrypt(&ctx->aead_req) :
                     crypto_aead_decrypt(&ctx->aead_req),
                     &ctx->completion);

    if (err) {
        /* EBADMSG implies a valid cipher operation took place */
        if (err == -EBADMSG)
            aead_put_sgl(sk);

        goto unlock;
    }

    aead_put_sgl(sk);
    err = 0;

unlock:
    /* Release all rx sgls regardless of success or failure. */
    list_for_each_entry_safe(rsgl, tmp, &ctx->list, list) {
        af_alg_free_sg(&rsgl->sgl);
        list_del(&rsgl->list);
        if (rsgl != &ctx->first_rsgl)
            sock_kfree_s(sk, rsgl, sizeof(*rsgl));
    }
    INIT_LIST_HEAD(&ctx->list);
    aead_wmem_wakeup(sk);
    release_sock(sk);

    return err ? err : outlen;
}
0674 
0675 static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored,
0676             int flags)
0677 {
0678     return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ?
0679         aead_recvmsg_async(sock, msg, flags) :
0680         aead_recvmsg_sync(sock, msg, flags);
0681 }
0682 
0683 static unsigned int aead_poll(struct file *file, struct socket *sock,
0684                   poll_table *wait)
0685 {
0686     struct sock *sk = sock->sk;
0687     struct alg_sock *ask = alg_sk(sk);
0688     struct aead_ctx *ctx = ask->private;
0689     unsigned int mask;
0690 
0691     sock_poll_wait(file, sk_sleep(sk), wait);
0692     mask = 0;
0693 
0694     if (!ctx->more)
0695         mask |= POLLIN | POLLRDNORM;
0696 
0697     if (aead_writable(sk))
0698         mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
0699 
0700     return mask;
0701 }
0702 
/*
 * Socket operations of the "aead" operation socket.  Everything not
 * meaningful for an algorithm socket maps to the sock_no_* stubs.
 */
static struct proto_ops algif_aead_ops = {
    .family     =   PF_ALG,

    .connect    =   sock_no_connect,
    .socketpair =   sock_no_socketpair,
    .getname    =   sock_no_getname,
    .ioctl      =   sock_no_ioctl,
    .listen     =   sock_no_listen,
    .shutdown   =   sock_no_shutdown,
    .getsockopt =   sock_no_getsockopt,
    .mmap       =   sock_no_mmap,
    .bind       =   sock_no_bind,
    .accept     =   sock_no_accept,
    .setsockopt =   sock_no_setsockopt,

    .release    =   af_alg_release,
    .sendmsg    =   aead_sendmsg,
    .sendpage   =   aead_sendpage,
    .recvmsg    =   aead_recvmsg,
    .poll       =   aead_poll,
};
0724 
0725 static void *aead_bind(const char *name, u32 type, u32 mask)
0726 {
0727     return crypto_alloc_aead(name, type, mask);
0728 }
0729 
static void aead_release(void *private)
{
    /* Drop the transform reference taken in aead_bind(). */
    crypto_free_aead(private);
}
0734 
static int aead_setauthsize(void *private, unsigned int authsize)
{
    /* Forward the requested tag length to the transform. */
    return crypto_aead_setauthsize(private, authsize);
}
0739 
0740 static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
0741 {
0742     return crypto_aead_setkey(private, key, keylen);
0743 }
0744 
/*
 * Socket destructor: release all queued tx pages, the IV buffer and the
 * context allocation, then drop the reference on the parent (bound)
 * socket.  Runs when the last reference on sk goes away.
 */
static void aead_sock_destruct(struct sock *sk)
{
    struct alg_sock *ask = alg_sk(sk);
    struct aead_ctx *ctx = ask->private;
    unsigned int ivlen = crypto_aead_ivsize(
                crypto_aead_reqtfm(&ctx->aead_req));

    WARN_ON(atomic_read(&sk->sk_refcnt) != 0);
    aead_put_sgl(sk);
    /* The IV may be key-related material: zeroize before freeing. */
    sock_kzfree_s(sk, ctx->iv, ivlen);
    sock_kfree_s(sk, ctx, ctx->len);
    af_alg_release_parent(sk);
}
0758 
0759 static int aead_accept_parent(void *private, struct sock *sk)
0760 {
0761     struct aead_ctx *ctx;
0762     struct alg_sock *ask = alg_sk(sk);
0763     unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(private);
0764     unsigned int ivlen = crypto_aead_ivsize(private);
0765 
0766     ctx = sock_kmalloc(sk, len, GFP_KERNEL);
0767     if (!ctx)
0768         return -ENOMEM;
0769     memset(ctx, 0, len);
0770 
0771     ctx->iv = sock_kmalloc(sk, ivlen, GFP_KERNEL);
0772     if (!ctx->iv) {
0773         sock_kfree_s(sk, ctx, len);
0774         return -ENOMEM;
0775     }
0776     memset(ctx->iv, 0, ivlen);
0777 
0778     ctx->len = len;
0779     ctx->used = 0;
0780     ctx->more = 0;
0781     ctx->merge = 0;
0782     ctx->enc = 0;
0783     ctx->tsgl.cur = 0;
0784     ctx->aead_assoclen = 0;
0785     af_alg_init_completion(&ctx->completion);
0786     sg_init_table(ctx->tsgl.sg, ALG_MAX_PAGES);
0787     INIT_LIST_HEAD(&ctx->list);
0788 
0789     ask->private = ctx;
0790 
0791     aead_request_set_tfm(&ctx->aead_req, private);
0792     aead_request_set_callback(&ctx->aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
0793                   af_alg_complete, &ctx->completion);
0794 
0795     sk->sk_destruct = aead_sock_destruct;
0796 
0797     return 0;
0798 }
0799 
/* Registration record tying the "aead" name to the callbacks above. */
static const struct af_alg_type algif_type_aead = {
    .bind       =   aead_bind,
    .release    =   aead_release,
    .setkey     =   aead_setkey,
    .setauthsize    =   aead_setauthsize,
    .accept     =   aead_accept_parent,
    .ops        =   &algif_aead_ops,
    .name       =   "aead",
    .owner      =   THIS_MODULE
};
0810 
0811 static int __init algif_aead_init(void)
0812 {
0813     return af_alg_register_type(&algif_type_aead);
0814 }
0815 
0816 static void __exit algif_aead_exit(void)
0817 {
0818     int err = af_alg_unregister_type(&algif_type_aead);
0819     BUG_ON(err);
0820 }
0821 
/* Module wiring and metadata. */
module_init(algif_aead_init);
module_exit(algif_aead_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
MODULE_DESCRIPTION("AEAD kernel crypto API user space interface");