/* net/ipv4/ah4.c — IPv4 IPsec Authentication Header (AH) protocol handler. */
0001 // SPDX-License-Identifier: GPL-2.0-only
0002 #define pr_fmt(fmt) "IPsec: " fmt
0003 
0004 #include <crypto/algapi.h>
0005 #include <crypto/hash.h>
0006 #include <linux/err.h>
0007 #include <linux/module.h>
0008 #include <linux/slab.h>
0009 #include <net/ip.h>
0010 #include <net/xfrm.h>
0011 #include <net/ah.h>
0012 #include <linux/crypto.h>
0013 #include <linux/pfkeyv2.h>
0014 #include <linux/scatterlist.h>
0015 #include <net/icmp.h>
0016 #include <net/protocol.h>
0017 
/*
 * Per-skb private state for async AH processing, overlaid on skb->cb.
 * Layout starts with xfrm's own cb so the xfrm core sees what it expects.
 */
struct ah_skb_cb {
    struct xfrm_skb_cb xfrm;
    void *tmp;  /* scratch buffer from ah_alloc_tmp(); freed in the completion path */
};

#define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0]))
0025 static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
0026               unsigned int size)
0027 {
0028     unsigned int len;
0029 
0030     len = size + crypto_ahash_digestsize(ahash) +
0031           (crypto_ahash_alignmask(ahash) &
0032            ~(crypto_tfm_ctx_alignment() - 1));
0033 
0034     len = ALIGN(len, crypto_tfm_ctx_alignment());
0035 
0036     len += sizeof(struct ahash_request) + crypto_ahash_reqsize(ahash);
0037     len = ALIGN(len, __alignof__(struct scatterlist));
0038 
0039     len += sizeof(struct scatterlist) * nfrags;
0040 
0041     return kmalloc(len, GFP_ATOMIC);
0042 }
0043 
0044 static inline u8 *ah_tmp_auth(void *tmp, unsigned int offset)
0045 {
0046     return tmp + offset;
0047 }
0048 
/*
 * ICV area inside the scratch buffer: @offset bytes in, rounded up to
 * the hash transform's alignment requirement.
 */
static inline u8 *ah_tmp_icv(struct crypto_ahash *ahash, void *tmp,
                 unsigned int offset)
{
    return PTR_ALIGN((u8 *)tmp + offset, crypto_ahash_alignmask(ahash) + 1);
}
0054 
/*
 * Locate the ahash_request that follows the ICV area (aligned for the
 * transform context) and bind it to @ahash before returning it.
 */
static inline struct ahash_request *ah_tmp_req(struct crypto_ahash *ahash,
                           u8 *icv)
{
    struct ahash_request *req;

    req = (void *)PTR_ALIGN(icv + crypto_ahash_digestsize(ahash),
                crypto_tfm_ctx_alignment());

    ahash_request_set_tfm(req, ahash);

    return req;
}
0067 
/*
 * Scatterlist array placed after the request and its transform context,
 * aligned for struct scatterlist.
 */
static inline struct scatterlist *ah_req_sg(struct crypto_ahash *ahash,
                         struct ahash_request *req)
{
    return (void *)ALIGN((unsigned long)(req + 1) +
                 crypto_ahash_reqsize(ahash),
                 __alignof__(struct scatterlist));
}
0075 
/* Clear mutable options and find final destination to substitute
 * into IP header for icv calculation. Options are already checked
 * for validity, so paranoia is not required.
 *
 * Immutable options (security, CIPSO, router alert, ...) are left
 * intact; for source routes the last hop address is copied into
 * *daddr first.  Returns 0 on success, -EINVAL on a malformed
 * option length. */

static int ip_clear_mutable_options(const struct iphdr *iph, __be32 *daddr)
{
    unsigned char *optptr = (unsigned char *)(iph+1);
    int  l = iph->ihl*4 - sizeof(struct iphdr);
    int  optlen;

    while (l > 0) {
        switch (*optptr) {
        case IPOPT_END:
            return 0;
        case IPOPT_NOOP:
            /* Single-byte option, no length field. */
            l--;
            optptr++;
            continue;
        }
        optlen = optptr[1];
        if (optlen<2 || optlen>l)
            return -EINVAL;
        switch (*optptr) {
        /* These options are immutable in transit (RFC 4302 A3): keep. */
        case IPOPT_SEC:
        case 0x85:  /* Some "Extended Security" crap. */
        case IPOPT_CIPSO:
        case IPOPT_RA:
        case 0x80|21:   /* RFC1770 */
            break;
        case IPOPT_LSRR:
        case IPOPT_SSRR:
            if (optlen < 6)
                return -EINVAL;
            /* Last address in the route is the ICV destination. */
            memcpy(daddr, optptr+optlen-4, 4);
            fallthrough;
        default:
            /* Everything else is mutable: zero it for the ICV. */
            memset(optptr, 0, optlen);
        }
        l -= optlen;
        optptr += optlen;
    }
    return 0;
}
0119 
/*
 * Async completion for output hashing: copy the computed ICV into the
 * AH header, restore the mutable IP fields saved by ah_output(), free
 * the scratch buffer and resume xfrm output processing.
 */
static void ah_output_done(struct crypto_async_request *base, int err)
{
    u8 *icv;
    struct iphdr *iph;
    struct sk_buff *skb = base->data;
    struct xfrm_state *x = skb_dst(skb)->xfrm;
    struct ah_data *ahp = x->data;
    struct iphdr *top_iph = ip_hdr(skb);
    struct ip_auth_hdr *ah = ip_auth_hdr(skb);
    int ihl = ip_hdrlen(skb);

    /* tmp = saved IP header copy; ICV lives right after it. */
    iph = AH_SKB_CB(skb)->tmp;
    icv = ah_tmp_icv(ahp->ahash, iph, ihl);
    memcpy(ah->auth_data, icv, ahp->icv_trunc_len);

    /* Put back the mutable fields that were zeroed for hashing. */
    top_iph->tos = iph->tos;
    top_iph->ttl = iph->ttl;
    top_iph->frag_off = iph->frag_off;
    if (top_iph->ihl != 5) {
        /* Options were cleared too; restore the saved copy. */
        top_iph->daddr = iph->daddr;
        memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
    }

    kfree(AH_SKB_CB(skb)->tmp);
    xfrm_output_resume(skb->sk, skb, err);
}
0146 
/*
 * xfrm output handler: build the AH header and compute the ICV over
 * the packet with mutable IP fields (tos/ttl/frag_off/check, options)
 * zeroed as RFC 4302 requires.  The original values are saved in a
 * scratch buffer and restored afterwards — here on the synchronous
 * path, or in ah_output_done() if the hash completes asynchronously
 * (-EINPROGRESS).  Returns 0, a negative errno, or NET_XMIT_DROP.
 */
static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
{
    int err;
    int nfrags;
    int ihl;
    u8 *icv;
    struct sk_buff *trailer;
    struct crypto_ahash *ahash;
    struct ahash_request *req;
    struct scatterlist *sg;
    struct iphdr *iph, *top_iph;
    struct ip_auth_hdr *ah;
    struct ah_data *ahp;
    int seqhi_len = 0;
    __be32 *seqhi;
    int sglists = 0;
    struct scatterlist *seqhisg;

    ahp = x->data;
    ahash = ahp->ahash;

    if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
        goto out;
    nfrags = err;

    skb_push(skb, -skb_network_offset(skb));
    ah = ip_auth_hdr(skb);
    ihl = ip_hdrlen(skb);

    /* ESN needs one extra sg entry for the high sequence bits. */
    if (x->props.flags & XFRM_STATE_ESN) {
        sglists = 1;
        seqhi_len = sizeof(*seqhi);
    }
    err = -ENOMEM;
    /* Scratch layout: saved IP header | seqhi | ICV | req | sg[]. */
    iph = ah_alloc_tmp(ahash, nfrags + sglists, ihl + seqhi_len);
    if (!iph)
        goto out;
    seqhi = (__be32 *)((char *)iph + ihl);
    icv = ah_tmp_icv(ahash, seqhi, seqhi_len);
    req = ah_tmp_req(ahash, icv);
    sg = ah_req_sg(ahash, req);
    seqhisg = sg + nfrags;

    /* ICV field must be zero while it is being computed. */
    memset(ah->auth_data, 0, ahp->icv_trunc_len);

    top_iph = ip_hdr(skb);

    /* Save mutable fields, then zero them below for the ICV. */
    iph->tos = top_iph->tos;
    iph->ttl = top_iph->ttl;
    iph->frag_off = top_iph->frag_off;

    if (top_iph->ihl != 5) {
        /* Save options and daddr, then clear the mutable ones. */
        iph->daddr = top_iph->daddr;
        memcpy(iph+1, top_iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
        err = ip_clear_mutable_options(top_iph, &top_iph->daddr);
        if (err)
            goto out_free;
    }

    ah->nexthdr = *skb_mac_header(skb);
    *skb_mac_header(skb) = IPPROTO_AH;

    top_iph->tos = 0;
    top_iph->tot_len = htons(skb->len);
    top_iph->frag_off = 0;
    top_iph->ttl = 0;
    top_iph->check = 0;

    /* hdrlen is in 32-bit words minus 2, per RFC 4302. */
    if (x->props.flags & XFRM_STATE_ALIGN4)
        ah->hdrlen  = (XFRM_ALIGN4(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;
    else
        ah->hdrlen  = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;

    ah->reserved = 0;
    ah->spi = x->id.spi;
    ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

    sg_init_table(sg, nfrags + sglists);
    err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
    if (unlikely(err < 0))
        goto out_free;

    if (x->props.flags & XFRM_STATE_ESN) {
        /* Attach seqhi sg right after packet payload */
        *seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
        sg_set_buf(seqhisg, seqhi, seqhi_len);
    }
    ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
    ahash_request_set_callback(req, 0, ah_output_done, skb);

    /* Completion handler frees this on the async path. */
    AH_SKB_CB(skb)->tmp = iph;

    err = crypto_ahash_digest(req);
    if (err) {
        if (err == -EINPROGRESS)
            goto out;

        if (err == -ENOSPC)
            err = NET_XMIT_DROP;
        goto out_free;
    }

    /* Synchronous completion: same epilogue as ah_output_done(). */
    memcpy(ah->auth_data, icv, ahp->icv_trunc_len);

    top_iph->tos = iph->tos;
    top_iph->ttl = iph->ttl;
    top_iph->frag_off = iph->frag_off;
    if (top_iph->ihl != 5) {
        top_iph->daddr = iph->daddr;
        memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
    }

out_free:
    kfree(iph);
out:
    return err;
}
0264 
/*
 * Async completion for input hashing: compare the computed ICV with
 * the value received in the AH header (constant-time), strip the AH
 * header by moving the restored IP header forward over it, then
 * resume xfrm input.  On success err becomes the inner protocol
 * number; on mismatch it is -EBADMSG.
 */
static void ah_input_done(struct crypto_async_request *base, int err)
{
    u8 *auth_data;
    u8 *icv;
    struct iphdr *work_iph;
    struct sk_buff *skb = base->data;
    struct xfrm_state *x = xfrm_input_state(skb);
    struct ah_data *ahp = x->data;
    struct ip_auth_hdr *ah = ip_auth_hdr(skb);
    int ihl = ip_hdrlen(skb);
    int ah_hlen = (ah->hdrlen + 2) << 2;

    if (err)
        goto out;

    /* Scratch layout from ah_input(): saved hdr | auth_data | ICV. */
    work_iph = AH_SKB_CB(skb)->tmp;
    auth_data = ah_tmp_auth(work_iph, ihl);
    icv = ah_tmp_icv(ahp->ahash, auth_data, ahp->icv_trunc_len);

    /* crypto_memneq() avoids timing side channels on the compare. */
    err = crypto_memneq(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG : 0;
    if (err)
        goto out;

    err = ah->nexthdr;

    /* Drop the AH header: replay the saved IP header past it. */
    skb->network_header += ah_hlen;
    memcpy(skb_network_header(skb), work_iph, ihl);
    __skb_pull(skb, ah_hlen + ihl);

    if (x->props.mode == XFRM_MODE_TUNNEL)
        skb_reset_transport_header(skb);
    else
        skb_set_transport_header(skb, -ihl);
out:
    kfree(AH_SKB_CB(skb)->tmp);
    xfrm_input_resume(skb, err);
}
0302 
/*
 * xfrm input handler: validate the AH header length, recompute the
 * ICV over the packet with mutable IP fields zeroed, and compare it
 * against the received value.  The original IP header and received
 * ICV are stashed in a scratch buffer; on success the AH header is
 * stripped and the inner protocol number is returned.  Asynchronous
 * hash completion finishes in ah_input_done().
 */
static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
{
    int ah_hlen;
    int ihl;
    int nexthdr;
    int nfrags;
    u8 *auth_data;
    u8 *icv;
    struct sk_buff *trailer;
    struct crypto_ahash *ahash;
    struct ahash_request *req;
    struct scatterlist *sg;
    struct iphdr *iph, *work_iph;
    struct ip_auth_hdr *ah;
    struct ah_data *ahp;
    int err = -ENOMEM;
    int seqhi_len = 0;
    __be32 *seqhi;
    int sglists = 0;
    struct scatterlist *seqhisg;

    if (!pskb_may_pull(skb, sizeof(*ah)))
        goto out;

    ah = (struct ip_auth_hdr *)skb->data;
    ahp = x->data;
    ahash = ahp->ahash;

    nexthdr = ah->nexthdr;
    ah_hlen = (ah->hdrlen + 2) << 2;

    /* Header must match either the full or truncated ICV length. */
    if (x->props.flags & XFRM_STATE_ALIGN4) {
        if (ah_hlen != XFRM_ALIGN4(sizeof(*ah) + ahp->icv_full_len) &&
            ah_hlen != XFRM_ALIGN4(sizeof(*ah) + ahp->icv_trunc_len))
            goto out;
    } else {
        if (ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_full_len) &&
            ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len))
            goto out;
    }

    if (!pskb_may_pull(skb, ah_hlen))
        goto out;

    /* We are going to _remove_ AH header to keep sockets happy,
     * so... Later this can change. */
    if (skb_unclone(skb, GFP_ATOMIC))
        goto out;

    skb->ip_summed = CHECKSUM_NONE;


    if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
        goto out;
    nfrags = err;

    /* skb data may have moved above; refetch the pointers. */
    ah = (struct ip_auth_hdr *)skb->data;
    iph = ip_hdr(skb);
    ihl = ip_hdrlen(skb);

    /* ESN needs one extra sg entry for the high sequence bits. */
    if (x->props.flags & XFRM_STATE_ESN) {
        sglists = 1;
        seqhi_len = sizeof(*seqhi);
    }

    /* Scratch: saved hdr | seqhi | received ICV | computed ICV | req | sg[]. */
    work_iph = ah_alloc_tmp(ahash, nfrags + sglists, ihl +
                ahp->icv_trunc_len + seqhi_len);
    if (!work_iph) {
        err = -ENOMEM;
        goto out;
    }

    seqhi = (__be32 *)((char *)work_iph + ihl);
    auth_data = ah_tmp_auth(seqhi, seqhi_len);
    icv = ah_tmp_icv(ahash, auth_data, ahp->icv_trunc_len);
    req = ah_tmp_req(ahash, icv);
    sg = ah_req_sg(ahash, req);
    seqhisg = sg + nfrags;

    /* Save header and received ICV, then zero the ICV for hashing. */
    memcpy(work_iph, iph, ihl);
    memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
    memset(ah->auth_data, 0, ahp->icv_trunc_len);

    /* Zero mutable fields as the sender did when computing the ICV. */
    iph->ttl = 0;
    iph->tos = 0;
    iph->frag_off = 0;
    iph->check = 0;
    if (ihl > sizeof(*iph)) {
        __be32 dummy;
        err = ip_clear_mutable_options(iph, &dummy);
        if (err)
            goto out_free;
    }

    skb_push(skb, ihl);

    sg_init_table(sg, nfrags + sglists);
    err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
    if (unlikely(err < 0))
        goto out_free;

    if (x->props.flags & XFRM_STATE_ESN) {
        /* Attach seqhi sg right after packet payload */
        *seqhi = XFRM_SKB_CB(skb)->seq.input.hi;
        sg_set_buf(seqhisg, seqhi, seqhi_len);
    }
    ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
    ahash_request_set_callback(req, 0, ah_input_done, skb);

    /* Completion handler frees this on the async path. */
    AH_SKB_CB(skb)->tmp = work_iph;

    err = crypto_ahash_digest(req);
    if (err) {
        if (err == -EINPROGRESS)
            goto out;

        goto out_free;
    }

    /* Synchronous completion: constant-time ICV compare. */
    err = crypto_memneq(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG : 0;
    if (err)
        goto out_free;

    /* Strip the AH header: replay the saved IP header past it. */
    skb->network_header += ah_hlen;
    memcpy(skb_network_header(skb), work_iph, ihl);
    __skb_pull(skb, ah_hlen + ihl);
    if (x->props.mode == XFRM_MODE_TUNNEL)
        skb_reset_transport_header(skb);
    else
        skb_set_transport_header(skb, -ihl);

    err = nexthdr;

out_free:
    kfree (work_iph);
out:
    return err;
}
0441 
0442 static int ah4_err(struct sk_buff *skb, u32 info)
0443 {
0444     struct net *net = dev_net(skb->dev);
0445     const struct iphdr *iph = (const struct iphdr *)skb->data;
0446     struct ip_auth_hdr *ah = (struct ip_auth_hdr *)(skb->data+(iph->ihl<<2));
0447     struct xfrm_state *x;
0448 
0449     switch (icmp_hdr(skb)->type) {
0450     case ICMP_DEST_UNREACH:
0451         if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
0452             return 0;
0453         break;
0454     case ICMP_REDIRECT:
0455         break;
0456     default:
0457         return 0;
0458     }
0459 
0460     x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
0461                   ah->spi, IPPROTO_AH, AF_INET);
0462     if (!x)
0463         return 0;
0464 
0465     if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
0466         ipv4_update_pmtu(skb, net, info, 0, IPPROTO_AH);
0467     else
0468         ipv4_redirect(skb, net, 0, IPPROTO_AH);
0469     xfrm_state_put(x);
0470 
0471     return 0;
0472 }
0473 
/*
 * Initialize an AH SA: allocate the hash transform named by the SA's
 * auth algorithm, set its key, sanity-check the digest size against
 * the xfrm algorithm description, and compute the per-packet header
 * overhead.  Returns 0 or -EINVAL/-ENOMEM.
 */
static int ah_init_state(struct xfrm_state *x)
{
    struct ah_data *ahp = NULL;
    struct xfrm_algo_desc *aalg_desc;
    struct crypto_ahash *ahash;

    /* AH requires an auth algorithm and does not support encapsulation. */
    if (!x->aalg)
        goto error;

    if (x->encap)
        goto error;

    ahp = kzalloc(sizeof(*ahp), GFP_KERNEL);
    if (!ahp)
        return -ENOMEM;

    ahash = crypto_alloc_ahash(x->aalg->alg_name, 0, 0);
    if (IS_ERR(ahash))
        goto error;

    ahp->ahash = ahash;
    if (crypto_ahash_setkey(ahash, x->aalg->alg_key,
                (x->aalg->alg_key_len + 7) / 8))
        goto error;

    /*
     * Lookup the algorithm description maintained by xfrm_algo,
     * verify crypto transform properties, and store information
     * we need for AH processing.  This lookup cannot fail here
     * after a successful crypto_alloc_ahash().
     */
    aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
    BUG_ON(!aalg_desc);

    if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
        crypto_ahash_digestsize(ahash)) {
        pr_info("%s: %s digestsize %u != %u\n",
            __func__, x->aalg->alg_name,
            crypto_ahash_digestsize(ahash),
            aalg_desc->uinfo.auth.icv_fullbits / 8);
        goto error;
    }

    ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
    ahp->icv_trunc_len = x->aalg->alg_trunc_len/8;

    /* Header length is the AH header plus the truncated ICV, padded. */
    if (x->props.flags & XFRM_STATE_ALIGN4)
        x->props.header_len = XFRM_ALIGN4(sizeof(struct ip_auth_hdr) +
                          ahp->icv_trunc_len);
    else
        x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
                          ahp->icv_trunc_len);
    if (x->props.mode == XFRM_MODE_TUNNEL)
        x->props.header_len += sizeof(struct iphdr);
    x->data = ahp;

    return 0;

error:
    /* ahp->ahash is NULL (kzalloc) if the transform was never
     * allocated — presumably crypto_free_ahash() tolerates that;
     * NOTE(review): confirm against the crypto API. */
    if (ahp) {
        crypto_free_ahash(ahp->ahash);
        kfree(ahp);
    }
    return -EINVAL;
}
0539 
0540 static void ah_destroy(struct xfrm_state *x)
0541 {
0542     struct ah_data *ahp = x->data;
0543 
0544     if (!ahp)
0545         return;
0546 
0547     crypto_free_ahash(ahp->ahash);
0548     kfree(ahp);
0549 }
0550 
/* Receive callback hook: AH needs no extra post-input processing. */
static int ah4_rcv_cb(struct sk_buff *skb, int err)
{
    return 0;
}
0555 
/* xfrm type registration: wires AH processing into the xfrm core. */
static const struct xfrm_type ah_type =
{
    .owner      = THIS_MODULE,
    .proto          = IPPROTO_AH,
    .flags      = XFRM_TYPE_REPLAY_PROT,
    .init_state = ah_init_state,
    .destructor = ah_destroy,
    .input      = ah_input,
    .output     = ah_output
};
0566 
/* IPv4 protocol registration for IPPROTO_AH packets. */
static struct xfrm4_protocol ah4_protocol = {
    .handler    =   xfrm4_rcv,
    .input_handler  =   xfrm_input,
    .cb_handler =   ah4_rcv_cb,
    .err_handler    =   ah4_err,
    .priority   =   0,
};
0574 
0575 static int __init ah4_init(void)
0576 {
0577     if (xfrm_register_type(&ah_type, AF_INET) < 0) {
0578         pr_info("%s: can't add xfrm type\n", __func__);
0579         return -EAGAIN;
0580     }
0581     if (xfrm4_protocol_register(&ah4_protocol, IPPROTO_AH) < 0) {
0582         pr_info("%s: can't add protocol\n", __func__);
0583         xfrm_unregister_type(&ah_type, AF_INET);
0584         return -EAGAIN;
0585     }
0586     return 0;
0587 }
0588 
/* Module exit: undo ah4_init() registrations in reverse order. */
static void __exit ah4_fini(void)
{
    if (xfrm4_protocol_deregister(&ah4_protocol, IPPROTO_AH) < 0)
        pr_info("%s: can't remove protocol\n", __func__);
    xfrm_unregister_type(&ah_type, AF_INET);
}
0595 
module_init(ah4_init);
module_exit(ah4_fini);
MODULE_LICENSE("GPL");
/* Autoload when an AF_INET SA of type XFRM_PROTO_AH is requested. */
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_AH);