#define pr_fmt(fmt) "IPv6: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ip6_checksum.h>
#include <net/ip6_route.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/udp.h>
#include <linux/icmpv6.h>
#include <net/tcp.h>
#include <net/espintcp.h>
#include <net/inet6_hashtables.h>

#include <linux/highmem.h>
struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

struct esp_output_extra {
	__be32 seqhi;
	u32 esphoff;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

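/*
 * Allocate a scratch buffer holding, in order: the extra data (the high-order
 * ESN bits, if present), the IV, the AEAD request and finally the scatterlist
 * entries, with each part aligned as the crypto layer requires.
 */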
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqihlen)
{
	unsigned int len;

	len = seqihlen;

	len += crypto_aead_ivsize(aead);

	if (len) {
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}

static inline void *esp_tmp_extra(void *tmp)
{
	return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
{
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp + seqhilen,
			 crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);
	return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}

static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
{
	struct crypto_aead *aead = x->data;
	int extralen = 0;
	u8 *iv;
	struct aead_request *req;
	struct scatterlist *sg;

	if (x->props.flags & XFRM_STATE_ESN)
		extralen += sizeof(struct esp_output_extra);

	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);

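	/* Unref the pages attached to the source scatterlist if the
	 * destination differs; skip the first entry, which covers skb->data.
	 */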
	if (req->src != req->dst)
		for (sg = sg_next(req->src); sg; sg = sg_next(sg))
			put_page(sg_page(sg));
}

#ifdef CONFIG_INET6_ESPINTCP
struct esp_tcp_sk {
	struct sock *sk;
	struct rcu_head rcu;
};

static void esp_free_tcp_sk(struct rcu_head *head)
{
	struct esp_tcp_sk *esk = container_of(head, struct esp_tcp_sk, rcu);

	sock_put(esk->sk);
	kfree(esk);
}

static struct sock *esp6_find_tcp_sk(struct xfrm_state *x)
{
	struct xfrm_encap_tmpl *encap = x->encap;
	struct esp_tcp_sk *esk;
	__be16 sport, dport;
	struct sock *nsk;
	struct sock *sk;

	sk = rcu_dereference(x->encap_sk);
	if (sk && sk->sk_state == TCP_ESTABLISHED)
		return sk;

	spin_lock_bh(&x->lock);
	sport = encap->encap_sport;
	dport = encap->encap_dport;
	nsk = rcu_dereference_protected(x->encap_sk,
					lockdep_is_held(&x->lock));
	if (sk && sk == nsk) {
		esk = kmalloc(sizeof(*esk), GFP_ATOMIC);
		if (!esk) {
			spin_unlock_bh(&x->lock);
			return ERR_PTR(-ENOMEM);
		}
		RCU_INIT_POINTER(x->encap_sk, NULL);
		esk->sk = sk;
		call_rcu(&esk->rcu, esp_free_tcp_sk);
	}
	spin_unlock_bh(&x->lock);

	sk = __inet6_lookup_established(xs_net(x), &tcp_hashinfo, &x->id.daddr.in6,
					dport, &x->props.saddr.in6, ntohs(sport), 0, 0);
	if (!sk)
		return ERR_PTR(-ENOENT);

	if (!tcp_is_ulp_esp(sk)) {
		sock_put(sk);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_bh(&x->lock);
	nsk = rcu_dereference_protected(x->encap_sk,
					lockdep_is_held(&x->lock));
	if (encap->encap_sport != sport ||
	    encap->encap_dport != dport) {
		sock_put(sk);
		sk = nsk ?: ERR_PTR(-EREMCHG);
	} else if (sk == nsk) {
		sock_put(sk);
	} else {
		rcu_assign_pointer(x->encap_sk, sk);
	}
	spin_unlock_bh(&x->lock);

	return sk;
}

static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb)
{
	struct sock *sk;
	int err;

	rcu_read_lock();

	sk = esp6_find_tcp_sk(x);
	err = PTR_ERR_OR_ZERO(sk);
	if (err)
		goto out;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		err = espintcp_queue_out(sk, skb);
	else
		err = espintcp_push_skb(sk, skb);
	bh_unlock_sock(sk);

out:
	rcu_read_unlock();
	return err;
}

static int esp_output_tcp_encap_cb(struct net *net, struct sock *sk,
				   struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_state *x = dst->xfrm;

	return esp_output_tcp_finish(x, skb);
}

static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;

	local_bh_disable();
	err = xfrm_trans_queue_net(xs_net(x), skb, esp_output_tcp_encap_cb);
	local_bh_enable();

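	/* The skb has been queued for transmission over the TCP socket;
	 * returning -EINPROGRESS tells the caller it no longer owns the
	 * packet and must not process or free it further.
	 */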
	return err ?: -EINPROGRESS;
}
#else
static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
	kfree_skb(skb);

	return -EOPNOTSUPP;
}
#endif

static void esp_output_encap_csum(struct sk_buff *skb)
{
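	/* UDP encapsulation with IPv6 requires a valid checksum */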
	if (*skb_mac_header(skb) == IPPROTO_UDP) {
		struct udphdr *uh = udp_hdr(skb);
		struct ipv6hdr *ip6h = ipv6_hdr(skb);
		int len = ntohs(uh->len);
		unsigned int offset = skb_transport_offset(skb);
		__wsum csum = skb_checksum(skb, offset, skb->len - offset, 0);

		uh->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					    len, IPPROTO_UDP, csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}

static void esp_output_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct xfrm_offload *xo = xfrm_offload(skb);
	void *tmp;
	struct xfrm_state *x;

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		struct sec_path *sp = skb_sec_path(skb);

		x = sp->xvec[sp->len - 1];
	} else {
		x = skb_dst(skb)->xfrm;
	}

	tmp = ESP_SKB_CB(skb)->tmp;
	esp_ssg_unref(x, tmp);
	kfree(tmp);

	esp_output_encap_csum(skb);

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		if (err) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));
		secpath_reset(skb);
		xfrm_dev_resume(skb);
	} else {
		if (!err &&
		    x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
			esp_output_tail_tcp(x, skb);
		else
			xfrm_output_resume(skb->sk, skb, err);
	}
}

static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
{
	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
	void *tmp = ESP_SKB_CB(skb)->tmp;
	__be32 *seqhi = esp_tmp_extra(tmp);

	esph->seq_no = esph->spi;
	esph->spi = *seqhi;
}

static void esp_output_restore_header(struct sk_buff *skb)
{
	void *tmp = ESP_SKB_CB(skb)->tmp;
	struct esp_output_extra *extra = esp_tmp_extra(tmp);

	esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
				sizeof(__be32));
}

static struct ip_esp_hdr *esp_output_set_esn(struct sk_buff *skb,
					     struct xfrm_state *x,
					     struct ip_esp_hdr *esph,
					     struct esp_output_extra *extra)
{
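	/* For ESN the ESP header is shifted by 4 bytes so the high-order
	 * sequence number bits sit next to it in the authenticated data;
	 * the original layout is restored after encryption.
	 */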
	if ((x->props.flags & XFRM_STATE_ESN)) {
		__u32 seqhi;
		struct xfrm_offload *xo = xfrm_offload(skb);

		if (xo)
			seqhi = xo->seq.hi;
		else
			seqhi = XFRM_SKB_CB(skb)->seq.output.hi;

		extra->esphoff = (unsigned char *)esph -
				 skb_transport_header(skb);
		esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
		extra->seqhi = esph->spi;
		esph->seq_no = htonl(seqhi);
	}

	esph->spi = x->id.spi;

	return esph;
}

static void esp_output_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_output_restore_header(skb);
	esp_output_done(base, err);
}

static struct ip_esp_hdr *esp6_output_udp_encap(struct sk_buff *skb,
						int encap_type,
						struct esp_info *esp,
						__be16 sport,
						__be16 dport)
{
	struct udphdr *uh;
	__be32 *udpdata32;
	unsigned int len;

	len = skb->len + esp->tailen - skb_transport_offset(skb);
	if (len > U16_MAX)
		return ERR_PTR(-EMSGSIZE);

	uh = (struct udphdr *)esp->esph;
	uh->source = sport;
	uh->dest = dport;
	uh->len = htons(len);
	uh->check = 0;

	*skb_mac_header(skb) = IPPROTO_UDP;

	if (encap_type == UDP_ENCAP_ESPINUDP_NON_IKE) {
		udpdata32 = (__be32 *)(uh + 1);
		udpdata32[0] = udpdata32[1] = 0;
		return (struct ip_esp_hdr *)(udpdata32 + 2);
	}

	return (struct ip_esp_hdr *)(uh + 1);
}

#ifdef CONFIG_INET6_ESPINTCP
static struct ip_esp_hdr *esp6_output_tcp_encap(struct xfrm_state *x,
						struct sk_buff *skb,
						struct esp_info *esp)
{
	__be16 *lenp = (void *)esp->esph;
	struct ip_esp_hdr *esph;
	unsigned int len;
	struct sock *sk;

	len = skb->len + esp->tailen - skb_transport_offset(skb);
	if (len > IP_MAX_MTU)
		return ERR_PTR(-EMSGSIZE);

	rcu_read_lock();
	sk = esp6_find_tcp_sk(x);
	rcu_read_unlock();

	if (IS_ERR(sk))
		return ERR_CAST(sk);

	*lenp = htons(len);
	esph = (struct ip_esp_hdr *)(lenp + 1);

	return esph;
}
#else
static struct ip_esp_hdr *esp6_output_tcp_encap(struct xfrm_state *x,
						struct sk_buff *skb,
						struct esp_info *esp)
{
	return ERR_PTR(-EOPNOTSUPP);
}
#endif

static int esp6_output_encap(struct xfrm_state *x, struct sk_buff *skb,
			     struct esp_info *esp)
{
	struct xfrm_encap_tmpl *encap = x->encap;
	struct ip_esp_hdr *esph;
	__be16 sport, dport;
	int encap_type;

	spin_lock_bh(&x->lock);
	sport = encap->encap_sport;
	dport = encap->encap_dport;
	encap_type = encap->encap_type;
	spin_unlock_bh(&x->lock);

	switch (encap_type) {
	default:
	case UDP_ENCAP_ESPINUDP:
	case UDP_ENCAP_ESPINUDP_NON_IKE:
		esph = esp6_output_udp_encap(skb, encap_type, esp, sport, dport);
		break;
	case TCP_ENCAP_ESPINTCP:
		esph = esp6_output_tcp_encap(x, skb, esp);
		break;
	}

	if (IS_ERR(esph))
		return PTR_ERR(esph);

	esp->esph = esph;

	return 0;
}

int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *tail;
	int nfrags;
	int esph_offset;
	struct page *page;
	struct sk_buff *trailer;
	int tailen = esp->tailen;

	if (x->encap) {
		int err = esp6_output_encap(x, skb, esp);

		if (err < 0)
			return err;
	}

	if (ALIGN(tailen, L1_CACHE_BYTES) > PAGE_SIZE ||
	    ALIGN(skb->data_len, L1_CACHE_BYTES) > PAGE_SIZE)
		goto cow;

	if (!skb_cloned(skb)) {
		if (tailen <= skb_tailroom(skb)) {
			nfrags = 1;
			trailer = skb;
			tail = skb_tail_pointer(trailer);

			goto skip_cow;
		} else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
			   && !skb_has_frag_list(skb)) {
			int allocsize;
			struct sock *sk = skb->sk;
			struct page_frag *pfrag = &x->xfrag;

			esp->inplace = false;

			allocsize = ALIGN(tailen, L1_CACHE_BYTES);

			spin_lock_bh(&x->lock);

			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
				spin_unlock_bh(&x->lock);
				goto cow;
			}

			page = pfrag->page;
			get_page(page);

			tail = page_address(page) + pfrag->offset;

			esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);

			nfrags = skb_shinfo(skb)->nr_frags;

			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
					     tailen);
			skb_shinfo(skb)->nr_frags = ++nfrags;

			pfrag->offset = pfrag->offset + allocsize;

			spin_unlock_bh(&x->lock);

			nfrags++;

			skb->len += tailen;
			skb->data_len += tailen;
			skb->truesize += tailen;
			if (sk && sk_fullsock(sk))
				refcount_add(tailen, &sk->sk_wmem_alloc);

			goto out;
		}
	}

cow:
	esph_offset = (unsigned char *)esp->esph - skb_transport_header(skb);

	nfrags = skb_cow_data(skb, tailen, &trailer);
	if (nfrags < 0)
		goto out;
	tail = skb_tail_pointer(trailer);
	esp->esph = (struct ip_esp_hdr *)(skb_transport_header(skb) + esph_offset);

skip_cow:
	esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
	pskb_put(skb, trailer, tailen);

out:
	return nfrags;
}
EXPORT_SYMBOL_GPL(esp6_output_head);

int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *iv;
	int alen;
	void *tmp;
	int ivlen;
	int assoclen;
	int extralen;
	struct page *page;
	struct ip_esp_hdr *esph;
	struct aead_request *req;
	struct crypto_aead *aead;
	struct scatterlist *sg, *dsg;
	struct esp_output_extra *extra;
	int err = -ENOMEM;

	assoclen = sizeof(struct ip_esp_hdr);
	extralen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		extralen += sizeof(*extra);
		assoclen += sizeof(__be32);
	}

	aead = x->data;
	alen = crypto_aead_authsize(aead);
	ivlen = crypto_aead_ivsize(aead);

	tmp = esp_alloc_tmp(aead, esp->nfrags + 2, extralen);
	if (!tmp)
		goto error;

	extra = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	if (esp->inplace)
		dsg = sg;
	else
		dsg = &sg[esp->nfrags];

	esph = esp_output_set_esn(skb, x, esp->esph, extra);
	esp->esph = esph;

	sg_init_table(sg, esp->nfrags);
	err = skb_to_sgvec(skb, sg,
			   (unsigned char *)esph - skb->data,
			   assoclen + ivlen + esp->clen + alen);
	if (unlikely(err < 0))
		goto error_free;

	if (!esp->inplace) {
		int allocsize;
		struct page_frag *pfrag = &x->xfrag;

		allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);

		spin_lock_bh(&x->lock);
		if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
			spin_unlock_bh(&x->lock);
			goto error_free;
		}

		skb_shinfo(skb)->nr_frags = 1;

		page = pfrag->page;
		get_page(page);

		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
		pfrag->offset = pfrag->offset + allocsize;
		spin_unlock_bh(&x->lock);

		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
		err = skb_to_sgvec(skb, dsg,
				   (unsigned char *)esph - skb->data,
				   assoclen + ivlen + esp->clen + alen);
		if (unlikely(err < 0))
			goto error_free;
	}

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_output_done, skb);

	aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
	aead_request_set_ad(req, assoclen);

	memset(iv, 0, ivlen);
	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
	       min(ivlen, 8));

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_encrypt(req);

	switch (err) {
	case -EINPROGRESS:
		goto error;

	case -ENOSPC:
		err = NET_XMIT_DROP;
		break;

	case 0:
		if ((x->props.flags & XFRM_STATE_ESN))
			esp_output_restore_header(skb);
		esp_output_encap_csum(skb);
	}

	if (sg != dsg)
		esp_ssg_unref(x, tmp);

	if (!err && x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
		err = esp_output_tail_tcp(x, skb);

error_free:
	kfree(tmp);
error:
	return err;
}
EXPORT_SYMBOL_GPL(esp6_output_tail);

static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int alen;
	int blksize;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;

	esp.inplace = true;

	esp.proto = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

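	/* skb is pure payload to encrypt */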
	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			esp.tfclen = padto - skb->len;
	}
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	esp.nfrags = esp6_output_head(x, skb, &esp);
	if (esp.nfrags < 0)
		return esp.nfrags;

	esph = esp.esph;
	esph->spi = x->id.spi;

	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
	esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
				((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

	skb_push(skb, -skb_network_offset(skb));

	return esp6_output_tail(x, skb, &esp);
}

static inline int esp_remove_trailer(struct sk_buff *skb)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct crypto_aead *aead = x->data;
	int alen, hlen, elen;
	int padlen, trimlen;
	__wsum csumdiff;
	u8 nexthdr[2];
	int ret;

	alen = crypto_aead_authsize(aead);
	hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	elen = skb->len - hlen;

	ret = skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2);
	BUG_ON(ret);

	ret = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen) {
		net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
				    padlen + 2, elen - alen);
		goto out;
	}

	trimlen = alen + padlen + 2;
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0);
		skb->csum = csum_block_sub(skb->csum, csumdiff,
					   skb->len - trimlen);
	}
	pskb_trim(skb, skb->len - trimlen);

	ret = nexthdr[1];

out:
	return ret;
}
int esp6_input_done2(struct sk_buff *skb, int err)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int hdr_len = skb_network_header_len(skb);

	if (!xo || !(xo->flags & CRYPTO_DONE))
		kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

	err = esp_remove_trailer(skb);
	if (unlikely(err < 0))
		goto out;

	if (x->encap) {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		int offset = skb_network_offset(skb) + sizeof(*ip6h);
		struct xfrm_encap_tmpl *encap = x->encap;
		u8 nexthdr = ip6h->nexthdr;
		__be16 frag_off, source;
		struct udphdr *uh;
		struct tcphdr *th;

		offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
		if (offset == -1) {
			err = -EINVAL;
			goto out;
		}

		uh = (void *)(skb->data + offset);
		th = (void *)(skb->data + offset);
		hdr_len += offset;

		switch (x->encap->encap_type) {
		case TCP_ENCAP_ESPINTCP:
			source = th->source;
			break;
		case UDP_ENCAP_ESPINUDP:
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			source = uh->source;
			break;
		default:
			WARN_ON_ONCE(1);
			err = -EINVAL;
			goto out;
		}

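		/* If the NAT-T peer's IP address or port changed, advertise
		 * the change to the keying daemon.  This is an inbound SA,
		 * so only the source address and port are compared.
		 */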
		if (!ipv6_addr_equal(&ip6h->saddr, &x->props.saddr.in6) ||
		    source != encap->encap_sport) {
			xfrm_address_t ipaddr;

			memcpy(&ipaddr.a6, &ip6h->saddr.s6_addr, sizeof(ipaddr.a6));
			km_new_mapping(x, &ipaddr, source);
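
			/* Note: no additional policy check is performed here
			 * on the new source address/port; the packet is
			 * accepted and only the mapping change is reported.
			 */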
		}

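		/* For NAT-T in transport mode the inner UDP/TCP checksum was
		 * computed against the pre-NAT addresses and cannot be
		 * verified here, so skip checksum validation.
		 */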
		if (x->props.mode == XFRM_MODE_TRANSPORT)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	skb_postpull_rcsum(skb, skb_network_header(skb),
			   skb_network_header_len(skb));
	skb_pull_rcsum(skb, hlen);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -hdr_len);

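	/* RFC 4303 dummy packets (next header = IPPROTO_NONE) carry no
	 * payload; drop them here.
	 */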
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}
EXPORT_SYMBOL_GPL(esp6_input_done2);

static void esp_input_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	xfrm_input_resume(skb, esp6_input_done2(skb, err));
}

static void esp_input_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, 0);
	__skb_pull(skb, 4);
}

static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
{
	struct xfrm_state *x = xfrm_input_state(skb);

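	/* For ESN the header is moved forward by 4 bytes to accommodate
	 * the high-order sequence number bits; it is moved back after
	 * decryption.
	 */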
	if ((x->props.flags & XFRM_STATE_ESN)) {
		struct ip_esp_hdr *esph = skb_push(skb, 4);

		*seqhi = esph->spi;
		esph->spi = esph->seq_no;
		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
	}
}

static void esp_input_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_input_restore_header(skb);
	esp_input_done(base, err);
}

static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct aead_request *req;
	struct sk_buff *trailer;
	int ivlen = crypto_aead_ivsize(aead);
	int elen = skb->len - sizeof(struct ip_esp_hdr) - ivlen;
	int nfrags;
	int assoclen;
	int seqhilen;
	int ret = 0;
	void *tmp;
	__be32 *seqhi;
	u8 *iv;
	struct scatterlist *sg;

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen)) {
		ret = -EINVAL;
		goto out;
	}

	if (elen <= 0) {
		ret = -EINVAL;
		goto out;
	}

	assoclen = sizeof(struct ip_esp_hdr);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	if (!skb_cloned(skb)) {
		if (!skb_is_nonlinear(skb)) {
			nfrags = 1;

			goto skip_cow;
		} else if (!skb_has_frag_list(skb)) {
			nfrags = skb_shinfo(skb)->nr_frags;
			nfrags++;

			goto skip_cow;
		}
	}

	nfrags = skb_cow_data(skb, 0, &trailer);
	if (nfrags < 0) {
		ret = -EINVAL;
		goto out;
	}

skip_cow:
	ret = -ENOMEM;
	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	esp_input_set_header(skb, seqhi);

	sg_init_table(sg, nfrags);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		kfree(tmp);
		goto out;
	}

	skb->ip_summed = CHECKSUM_NONE;

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_input_done, skb);

	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
	aead_request_set_ad(req, assoclen);

	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS)
		goto out;

	if ((x->props.flags & XFRM_STATE_ESN))
		esp_input_restore_header(skb);

	ret = esp6_input_done2(skb, ret);

out:
	return ret;
}

static int esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		    u8 type, u8 code, int offset, __be32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
	struct xfrm_state *x;

	if (type != ICMPV6_PKT_TOOBIG &&
	    type != NDISC_REDIRECT)
		return 0;

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET6);
	if (!x)
		return 0;

	if (type == NDISC_REDIRECT)
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
	else
		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
	xfrm_state_put(x);

	return 0;
}

static void esp6_destroy(struct xfrm_state *x)
{
	struct crypto_aead *aead = x->data;

	if (!aead)
		return;

	crypto_free_aead(aead);
}

static int esp_init_aead(struct xfrm_state *x)
{
	char aead_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_aead *aead;
	int err;

	err = -ENAMETOOLONG;
	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
		goto error;

	aead = crypto_alloc_aead(aead_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
	if (err)
		goto error;

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
	if (err)
		goto error;

error:
	return err;
}

static int esp_init_authenc(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;

	err = -EINVAL;
	if (!x->ealg)
		goto error;

	err = -ENAMETOOLONG;

	if ((x->props.flags & XFRM_STATE_ESN)) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthencesn(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthenc(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	}

	aead = crypto_alloc_aead(authenc_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;

	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_aead_authsize(aead)) {
			pr_info("ESP: %s digestsize %u != %u\n",
				x->aalg->alg_name,
				crypto_aead_authsize(aead),
				aalg_desc->uinfo.auth.icv_fullbits / 8);
			goto free_key;
		}

		err = crypto_aead_setauthsize(
			aead, x->aalg->alg_trunc_len / 8);
		if (err)
			goto free_key;
	}

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree(key);

error:
	return err;
}

static int esp6_init_state(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	u32 align;
	int err;

	x->data = NULL;

	if (x->aead)
		err = esp_init_aead(x);
	else
		err = esp_init_authenc(x);

	if (err)
		goto error;

	aead = x->data;

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	switch (x->props.mode) {
	case XFRM_MODE_BEET:
		if (x->sel.family != AF_INET6)
			x->props.header_len += IPV4_BEET_PHMAXLEN +
					       (sizeof(struct ipv6hdr) - sizeof(struct iphdr));
		break;
	default:
	case XFRM_MODE_TRANSPORT:
		break;
	case XFRM_MODE_TUNNEL:
		x->props.header_len += sizeof(struct ipv6hdr);
		break;
	}

	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;

		switch (encap->encap_type) {
		default:
			err = -EINVAL;
			goto error;
		case UDP_ENCAP_ESPINUDP:
			x->props.header_len += sizeof(struct udphdr);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
			break;
#ifdef CONFIG_INET6_ESPINTCP
		case TCP_ENCAP_ESPINTCP:
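			/* only the 2-byte length field; the TCP encapsulation
			 * itself is handled by the socket
			 */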
			x->props.header_len += 2;
			break;
#endif
		}
	}

	align = ALIGN(crypto_aead_blocksize(aead), 4);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
	return err;
}

static int esp6_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}

static const struct xfrm_type esp6_type = {
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp6_init_state,
	.destructor	= esp6_destroy,
	.input		= esp6_input,
	.output		= esp6_output,
};

static struct xfrm6_protocol esp6_protocol = {
	.handler	= xfrm6_rcv,
	.input_handler	= xfrm_input,
	.cb_handler	= esp6_rcv_cb,
	.err_handler	= esp6_err,
	.priority	= 0,
};

static int __init esp6_init(void)
{
	if (xfrm_register_type(&esp6_type, AF_INET6) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (xfrm6_protocol_register(&esp6_protocol, IPPROTO_ESP) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&esp6_type, AF_INET6);
		return -EAGAIN;
	}

	return 0;
}

static void __exit esp6_fini(void)
{
	if (xfrm6_protocol_deregister(&esp6_protocol, IPPROTO_ESP) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	xfrm_unregister_type(&esp6_type, AF_INET6);
}

module_init(esp6_init);
module_exit(esp6_fini);

MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);