// SPDX-License-Identifier: GPL-2.0
/*
 * xfrm_input.c - common input handling for the XFRM (IPsec) transform
 * framework.
 */
#include <linux/bottom_half.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/percpu.h>
#include <net/dst.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/ip_tunnels.h>
#include <net/ip6_tunnel.h>

#include "xfrm_inout.h"

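/* Per-CPU deferral context: packets queued here are re-injected into the
 * stack from a tasklet instead of being processed in the caller's context.
 */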
struct xfrm_trans_tasklet {
        struct tasklet_struct tasklet;
        struct sk_buff_head queue;
};

struct xfrm_trans_cb {
        union {
                struct inet_skb_parm h4;
#if IS_ENABLED(CONFIG_IPV6)
                struct inet6_skb_parm h6;
#endif
        } header;
        int (*finish)(struct net *net, struct sock *sk, struct sk_buff *skb);
        struct net *net;
};

#define XFRM_TRANS_SKB_CB(__skb) ((struct xfrm_trans_cb *)&((__skb)->cb[0]))

static DEFINE_SPINLOCK(xfrm_input_afinfo_lock);
static struct xfrm_input_afinfo const __rcu *xfrm_input_afinfo[2][AF_INET6 + 1];

static struct gro_cells gro_cells;
static struct net_device xfrm_napi_dev;

static DEFINE_PER_CPU(struct xfrm_trans_tasklet, xfrm_trans_tasklet);

int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo)
{
        int err = 0;

        if (WARN_ON(afinfo->family > AF_INET6))
                return -EAFNOSUPPORT;

        spin_lock_bh(&xfrm_input_afinfo_lock);
        if (unlikely(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family]))
                err = -EEXIST;
        else
                rcu_assign_pointer(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family], afinfo);
        spin_unlock_bh(&xfrm_input_afinfo_lock);
        return err;
}
EXPORT_SYMBOL(xfrm_input_register_afinfo);

int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo)
{
        int err = 0;

        spin_lock_bh(&xfrm_input_afinfo_lock);
        if (likely(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family])) {
                if (unlikely(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family] != afinfo))
                        err = -EINVAL;
                else
                        RCU_INIT_POINTER(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family], NULL);
        }
        spin_unlock_bh(&xfrm_input_afinfo_lock);
        synchronize_rcu();
        return err;
}
EXPORT_SYMBOL(xfrm_input_unregister_afinfo);

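/* Returns the afinfo for @family with the RCU read lock held on success;
 * the caller must drop it (xfrm_rcv_cb() does so after the callback).
 */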
static const struct xfrm_input_afinfo *xfrm_input_get_afinfo(u8 family, bool is_ipip)
{
        const struct xfrm_input_afinfo *afinfo;

        if (WARN_ON_ONCE(family > AF_INET6))
                return NULL;

        rcu_read_lock();
        afinfo = rcu_dereference(xfrm_input_afinfo[is_ipip][family]);
        if (unlikely(!afinfo))
                rcu_read_unlock();
        return afinfo;
}

static int xfrm_rcv_cb(struct sk_buff *skb, unsigned int family, u8 protocol,
                       int err)
{
        bool is_ipip = (protocol == IPPROTO_IPIP || protocol == IPPROTO_IPV6);
        const struct xfrm_input_afinfo *afinfo;
        int ret;

        afinfo = xfrm_input_get_afinfo(family, is_ipip);
        if (!afinfo)
                return -EAFNOSUPPORT;

        ret = afinfo->callback(skb, protocol, err);
        rcu_read_unlock();

        return ret;
}

struct sec_path *secpath_set(struct sk_buff *skb)
{
        struct sec_path *sp, *tmp = skb_ext_find(skb, SKB_EXT_SEC_PATH);

        sp = skb_ext_add(skb, SKB_EXT_SEC_PATH);
        if (!sp)
                return NULL;

        if (tmp) /* reuse existing one (was COW'd if needed) */
                return sp;

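        /* allocated new secpath */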
        memset(sp->ovec, 0, sizeof(sp->ovec));
        sp->olen = 0;
        sp->len = 0;

        return sp;
}
EXPORT_SYMBOL(secpath_set);

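/* Fetch spi and seq from ipsec header */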
int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq)
{
        int offset, offset_seq;
        int hlen;

        switch (nexthdr) {
        case IPPROTO_AH:
                hlen = sizeof(struct ip_auth_hdr);
                offset = offsetof(struct ip_auth_hdr, spi);
                offset_seq = offsetof(struct ip_auth_hdr, seq_no);
                break;
        case IPPROTO_ESP:
                hlen = sizeof(struct ip_esp_hdr);
                offset = offsetof(struct ip_esp_hdr, spi);
                offset_seq = offsetof(struct ip_esp_hdr, seq_no);
                break;
        case IPPROTO_COMP:
                if (!pskb_may_pull(skb, sizeof(struct ip_comp_hdr)))
                        return -EINVAL;
                *spi = htonl(ntohs(*(__be16 *)(skb_transport_header(skb) + 2)));
                *seq = 0;
                return 0;
        default:
                return 1;
        }

        if (!pskb_may_pull(skb, hlen))
                return -EINVAL;

        *spi = *(__be32 *)(skb_transport_header(skb) + offset);
        *seq = *(__be32 *)(skb_transport_header(skb) + offset_seq);
        return 0;
}
EXPORT_SYMBOL(xfrm_parse_spi);

static int xfrm4_remove_beet_encap(struct xfrm_state *x, struct sk_buff *skb)
{
        struct iphdr *iph;
        int optlen = 0;
        int err = -EINVAL;

        if (unlikely(XFRM_MODE_SKB_CB(skb)->protocol == IPPROTO_BEETPH)) {
                struct ip_beet_phdr *ph;
                int phlen;

                if (!pskb_may_pull(skb, sizeof(*ph)))
                        goto out;

                ph = (struct ip_beet_phdr *)skb->data;

                phlen = sizeof(*ph) + ph->padlen;
                optlen = ph->hdrlen * 8 + (IPV4_BEET_PHMAXLEN - phlen);
                if (optlen < 0 || optlen & 3 || optlen > 250)
                        goto out;

                XFRM_MODE_SKB_CB(skb)->protocol = ph->nexthdr;

                if (!pskb_may_pull(skb, phlen))
                        goto out;
                __skb_pull(skb, phlen);
        }

        skb_push(skb, sizeof(*iph));
        skb_reset_network_header(skb);
        skb_mac_header_rebuild(skb);

        xfrm4_beet_make_header(skb);

        iph = ip_hdr(skb);

        iph->ihl += optlen / 4;
        iph->tot_len = htons(skb->len);
        iph->daddr = x->sel.daddr.a4;
        iph->saddr = x->sel.saddr.a4;
        iph->check = 0;
        iph->check = ip_fast_csum(skb_network_header(skb), iph->ihl);
        err = 0;
out:
        return err;
}

static void ipip_ecn_decapsulate(struct sk_buff *skb)
{
        struct iphdr *inner_iph = ipip_hdr(skb);

        if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos))
                IP_ECN_set_ce(inner_iph);
}

static int xfrm4_remove_tunnel_encap(struct xfrm_state *x, struct sk_buff *skb)
{
        int err = -EINVAL;

        if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPIP)
                goto out;

        if (!pskb_may_pull(skb, sizeof(struct iphdr)))
                goto out;

        err = skb_unclone(skb, GFP_ATOMIC);
        if (err)
                goto out;

        if (x->props.flags & XFRM_STATE_DECAP_DSCP)
                ipv4_copy_dscp(XFRM_MODE_SKB_CB(skb)->tos, ipip_hdr(skb));
        if (!(x->props.flags & XFRM_STATE_NOECN))
                ipip_ecn_decapsulate(skb);

        skb_reset_network_header(skb);
        skb_mac_header_rebuild(skb);
        if (skb->mac_len)
                eth_hdr(skb)->h_proto = skb->protocol;

        err = 0;

out:
        return err;
}

static void ipip6_ecn_decapsulate(struct sk_buff *skb)
{
        struct ipv6hdr *inner_iph = ipipv6_hdr(skb);

        if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos))
                IP6_ECN_set_ce(skb, inner_iph);
}

static int xfrm6_remove_tunnel_encap(struct xfrm_state *x, struct sk_buff *skb)
{
        int err = -EINVAL;

        if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPV6)
                goto out;
        if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
                goto out;

        err = skb_unclone(skb, GFP_ATOMIC);
        if (err)
                goto out;

        if (x->props.flags & XFRM_STATE_DECAP_DSCP)
                ipv6_copy_dscp(ipv6_get_dsfield(ipv6_hdr(skb)),
                               ipipv6_hdr(skb));
        if (!(x->props.flags & XFRM_STATE_NOECN))
                ipip6_ecn_decapsulate(skb);

        skb_reset_network_header(skb);
        skb_mac_header_rebuild(skb);
        if (skb->mac_len)
                eth_hdr(skb)->h_proto = skb->protocol;

        err = 0;

out:
        return err;
}

static int xfrm6_remove_beet_encap(struct xfrm_state *x, struct sk_buff *skb)
{
        struct ipv6hdr *ip6h;
        int size = sizeof(struct ipv6hdr);
        int err;

        err = skb_cow_head(skb, size + skb->mac_len);
        if (err)
                goto out;

        __skb_push(skb, size);
        skb_reset_network_header(skb);
        skb_mac_header_rebuild(skb);

        xfrm6_beet_make_header(skb);

        ip6h = ipv6_hdr(skb);
        ip6h->payload_len = htons(skb->len - size);
        ip6h->daddr = x->sel.daddr.in6;
        ip6h->saddr = x->sel.saddr.in6;
        err = 0;
out:
        return err;
}

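/* Remove encapsulation header.
 *
 * The IP header will be moved over the top of the encapsulation
 * header.
 *
 * On entry, the transport header shall point to where the IP header
 * should be and the network header shall be set to where the IP
 * header currently is.  skb->data shall point to the start of the
 * payload.
 */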
static int
xfrm_inner_mode_encap_remove(struct xfrm_state *x,
                             const struct xfrm_mode *inner_mode,
                             struct sk_buff *skb)
{
        switch (inner_mode->encap) {
        case XFRM_MODE_BEET:
                if (inner_mode->family == AF_INET)
                        return xfrm4_remove_beet_encap(x, skb);
                if (inner_mode->family == AF_INET6)
                        return xfrm6_remove_beet_encap(x, skb);
                break;
        case XFRM_MODE_TUNNEL:
                if (inner_mode->family == AF_INET)
                        return xfrm4_remove_tunnel_encap(x, skb);
                if (inner_mode->family == AF_INET6)
                        return xfrm6_remove_tunnel_encap(x, skb);
                break;
        }

        WARN_ON_ONCE(1);
        return -EOPNOTSUPP;
}

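/* Extract the outer header control information, set skb->protocol to the
 * inner family and strip the tunnel or BEET encapsulation header.
 */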
static int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb)
{
        const struct xfrm_mode *inner_mode = &x->inner_mode;

        switch (x->outer_mode.family) {
        case AF_INET:
                xfrm4_extract_header(skb);
                break;
        case AF_INET6:
                xfrm6_extract_header(skb);
                break;
        default:
                WARN_ON_ONCE(1);
                return -EAFNOSUPPORT;
        }

        if (x->sel.family == AF_UNSPEC) {
                inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
                if (!inner_mode)
                        return -EAFNOSUPPORT;
        }

        switch (inner_mode->family) {
        case AF_INET:
                skb->protocol = htons(ETH_P_IP);
                break;
        case AF_INET6:
                skb->protocol = htons(ETH_P_IPV6);
                break;
        default:
                WARN_ON_ONCE(1);
                break;
        }

        return xfrm_inner_mode_encap_remove(x, inner_mode, skb);
}

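/* Remove encapsulation header.
 *
 * The IP header will be moved over the top of the encapsulation header.
 *
 * On entry, skb_transport_header() shall point to where the IP header
 * should be and skb_network_header() shall be set to where the IP header
 * currently is.  skb->data shall point to the start of the payload.
 */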
static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
{
        int ihl = skb->data - skb_transport_header(skb);

        if (skb->transport_header != skb->network_header) {
                memmove(skb_transport_header(skb),
                        skb_network_header(skb), ihl);
                skb->network_header = skb->transport_header;
        }
        ip_hdr(skb)->tot_len = htons(skb->len + ihl);
        skb_reset_transport_header(skb);
        return 0;
}

static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
        int ihl = skb->data - skb_transport_header(skb);

        if (skb->transport_header != skb->network_header) {
                memmove(skb_transport_header(skb),
                        skb_network_header(skb), ihl);
                skb->network_header = skb->transport_header;
        }
        ipv6_hdr(skb)->payload_len = htons(skb->len + ihl -
                                           sizeof(struct ipv6hdr));
        skb_reset_transport_header(skb);
        return 0;
#else
        WARN_ON_ONCE(1);
        return -EAFNOSUPPORT;
#endif
}

static int xfrm_inner_mode_input(struct xfrm_state *x,
                                 const struct xfrm_mode *inner_mode,
                                 struct sk_buff *skb)
{
        switch (inner_mode->encap) {
        case XFRM_MODE_BEET:
        case XFRM_MODE_TUNNEL:
                return xfrm_prepare_input(x, skb);
        case XFRM_MODE_TRANSPORT:
                if (inner_mode->family == AF_INET)
                        return xfrm4_transport_input(x, skb);
                if (inner_mode->family == AF_INET6)
                        return xfrm6_transport_input(x, skb);
                break;
        case XFRM_MODE_ROUTEOPTIMIZATION:
                WARN_ON_ONCE(1);
                break;
        default:
                WARN_ON_ONCE(1);
                break;
        }

        return -EOPNOTSUPP;
}

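/* Main receive path for IPsec packets.  For each transform on the packet,
 * look up the state by (daddr, spi, proto), verify replay window and
 * lifetime, run the protocol input handler and strip the encapsulation,
 * looping until the innermost packet is reached.
 */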
int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
{
        const struct xfrm_state_afinfo *afinfo;
        struct net *net = dev_net(skb->dev);
        const struct xfrm_mode *inner_mode;
        int err;
        __be32 seq;
        __be32 seq_hi;
        struct xfrm_state *x = NULL;
        xfrm_address_t *daddr;
        u32 mark = skb->mark;
        unsigned int family = AF_UNSPEC;
        int decaps = 0;
        int async = 0;
        bool xfrm_gro = false;
        bool crypto_done = false;
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct sec_path *sp;

        if (encap_type < 0) {
                x = xfrm_input_state(skb);

                if (unlikely(x->km.state != XFRM_STATE_VALID)) {
                        if (x->km.state == XFRM_STATE_ACQ)
                                XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
                        else
                                XFRM_INC_STATS(net,
                                               LINUX_MIB_XFRMINSTATEINVALID);

                        if (encap_type == -1)
                                dev_put(skb->dev);
                        goto drop;
                }

                family = x->outer_mode.family;

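                /* An encap_type of -1 indicates async resumption. */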
                if (encap_type == -1) {
                        async = 1;
                        seq = XFRM_SKB_CB(skb)->seq.input.low;
                        goto resume;
                }

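                /* GRO call */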
                encap_type = 0;
                seq = XFRM_SPI_SKB_CB(skb)->seq;

                if (xo && (xo->flags & CRYPTO_DONE)) {
                        crypto_done = true;
                        family = XFRM_SPI_SKB_CB(skb)->family;

                        if (!(xo->status & CRYPTO_SUCCESS)) {
                                if (xo->status &
                                    (CRYPTO_TRANSPORT_AH_AUTH_FAILED |
                                     CRYPTO_TRANSPORT_ESP_AUTH_FAILED |
                                     CRYPTO_TUNNEL_AH_AUTH_FAILED |
                                     CRYPTO_TUNNEL_ESP_AUTH_FAILED)) {

                                        xfrm_audit_state_icvfail(x, skb,
                                                                 x->type->proto);
                                        x->stats.integrity_failed++;
                                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
                                        goto drop;
                                }

                                if (xo->status & CRYPTO_INVALID_PROTOCOL) {
                                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
                                        goto drop;
                                }

                                XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
                                goto drop;
                        }

                        if (xfrm_parse_spi(skb, nexthdr, &spi, &seq)) {
                                XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
                                goto drop;
                        }
                }

                goto lock;
        }

        family = XFRM_SPI_SKB_CB(skb)->family;

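        /* if tunnel is present override skb->mark value with tunnel i_key */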
        switch (family) {
        case AF_INET:
                if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4)
                        mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4->parms.i_key);
                break;
        case AF_INET6:
                if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6)
                        mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6->parms.i_key);
                break;
        }

        sp = secpath_set(skb);
        if (!sp) {
                XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
                goto drop;
        }

        seq = 0;
        if (!spi && xfrm_parse_spi(skb, nexthdr, &spi, &seq)) {
                secpath_reset(skb);
                XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
                goto drop;
        }

        daddr = (xfrm_address_t *)(skb_network_header(skb) +
                                   XFRM_SPI_SKB_CB(skb)->daddroff);
        do {
                sp = skb_sec_path(skb);

                if (sp->len == XFRM_MAX_DEPTH) {
                        secpath_reset(skb);
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
                        goto drop;
                }

                x = xfrm_state_lookup(net, mark, daddr, spi, nexthdr, family);
                if (x == NULL) {
                        secpath_reset(skb);
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
                        xfrm_audit_state_notfound(skb, family, spi, seq);
                        goto drop;
                }

                skb->mark = xfrm_smark_get(skb->mark, x);

                sp->xvec[sp->len++] = x;

                skb_dst_force(skb);
                if (!skb_dst(skb)) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
                        goto drop;
                }

lock:
                spin_lock(&x->lock);

                if (unlikely(x->km.state != XFRM_STATE_VALID)) {
                        if (x->km.state == XFRM_STATE_ACQ)
                                XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
                        else
                                XFRM_INC_STATS(net,
                                               LINUX_MIB_XFRMINSTATEINVALID);
                        goto drop_unlock;
                }

                if ((x->encap ? x->encap->encap_type : 0) != encap_type) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
                        goto drop_unlock;
                }

                if (xfrm_replay_check(x, skb, seq)) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
                        goto drop_unlock;
                }

                if (xfrm_state_check_expire(x)) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEEXPIRED);
                        goto drop_unlock;
                }

                spin_unlock(&x->lock);

                if (xfrm_tunnel_check(skb, x, family)) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
                        goto drop;
                }

                seq_hi = htonl(xfrm_replay_seqhi(x, seq));

                XFRM_SKB_CB(skb)->seq.input.low = seq;
                XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;

                dev_hold(skb->dev);

                if (crypto_done)
                        nexthdr = x->type_offload->input_tail(x, skb);
                else
                        nexthdr = x->type->input(x, skb);

                if (nexthdr == -EINPROGRESS)
                        return 0;
resume:
                dev_put(skb->dev);

                spin_lock(&x->lock);
                if (nexthdr < 0) {
                        if (nexthdr == -EBADMSG) {
                                xfrm_audit_state_icvfail(x, skb,
                                                         x->type->proto);
                                x->stats.integrity_failed++;
                        }
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
                        goto drop_unlock;
                }

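                /* only the first xfrm gets the encap type */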
                encap_type = 0;

                if (xfrm_replay_recheck(x, skb, seq)) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
                        goto drop_unlock;
                }

                xfrm_replay_advance(x, seq);

                x->curlft.bytes += skb->len;
                x->curlft.packets++;

                spin_unlock(&x->lock);

                XFRM_MODE_SKB_CB(skb)->protocol = nexthdr;

                inner_mode = &x->inner_mode;

                if (x->sel.family == AF_UNSPEC) {
                        inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
                        if (inner_mode == NULL) {
                                XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
                                goto drop;
                        }
                }

                if (xfrm_inner_mode_input(x, inner_mode, skb)) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
                        goto drop;
                }

                if (x->outer_mode.flags & XFRM_MODE_FLAG_TUNNEL) {
                        decaps = 1;
                        break;
                }

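                /*
                 * We need the inner address.  However, we only get here for
                 * transport mode so the outer address is identical.
                 */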
                daddr = &x->id.daddr;
                family = x->outer_mode.family;

                err = xfrm_parse_spi(skb, nexthdr, &spi, &seq);
                if (err < 0) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
                        goto drop;
                }
                crypto_done = false;
        } while (!err);

        err = xfrm_rcv_cb(skb, family, x->type->proto, 0);
        if (err)
                goto drop;

        nf_reset_ct(skb);

        if (decaps) {
                sp = skb_sec_path(skb);
                if (sp)
                        sp->olen = 0;
                skb_dst_drop(skb);
                gro_cells_receive(&gro_cells, skb);
                return 0;
        } else {
                xo = xfrm_offload(skb);
                if (xo)
                        xfrm_gro = xo->flags & XFRM_GRO;

                err = -EAFNOSUPPORT;
                rcu_read_lock();
                afinfo = xfrm_state_afinfo_get_rcu(x->inner_mode.family);
                if (likely(afinfo))
                        err = afinfo->transport_finish(skb, xfrm_gro || async);
                rcu_read_unlock();
                if (xfrm_gro) {
                        sp = skb_sec_path(skb);
                        if (sp)
                                sp->olen = 0;
                        skb_dst_drop(skb);
                        gro_cells_receive(&gro_cells, skb);
                        return err;
                }

                return err;
        }

drop_unlock:
        spin_unlock(&x->lock);
drop:
        xfrm_rcv_cb(skb, family, x && x->type ? x->type->proto : nexthdr, -1);
        kfree_skb(skb);
        return 0;
}
EXPORT_SYMBOL(xfrm_input);

int xfrm_input_resume(struct sk_buff *skb, int nexthdr)
{
        return xfrm_input(skb, nexthdr, 0, -1);
}
EXPORT_SYMBOL(xfrm_input_resume);

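/* Tasklet handler: drain this CPU's deferral queue and run each packet's
 * saved finish() callback in softirq context.
 */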
static void xfrm_trans_reinject(struct tasklet_struct *t)
{
        struct xfrm_trans_tasklet *trans = from_tasklet(trans, t, tasklet);
        struct sk_buff_head queue;
        struct sk_buff *skb;

        __skb_queue_head_init(&queue);
        skb_queue_splice_init(&trans->queue, &queue);

        while ((skb = __skb_dequeue(&queue)))
                XFRM_TRANS_SKB_CB(skb)->finish(XFRM_TRANS_SKB_CB(skb)->net,
                                               NULL, skb);
}

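/* Defer processing of @skb: stash the finish() callback and netns in the
 * skb control block, queue it on the current CPU and schedule the
 * reinjection tasklet.  Fails with -ENOBUFS once the backlog limit is hit.
 */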
int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
                         int (*finish)(struct net *, struct sock *,
                                       struct sk_buff *))
{
        struct xfrm_trans_tasklet *trans;

        trans = this_cpu_ptr(&xfrm_trans_tasklet);

        if (skb_queue_len(&trans->queue) >= READ_ONCE(netdev_max_backlog))
                return -ENOBUFS;

        BUILD_BUG_ON(sizeof(struct xfrm_trans_cb) > sizeof(skb->cb));

        XFRM_TRANS_SKB_CB(skb)->finish = finish;
        XFRM_TRANS_SKB_CB(skb)->net = net;
        __skb_queue_tail(&trans->queue, skb);
        tasklet_schedule(&trans->tasklet);
        return 0;
}
EXPORT_SYMBOL(xfrm_trans_queue_net);

int xfrm_trans_queue(struct sk_buff *skb,
                     int (*finish)(struct net *, struct sock *,
                                   struct sk_buff *))
{
        return xfrm_trans_queue_net(dev_net(skb->dev), skb, finish);
}
EXPORT_SYMBOL(xfrm_trans_queue);

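/* Set up the dummy netdevice backing the GRO cells and the per-CPU
 * reinjection tasklets and queues.
 */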
void __init xfrm_input_init(void)
{
        int err;
        int i;

        init_dummy_netdev(&xfrm_napi_dev);
        err = gro_cells_init(&gro_cells, &xfrm_napi_dev);
        if (err)
                gro_cells.cells = NULL;

        for_each_possible_cpu(i) {
                struct xfrm_trans_tasklet *trans;

                trans = &per_cpu(xfrm_trans_tasklet, i);
                __skb_queue_head_init(&trans->queue);
                tasklet_setup(&trans->tasklet, xfrm_trans_reinject);
        }
}