0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0018
0019 #include <linux/module.h>
0020 #include <linux/string.h>
0021 #include <linux/list.h>
0022 #include <linux/rculist.h>
0023 #include <linux/uaccess.h>
0024
0025 #include <linux/kernel.h>
0026 #include <linux/spinlock.h>
0027 #include <linux/kthread.h>
0028 #include <linux/sched.h>
0029 #include <linux/slab.h>
0030 #include <linux/errno.h>
0031 #include <linux/jiffies.h>
0032
0033 #include <linux/netdevice.h>
0034 #include <linux/net.h>
0035 #include <linux/inetdevice.h>
0036 #include <linux/skbuff.h>
0037 #include <linux/init.h>
0038 #include <linux/in.h>
0039 #include <linux/ip.h>
0040 #include <linux/udp.h>
0041 #include <linux/l2tp.h>
0042 #include <linux/hash.h>
0043 #include <linux/sort.h>
0044 #include <linux/file.h>
0045 #include <linux/nsproxy.h>
0046 #include <net/net_namespace.h>
0047 #include <net/netns/generic.h>
0048 #include <net/dst.h>
0049 #include <net/ip.h>
0050 #include <net/udp.h>
0051 #include <net/udp_tunnel.h>
0052 #include <net/inet_common.h>
0053 #include <net/xfrm.h>
0054 #include <net/protocol.h>
0055 #include <net/inet6_connection_sock.h>
0056 #include <net/inet_ecn.h>
0057 #include <net/ip6_route.h>
0058 #include <net/ip6_checksum.h>
0059
0060 #include <asm/byteorder.h>
0061 #include <linux/atomic.h>
0062
0063 #include "l2tp_core.h"
0064 #include "trace.h"
0065
0066 #define CREATE_TRACE_POINTS
0067 #include "trace.h"
0068
0069 #define L2TP_DRV_VERSION "V2.0"
0070
0071
0072 #define L2TP_HDRFLAG_T 0x8000
0073 #define L2TP_HDRFLAG_L 0x4000
0074 #define L2TP_HDRFLAG_S 0x0800
0075 #define L2TP_HDRFLAG_O 0x0200
0076 #define L2TP_HDRFLAG_P 0x0100
0077
0078 #define L2TP_HDR_VER_MASK 0x000F
0079 #define L2TP_HDR_VER_2 0x0002
0080 #define L2TP_HDR_VER_3 0x0003
0081
0082
0083 #define L2TP_SLFLAG_S 0x40000000
0084 #define L2TP_SL_SEQ_MASK 0x00ffffff
0085
0086 #define L2TP_HDR_SIZE_MAX 14
0087
0088
0089 #define L2TP_DEFAULT_DEBUG_FLAGS 0
0090
0091
0092
/* Private per-packet receive-path state, stored in skb->cb[] after the
 * inet layer's own control block (see L2TP_SKB_CB() below).
 */
struct l2tp_skb_cb {
	u32 ns;			/* sequence number parsed from the L2TP header */
	u16 has_seq;		/* non-zero if the packet carried sequence numbers */
	u16 length;		/* payload length, used for stats accounting */
	unsigned long expires;	/* reorder-queue timeout deadline, in jiffies */
};

/* Our cb area sits immediately after struct inet_skb_parm in skb->cb[] */
#define L2TP_SKB_CB(skb) ((struct l2tp_skb_cb *)&(skb)->cb[sizeof(struct inet_skb_parm)])
0101
/* Module workqueue; tunnel deletion work (del_work) presumably runs here —
 * the queueing site is outside this chunk, so confirm against the rest of
 * the file.
 */
static struct workqueue_struct *l2tp_wq;

/* Key for this module's per-network-namespace state */
static unsigned int l2tp_net_id;

/* Per-net-namespace data for the module */
struct l2tp_net {
	/* List of all tunnels in this namespace */
	struct list_head l2tp_tunnel_list;
	/* Protects writers of l2tp_tunnel_list; readers use RCU */
	spinlock_t l2tp_tunnel_list_lock;
	/* Per-net session hash, used for L2TPv3 session-id lookups */
	struct hlist_head l2tp_session_hlist[L2TP_HASH_SIZE_2];
	/* Protects writers of l2tp_session_hlist; readers use RCU */
	spinlock_t l2tp_session_hlist_lock;
};
0114
#if IS_ENABLED(CONFIG_IPV6)
/* True if the socket carries native IPv6 traffic (not an IPv4-mapped
 * address); callers use this to choose the IPv6 transmit/checksum paths.
 */
static bool l2tp_sk_is_v6(struct sock *sk)
{
	return sk->sk_family == PF_INET6 &&
	       !ipv6_addr_v4mapped(&sk->sk_v6_daddr);
}
#endif
0122
/* Return this module's per-namespace state for @net */
static inline struct l2tp_net *l2tp_pernet(const struct net *net)
{
	return net_generic(net, l2tp_net_id);
}
0127
0128
0129
0130
0131
0132
/* Per-net session hash bucket for a session id. Only L2TPv3 sessions are
 * added to this table (see l2tp_session_register), since v3 session ids
 * are looked up without a tunnel context.
 */
static inline struct hlist_head *
l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id)
{
	return &pn->l2tp_session_hlist[hash_32(session_id, L2TP_HASH_BITS_2)];
}
0138
0139
0140
0141
0142
0143
0144
/* Per-tunnel session hash bucket for a session id. Lookups through this
 * table are keyed by tunnel, so ids need only be unique within a tunnel.
 */
static inline struct hlist_head *
l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id)
{
	return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)];
}
0150
/* Called when the tunnel refcount drops to zero: release the socket
 * reference. The tunnel struct itself is freed later via kfree_rcu() in
 * l2tp_tunnel_destruct() once the socket is destroyed.
 */
static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
{
	trace_free_tunnel(tunnel);
	sock_put(tunnel->sock);
}
0157
/* Called when the session refcount drops to zero: drop the tunnel
 * reference taken at registration time and free the session.
 */
static void l2tp_session_free(struct l2tp_session *session)
{
	trace_free_session(session);
	if (session->tunnel)
		l2tp_tunnel_dec_refcount(session->tunnel);
	kfree(session);
}
0165
0166 struct l2tp_tunnel *l2tp_sk_to_tunnel(struct sock *sk)
0167 {
0168 struct l2tp_tunnel *tunnel = sk->sk_user_data;
0169
0170 if (tunnel)
0171 if (WARN_ON(tunnel->magic != L2TP_TUNNEL_MAGIC))
0172 return NULL;
0173
0174 return tunnel;
0175 }
0176 EXPORT_SYMBOL_GPL(l2tp_sk_to_tunnel);
0177
/* Take a reference on a tunnel; pair with l2tp_tunnel_dec_refcount() */
void l2tp_tunnel_inc_refcount(struct l2tp_tunnel *tunnel)
{
	refcount_inc(&tunnel->ref_count);
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_inc_refcount);
0183
/* Drop a tunnel reference; frees the tunnel when the last one goes */
void l2tp_tunnel_dec_refcount(struct l2tp_tunnel *tunnel)
{
	if (refcount_dec_and_test(&tunnel->ref_count))
		l2tp_tunnel_free(tunnel);
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_dec_refcount);
0190
/* Take a reference on a session; pair with l2tp_session_dec_refcount() */
void l2tp_session_inc_refcount(struct l2tp_session *session)
{
	refcount_inc(&session->ref_count);
}
EXPORT_SYMBOL_GPL(l2tp_session_inc_refcount);
0196
/* Drop a session reference; frees the session when the last one goes */
void l2tp_session_dec_refcount(struct l2tp_session *session)
{
	if (refcount_dec_and_test(&session->ref_count))
		l2tp_session_free(session);
}
EXPORT_SYMBOL_GPL(l2tp_session_dec_refcount);
0203
0204
/* Look up a tunnel by id in the per-net list under RCU. On success the
 * tunnel is returned with its refcount held; the caller must drop it with
 * l2tp_tunnel_dec_refcount(). Tunnels whose refcount has already reached
 * zero (mid-teardown) are skipped by refcount_inc_not_zero().
 */
struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
{
	const struct l2tp_net *pn = l2tp_pernet(net);
	struct l2tp_tunnel *tunnel;

	rcu_read_lock_bh();
	list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
		if (tunnel->tunnel_id == tunnel_id &&
		    refcount_inc_not_zero(&tunnel->ref_count)) {
			rcu_read_unlock_bh();

			return tunnel;
		}
	}
	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_get);
0224
/* Return the tunnel at position @nth in the per-net list (0-based), for
 * iterative dump-style walks. Returned tunnel has its refcount held;
 * caller must drop it. Dying tunnels fail refcount_inc_not_zero() and
 * are passed over.
 */
struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth)
{
	const struct l2tp_net *pn = l2tp_pernet(net);
	struct l2tp_tunnel *tunnel;
	int count = 0;

	rcu_read_lock_bh();
	list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
		if (++count > nth &&
		    refcount_inc_not_zero(&tunnel->ref_count)) {
			rcu_read_unlock_bh();
			return tunnel;
		}
	}
	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_get_nth);
0244
/* Look up a session by id within a tunnel, under RCU. Returns the session
 * with a reference held (caller drops with l2tp_session_dec_refcount()),
 * or NULL if not found.
 */
struct l2tp_session *l2tp_tunnel_get_session(struct l2tp_tunnel *tunnel,
					     u32 session_id)
{
	struct hlist_head *session_list;
	struct l2tp_session *session;

	session_list = l2tp_session_id_hash(tunnel, session_id);

	rcu_read_lock_bh();
	hlist_for_each_entry_rcu(session, session_list, hlist)
		if (session->session_id == session_id) {
			l2tp_session_inc_refcount(session);
			rcu_read_unlock_bh();

			return session;
		}
	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_get_session);
0266
/* Look up a session by id in the per-net (L2TPv3) hash, under RCU.
 * Returns the session with a reference held, or NULL. Only v3 sessions
 * are registered in this hash (see l2tp_session_register).
 */
struct l2tp_session *l2tp_session_get(const struct net *net, u32 session_id)
{
	struct hlist_head *session_list;
	struct l2tp_session *session;

	session_list = l2tp_session_id_hash_2(l2tp_pernet(net), session_id);

	rcu_read_lock_bh();
	hlist_for_each_entry_rcu(session, session_list, global_hlist)
		if (session->session_id == session_id) {
			l2tp_session_inc_refcount(session);
			rcu_read_unlock_bh();

			return session;
		}
	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_session_get);
0287
/* Return the session at position @nth (0-based) across all of a tunnel's
 * hash buckets, for dump-style iteration. Returned session has a
 * reference held; caller must drop it.
 */
struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth)
{
	int hash;
	struct l2tp_session *session;
	int count = 0;

	rcu_read_lock_bh();
	for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
		hlist_for_each_entry_rcu(session, &tunnel->session_hlist[hash], hlist) {
			if (++count > nth) {
				l2tp_session_inc_refcount(session);
				rcu_read_unlock_bh();
				return session;
			}
		}
	}

	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_session_get_nth);
0310
0311
0312
0313
/* Look up a session by interface name across the per-net hash, under RCU.
 * Linear scan of all buckets since the hash is keyed by session id, not
 * name. Returns the session with a reference held, or NULL.
 */
struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
						const char *ifname)
{
	struct l2tp_net *pn = l2tp_pernet(net);
	int hash;
	struct l2tp_session *session;

	rcu_read_lock_bh();
	for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) {
		hlist_for_each_entry_rcu(session, &pn->l2tp_session_hlist[hash], global_hlist) {
			if (!strcmp(session->ifname, ifname)) {
				l2tp_session_inc_refcount(session);
				rcu_read_unlock_bh();

				return session;
			}
		}
	}

	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_session_get_by_ifname);
0338
/* Register a session with its tunnel. Rejects duplicate session ids in
 * the per-tunnel hash and, for L2TPv3, in the per-net hash as well.
 * Takes a tunnel reference on success (dropped in l2tp_session_free()).
 * Returns 0, -ENODEV if the tunnel is closing, or -EEXIST on collision.
 */
int l2tp_session_register(struct l2tp_session *session,
			  struct l2tp_tunnel *tunnel)
{
	struct l2tp_session *session_walk;
	struct hlist_head *g_head;
	struct hlist_head *head;
	struct l2tp_net *pn;
	int err;

	head = l2tp_session_id_hash(tunnel, session->session_id);

	spin_lock_bh(&tunnel->hlist_lock);
	/* Refuse new sessions once tunnel teardown has begun
	 * (acpt_newsess is cleared in l2tp_tunnel_closeall)
	 */
	if (!tunnel->acpt_newsess) {
		err = -ENODEV;
		goto err_tlock;
	}

	hlist_for_each_entry(session_walk, head, hlist)
		if (session_walk->session_id == session->session_id) {
			err = -EEXIST;
			goto err_tlock;
		}

	if (tunnel->version == L2TP_HDR_VER_3) {
		pn = l2tp_pernet(tunnel->l2tp_net);
		g_head = l2tp_session_id_hash_2(pn, session->session_id);

		spin_lock_bh(&pn->l2tp_session_hlist_lock);

		/* IP-encapsulated tunnels have no tunnel id in the packet
		 * header, so a session id colliding with one in any
		 * IP-encap tunnel in this net must be rejected.
		 */
		hlist_for_each_entry(session_walk, g_head, global_hlist)
			if (session_walk->session_id == session->session_id &&
			    (session_walk->tunnel->encap == L2TP_ENCAPTYPE_IP ||
			     tunnel->encap == L2TP_ENCAPTYPE_IP)) {
				err = -EEXIST;
				goto err_tlock_pnlock;
			}

		l2tp_tunnel_inc_refcount(tunnel);
		hlist_add_head_rcu(&session->global_hlist, g_head);

		spin_unlock_bh(&pn->l2tp_session_hlist_lock);
	} else {
		l2tp_tunnel_inc_refcount(tunnel);
	}

	hlist_add_head_rcu(&session->hlist, head);
	spin_unlock_bh(&tunnel->hlist_lock);

	trace_register_session(session);

	return 0;

err_tlock_pnlock:
	spin_unlock_bh(&pn->l2tp_session_hlist_lock);
err_tlock:
	spin_unlock_bh(&tunnel->hlist_lock);

	return err;
}
EXPORT_SYMBOL_GPL(l2tp_session_register);
0402
0403
0404
0405
0406
0407
0408
0409
/* Insert a received packet into the session's reorder queue, keeping the
 * queue ordered by ns. An insertion anywhere but the tail counts as an
 * out-of-sequence arrival for stats purposes.
 */
static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *skb)
{
	struct sk_buff *skbp;
	struct sk_buff *tmp;
	u32 ns = L2TP_SKB_CB(skb)->ns;

	spin_lock_bh(&session->reorder_q.lock);
	skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
		if (L2TP_SKB_CB(skbp)->ns > ns) {
			__skb_queue_before(&session->reorder_q, skbp, skb);
			atomic_long_inc(&session->stats.rx_oos_packets);
			goto out;
		}
	}

	/* In-order (or queue empty): append at the tail */
	__skb_queue_tail(&session->reorder_q, skb);

out:
	spin_unlock_bh(&session->reorder_q.lock);
}
0430
0431
0432
/* Deliver one packet (already removed from the reorder queue) to the
 * session's receive handler, updating stats and advancing the expected
 * sequence number. The handler consumes the skb; if none is set the
 * packet is dropped.
 */
static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *skb)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	int length = L2TP_SKB_CB(skb)->length;

	/* Detach from the originating socket's accounting before the
	 * packet is handed up the stack.
	 */
	skb_orphan(skb);

	atomic_long_inc(&tunnel->stats.rx_packets);
	atomic_long_add(length, &tunnel->stats.rx_bytes);
	atomic_long_inc(&session->stats.rx_packets);
	atomic_long_add(length, &session->stats.rx_bytes);

	if (L2TP_SKB_CB(skb)->has_seq) {
		/* Bump our Nr, wrapping at the session's seq-number limit */
		session->nr++;
		session->nr &= session->nr_max;
		trace_session_seqnum_update(session);
	}

	/* Hand the packet on */
	if (session->recv_skb)
		(*session->recv_skb)(session, skb, L2TP_SKB_CB(skb)->length);
	else
		kfree_skb(skb);
}
0461
0462
0463
0464
/* Drain the reorder queue, delivering packets while they arrive in the
 * expected sequence. Expired packets are dropped and reorder_skip is set
 * so the next in-window packet resynchronises Nr.
 */
static void l2tp_recv_dequeue(struct l2tp_session *session)
{
	struct sk_buff *skb;
	struct sk_buff *tmp;

	/* The queue lock cannot be held across the receive handler, so
	 * after delivering a packet we restart the walk from scratch —
	 * the queue may have changed while unlocked.
	 */
start:
	spin_lock_bh(&session->reorder_q.lock);
	skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
		struct l2tp_skb_cb *cb = L2TP_SKB_CB(skb);

		/* Waited too long for the missing packet: drop this one
		 * and allow Nr to be resynchronised below.
		 */
		if (time_after(jiffies, cb->expires)) {
			atomic_long_inc(&session->stats.rx_seq_discards);
			atomic_long_inc(&session->stats.rx_errors);
			trace_session_pkt_expired(session, cb->ns);
			session->reorder_skip = 1;
			__skb_unlink(skb, &session->reorder_q);
			kfree_skb(skb);
			continue;
		}

		if (cb->has_seq) {
			if (session->reorder_skip) {
				/* Resync: accept this packet's ns as the
				 * new expected sequence number.
				 */
				session->reorder_skip = 0;
				session->nr = cb->ns;
				trace_session_seqnum_reset(session);
			}
			if (cb->ns != session->nr)
				goto out;	/* hole in sequence: wait */
		}
		__skb_unlink(skb, &session->reorder_q);

		/* Drop the lock for delivery, then restart the walk */
		spin_unlock_bh(&session->reorder_q.lock);
		l2tp_recv_dequeue_skb(session, skb);
		goto start;
	}

out:
	spin_unlock_bh(&session->reorder_q.lock);
}
0512
0513 static int l2tp_seq_check_rx_window(struct l2tp_session *session, u32 nr)
0514 {
0515 u32 nws;
0516
0517 if (nr >= session->nr)
0518 nws = nr - session->nr;
0519 else
0520 nws = (session->nr_max + 1) - (session->nr - nr);
0521
0522 return nws < session->nr_window_size;
0523 }
0524
0525
0526
0527
/* Handle a sequenced data packet on receive. Returns 0 if the packet was
 * queued for delivery, 1 if the caller should discard it.
 */
static int l2tp_recv_data_seq(struct l2tp_session *session, struct sk_buff *skb)
{
	struct l2tp_skb_cb *cb = L2TP_SKB_CB(skb);

	/* Packets outside the receive window are dropped outright */
	if (!l2tp_seq_check_rx_window(session, cb->ns)) {
		trace_session_pkt_outside_rx_window(session, cb->ns);
		goto discard;
	}

	if (session->reorder_timeout != 0) {
		/* Reordering enabled: queue in ns order and let
		 * l2tp_recv_dequeue deliver when the gap closes or the
		 * timeout fires.
		 */
		l2tp_recv_queue_skb(session, skb);
		goto out;
	}

	/* No reorder timeout: only the exact expected ns is accepted.
	 * Track consecutive out-of-sequence arrivals; after
	 * nr_oos_count_max in a row, set reorder_skip so the dequeue
	 * path resynchronises Nr instead of stalling forever.
	 */
	if (cb->ns == session->nr) {
		skb_queue_tail(&session->reorder_q, skb);
	} else {
		u32 nr_oos = cb->ns;
		u32 nr_next = (session->nr_oos + 1) & session->nr_max;

		if (nr_oos == nr_next)
			session->nr_oos_count++;
		else
			session->nr_oos_count = 0;

		session->nr_oos = nr_oos;
		if (session->nr_oos_count > session->nr_oos_count_max) {
			session->reorder_skip = 1;
		}
		if (!session->reorder_skip) {
			atomic_long_inc(&session->stats.rx_seq_discards);
			trace_session_pkt_oos(session, cb->ns);
			goto discard;
		}
		skb_queue_tail(&session->reorder_q, skb);
	}

out:
	return 0;

discard:
	return 1;
}
0582
0583
0584
0585
0586
0587
0588
0589
0590
0591
0592
0593
0594
0595
0596
0597
0598
0599
0600
0601
0602
0603
0604
0605
0606
0607
0608
0609
0610
0611
0612
0613
0614
0615
0616
0617
0618
0619
0620
0621
0622
0623
0624
0625
0626
0627
0628
0629
0630
0631
0632
0633
0634
0635
0636
0637
0638
0639
0640
0641
/* Core receive path shared by UDP and IP encapsulations. @ptr points at
 * the optional part of the already-validated L2TP header (after session
 * id), @optr at the start of the L2TP header, @length is the payload
 * length. Consumes the skb: it is either queued/delivered or freed.
 */
void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
		      unsigned char *ptr, unsigned char *optr, u16 hdrflags,
		      int length)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	int offset;

	/* Verify the optional L2TPv3 cookie, if one is configured */
	if (session->peer_cookie_len > 0) {
		if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
			pr_debug_ratelimited("%s: cookie mismatch (%u/%u). Discarding.\n",
					     tunnel->name, tunnel->tunnel_id,
					     session->session_id);
			atomic_long_inc(&session->stats.rx_cookie_discards);
			goto discard;
		}
		ptr += session->peer_cookie_len;
	}

	/* Parse sequence numbers: v2 carries Ns/Nr behind the S flag;
	 * v3 (default L2-specific sublayer) packs an S bit and 24-bit ns
	 * into one 32-bit word.
	 */
	L2TP_SKB_CB(skb)->has_seq = 0;
	if (tunnel->version == L2TP_HDR_VER_2) {
		if (hdrflags & L2TP_HDRFLAG_S) {
			/* Ns */
			L2TP_SKB_CB(skb)->ns = ntohs(*(__be16 *)ptr);
			L2TP_SKB_CB(skb)->has_seq = 1;
			ptr += 2;
			/* skip Nr — we don't use the peer's Nr here */
			ptr += 2;

		}
	} else if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
		u32 l2h = ntohl(*(__be32 *)ptr);

		if (l2h & 0x40000000) {
			/* S bit set: low 24 bits are ns */
			L2TP_SKB_CB(skb)->ns = l2h & 0x00ffffff;
			L2TP_SKB_CB(skb)->has_seq = 1;
		}
		ptr += 4;
	}

	if (L2TP_SKB_CB(skb)->has_seq) {
		/* If we're acting as LAC and the peer (LNS) started
		 * sending sequence numbers, start sending them too.
		 */
		if (!session->lns_mode && !session->send_seq) {
			trace_session_seqnum_lns_enable(session);
			session->send_seq = 1;
			l2tp_session_set_header_len(session, tunnel->version);
		}
	} else {
		/* No sequence numbers but we require them: drop */
		if (session->recv_seq) {
			pr_debug_ratelimited("%s: recv data has no seq numbers when required. Discarding.\n",
					     session->name);
			atomic_long_inc(&session->stats.rx_seq_discards);
			goto discard;
		}

		/* As LAC, mirror the peer turning sequence numbers off;
		 * as LNS (send_seq set, lns_mode set), treat their
		 * absence as a protocol violation and drop.
		 */
		if (!session->lns_mode && session->send_seq) {
			trace_session_seqnum_lns_disable(session);
			session->send_seq = 0;
			l2tp_session_set_header_len(session, tunnel->version);
		} else if (session->send_seq) {
			pr_debug_ratelimited("%s: recv data has no seq numbers when required. Discarding.\n",
					     session->name);
			atomic_long_inc(&session->stats.rx_seq_discards);
			goto discard;
		}
	}

	/* v2 optional offset field: skip the pad it describes */
	if (tunnel->version == L2TP_HDR_VER_2) {
		if (hdrflags & L2TP_HDRFLAG_O) {
			offset = ntohs(*(__be16 *)ptr);
			ptr += 2 + offset;
		}
	}

	/* Ensure the full header we just walked is in the linear area,
	 * then strip it so skb->data points at the payload.
	 */
	offset = ptr - optr;
	if (!pskb_may_pull(skb, offset))
		goto discard;

	__skb_pull(skb, offset);

	/* Record delivery state used by the reorder/dequeue machinery */
	L2TP_SKB_CB(skb)->length = length;
	L2TP_SKB_CB(skb)->expires = jiffies +
		(session->reorder_timeout ? session->reorder_timeout : HZ);

	if (L2TP_SKB_CB(skb)->has_seq) {
		if (l2tp_recv_data_seq(session, skb))
			goto discard;
	} else {
		/* Unsequenced packets go straight onto the queue */
		skb_queue_tail(&session->reorder_q, skb);
	}

	/* Deliver whatever is now deliverable */
	l2tp_recv_dequeue(session);

	return;

discard:
	atomic_long_inc(&session->stats.rx_errors);
	kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(l2tp_recv_common);
0777
0778
0779
0780 static void l2tp_session_queue_purge(struct l2tp_session *session)
0781 {
0782 struct sk_buff *skb = NULL;
0783
0784 while ((skb = skb_dequeue(&session->reorder_q))) {
0785 atomic_long_inc(&session->stats.rx_errors);
0786 kfree_skb(skb);
0787 }
0788 }
0789
0790
0791
0792
0793
0794
0795
/* Parse a UDP-encapsulated packet for @tunnel. Returns 0 if the packet
 * was consumed as L2TP data, 1 if it should be passed up to the socket
 * (control messages, unknown sessions, malformed packets) — in that case
 * the UDP header is restored first.
 */
static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
{
	struct l2tp_session *session = NULL;
	unsigned char *ptr, *optr;
	u16 hdrflags;
	u32 tunnel_id, session_id;
	u16 version;
	int length;

	/* UDP has already verified checksums; strip its header */
	__skb_pull(skb, sizeof(struct udphdr));

	/* Need at least a maximal L2TP header in the linear area */
	if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX)) {
		pr_debug_ratelimited("%s: recv short packet (len=%d)\n",
				     tunnel->name, skb->len);
		goto invalid;
	}

	/* Remember where the L2TP header starts */
	optr = skb->data;
	ptr = skb->data;

	/* Flags + version live in the first 16 bits */
	hdrflags = ntohs(*(__be16 *)ptr);

	/* The packet's version must match the tunnel's */
	version = hdrflags & L2TP_HDR_VER_MASK;
	if (version != tunnel->version) {
		pr_debug_ratelimited("%s: recv protocol version mismatch: got %d expected %d\n",
				     tunnel->name, version, tunnel->version);
		goto invalid;
	}

	/* Payload length for stats */
	length = skb->len;

	/* Control messages (T flag) are handled in userspace */
	if (hdrflags & L2TP_HDRFLAG_T)
		goto pass;

	/* Skip flags word */
	ptr += 2;

	if (tunnel->version == L2TP_HDR_VER_2) {
		/* optional length field precedes the ids */
		if (hdrflags & L2TP_HDRFLAG_L)
			ptr += 2;

		/* 16-bit tunnel and session ids */
		tunnel_id = ntohs(*(__be16 *)ptr);
		ptr += 2;
		session_id = ntohs(*(__be16 *)ptr);
		ptr += 2;
	} else {
		/* v3: reserved 16 bits, then a 32-bit session id; the
		 * tunnel id is implied by the socket.
		 */
		ptr += 2;
		tunnel_id = tunnel->tunnel_id;
		session_id = ntohl(*(__be32 *)ptr);
		ptr += 4;
	}

	/* Find the session; a session without a receive handler is
	 * treated the same as no session at all.
	 */
	session = l2tp_tunnel_get_session(tunnel, session_id);
	if (!session || !session->recv_skb) {
		if (session)
			l2tp_session_dec_refcount(session);

		pr_debug_ratelimited("%s: no session found (%u/%u). Passing up.\n",
				     tunnel->name, tunnel_id, session_id);
		goto pass;
	}

	/* v3: make sure cookie/L2-specific data is linear before parsing */
	if (tunnel->version == L2TP_HDR_VER_3 &&
	    l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) {
		l2tp_session_dec_refcount(session);
		goto invalid;
	}

	l2tp_recv_common(session, skb, ptr, optr, hdrflags, length);
	l2tp_session_dec_refcount(session);

	return 0;

invalid:
	atomic_long_inc(&tunnel->stats.rx_invalid);

pass:
	/* Put the UDP header back for the caller/userspace */
	__skb_push(skb, sizeof(struct udphdr));

	return 1;
}
0891
0892
0893
0894
0895
0896
0897
/* UDP encap_rcv hook. Return codes follow the encap_rcv contract:
 * 0 means we consumed the packet, 1 means pass it to the UDP socket's
 * normal receive path.
 */
int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct l2tp_tunnel *tunnel;

	/* sk_user_data is read under RCU; a NULL or non-L2TP value means
	 * the socket is not (or no longer) an L2TP tunnel socket.
	 */
	tunnel = rcu_dereference_sk_user_data(sk);
	if (!tunnel)
		goto pass_up;
	if (WARN_ON(tunnel->magic != L2TP_TUNNEL_MAGIC))
		goto pass_up;

	if (l2tp_udp_recv_core(tunnel, skb))
		goto pass_up;

	return 0;

pass_up:
	return 1;
}
EXPORT_SYMBOL_GPL(l2tp_udp_encap_recv);
0923
0924
0925
0926
0927
0928
0929
/* Build an L2TPv2 data header at @buf: flags, peer tunnel id, peer
 * session id, and (when sequencing is on) Ns plus a zero Nr. Advances
 * session->ns with 16-bit wrap. Returns the number of bytes written.
 */
static int l2tp_build_l2tpv2_header(struct l2tp_session *session, void *buf)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	__be16 *bufp = buf;
	__be16 *optr = buf;
	u16 flags = L2TP_HDR_VER_2;
	u32 tunnel_id = tunnel->peer_tunnel_id;
	u32 session_id = session->peer_session_id;

	if (session->send_seq)
		flags |= L2TP_HDRFLAG_S;

	/* Fixed part of the header */
	*bufp++ = htons(flags);
	*bufp++ = htons(tunnel_id);
	*bufp++ = htons(session_id);
	if (session->send_seq) {
		*bufp++ = htons(session->ns);
		*bufp++ = 0;	/* Nr: we don't acknowledge here */
		session->ns++;
		session->ns &= 0xffff;	/* v2 Ns is 16 bits */
		trace_session_seqnum_update(session);
	}

	return bufp - optr;
}
0956
/* Build an L2TPv3 data header at @buf: for UDP encapsulation a 4-byte
 * flags/reserved word, then the peer session id, optional cookie, and —
 * with the default L2-specific sublayer — a 32-bit word carrying the S
 * bit and 24-bit ns. Returns the number of bytes written.
 */
static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	char *bufp = buf;
	char *optr = bufp;

	/* IP encapsulation carries the session id in the transport
	 * header, so only UDP encapsulation writes the flags word here.
	 */
	if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
		u16 flags = L2TP_HDR_VER_3;
		*((__be16 *)bufp) = htons(flags);
		bufp += 2;
		*((__be16 *)bufp) = 0;	/* reserved */
		bufp += 2;
	}

	*((__be32 *)bufp) = htonl(session->peer_session_id);
	bufp += 4;
	if (session->cookie_len) {
		memcpy(bufp, &session->cookie[0], session->cookie_len);
		bufp += session->cookie_len;
	}
	if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
		u32 l2h = 0;

		if (session->send_seq) {
			/* S bit + 24-bit sequence number */
			l2h = 0x40000000 | session->ns;
			session->ns++;
			session->ns &= 0xffffff;
			trace_session_seqnum_update(session);
		}

		*((__be32 *)bufp) = htonl(l2h);
		bufp += 4;
	}

	return bufp - optr;
}
0996
0997
/* Queue a fully built packet on the tunnel socket, picking the IPv4 or
 * IPv6 transmit path. Returns NET_XMIT_SUCCESS or NET_XMIT_DROP.
 */
static int l2tp_xmit_queue(struct l2tp_tunnel *tunnel, struct sk_buff *skb, struct flowi *fl)
{
	int err;

	/* Let the tunnel socket fragment; drop any stale route */
	skb->ignore_df = 1;
	skb_dst_drop(skb);
#if IS_ENABLED(CONFIG_IPV6)
	if (l2tp_sk_is_v6(tunnel->sock))
		err = inet6_csk_xmit(tunnel->sock, skb, NULL);
	else
#endif
		err = ip_queue_xmit(tunnel->sock, skb, fl);

	return err >= 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}
1013
/* Build the L2TP (and, for UDP encapsulation, UDP) headers on @skb and
 * queue it on the tunnel socket. On success *len is set to the bytes
 * queued, for the caller's stats. Consumes the skb on every path.
 */
static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, unsigned int *len)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	unsigned int data_len = skb->len;
	struct sock *sk = tunnel->sock;
	int headroom, uhlen, udp_len;
	int ret = NET_XMIT_SUCCESS;
	struct inet_sock *inet;
	struct udphdr *uh;

	/* Make sure there's headroom for IP + (optional) UDP + L2TP
	 * headers, copying the skb head if needed.
	 */
	uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(*uh) : 0;
	headroom = NET_SKB_PAD + sizeof(struct iphdr) + uhlen + session->hdr_len;
	if (skb_cow_head(skb, headroom)) {
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	/* Prepend the version-appropriate L2TP header */
	if (tunnel->version == L2TP_HDR_VER_2)
		l2tp_build_l2tpv2_header(session, __skb_push(skb, session->hdr_len));
	else
		l2tp_build_l2tpv3_header(session, __skb_push(skb, session->hdr_len));

	/* Clear IP options / netfilter state left over from the inner packet */
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED);
	nf_reset_ct(skb);

	bh_lock_sock(sk);
	/* Can't transmit while userspace owns the socket */
	if (sock_owned_by_user(sk)) {
		kfree_skb(skb);
		ret = NET_XMIT_DROP;
		goto out_unlock;
	}

	/* A userspace-managed socket must be connected before we can use
	 * its cached route/addresses.
	 */
	if (tunnel->fd >= 0 && sk->sk_state != TCP_ESTABLISHED) {
		kfree_skb(skb);
		ret = NET_XMIT_DROP;
		goto out_unlock;
	}

	/* Report the on-the-wire length (headers included) for stats */
	*len = skb->len;

	inet = inet_sk(sk);
	switch (tunnel->encap) {
	case L2TP_ENCAPTYPE_UDP:
		/* Build the UDP header */
		__skb_push(skb, sizeof(*uh));
		skb_reset_transport_header(skb);
		uh = udp_hdr(skb);
		uh->source = inet->inet_sport;
		uh->dest = inet->inet_dport;
		udp_len = uhlen + session->hdr_len + data_len;
		uh->len = htons(udp_len);

		/* UDP checksum, per address family and socket settings */
#if IS_ENABLED(CONFIG_IPV6)
		if (l2tp_sk_is_v6(sk))
			udp6_set_csum(udp_get_no_check6_tx(sk),
				      skb, &inet6_sk(sk)->saddr,
				      &sk->sk_v6_daddr, udp_len);
		else
#endif
			udp_set_csum(sk->sk_no_check_tx, skb, inet->inet_saddr,
				     inet->inet_daddr, udp_len);
		break;

	case L2TP_ENCAPTYPE_IP:
		break;
	}

	ret = l2tp_xmit_queue(tunnel, skb, &inet->cork.fl);

out_unlock:
	bh_unlock_sock(sk);

	return ret;
}
1102
1103
1104
1105
1106 int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb)
1107 {
1108 unsigned int len = 0;
1109 int ret;
1110
1111 ret = l2tp_xmit_core(session, skb, &len);
1112 if (ret == NET_XMIT_SUCCESS) {
1113 atomic_long_inc(&session->tunnel->stats.tx_packets);
1114 atomic_long_add(len, &session->tunnel->stats.tx_bytes);
1115 atomic_long_inc(&session->stats.tx_packets);
1116 atomic_long_add(len, &session->stats.tx_bytes);
1117 } else {
1118 atomic_long_inc(&session->tunnel->stats.tx_errors);
1119 atomic_long_inc(&session->stats.tx_errors);
1120 }
1121 return ret;
1122 }
1123 EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
1124
1125
1126
1127
1128
1129
1130
1131
1132
/* Socket destructor for tunnel sockets: detach the tunnel from the
 * socket, restore the original destructor and run it, then free the
 * tunnel struct after an RCU grace period (lockless readers may still
 * hold a pointer via sk_user_data).
 */
static void l2tp_tunnel_destruct(struct sock *sk)
{
	struct l2tp_tunnel *tunnel = l2tp_sk_to_tunnel(sk);

	if (!tunnel)
		goto end;

	/* Disconnect our encapsulation hooks from the socket */
	switch (tunnel->encap) {
	case L2TP_ENCAPTYPE_UDP:
		(udp_sk(sk))->encap_type = 0;
		(udp_sk(sk))->encap_rcv = NULL;
		(udp_sk(sk))->encap_destroy = NULL;
		break;
	case L2TP_ENCAPTYPE_IP:
		break;
	}

	/* Hand the socket back to its previous destructor */
	sk->sk_destruct = tunnel->old_sk_destruct;
	sk->sk_user_data = NULL;

	if (sk->sk_destruct)
		(*sk->sk_destruct)(sk);

	kfree_rcu(tunnel, rcu);
end:
	return;
}
1164
1165
/* Remove a session from its tunnel's hash and, for non-v2 tunnels, from
 * the per-net hash, then wait for RCU readers so the session can no
 * longer be found via either table.
 */
static void l2tp_session_unhash(struct l2tp_session *session)
{
	struct l2tp_tunnel *tunnel = session->tunnel;

	if (tunnel) {
		/* per-tunnel hash */
		spin_lock_bh(&tunnel->hlist_lock);
		hlist_del_init_rcu(&session->hlist);
		spin_unlock_bh(&tunnel->hlist_lock);

		/* per-net hash (L2TPv3 sessions only) */
		if (tunnel->version != L2TP_HDR_VER_2) {
			struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);

			spin_lock_bh(&pn->l2tp_session_hlist_lock);
			hlist_del_init_rcu(&session->global_hlist);
			spin_unlock_bh(&pn->l2tp_session_hlist_lock);
		}

		/* wait for any in-flight lookups to finish */
		synchronize_rcu();
	}
}
1189
1190
1191
/* Tear down every session in the tunnel. Clears acpt_newsess first so
 * l2tp_session_register refuses new sessions from this point on.
 */
static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
{
	struct l2tp_session *session;
	int hash;

	spin_lock_bh(&tunnel->hlist_lock);
	tunnel->acpt_newsess = false;
	for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
again:
		hlist_for_each_entry_rcu(session, &tunnel->session_hlist[hash], hlist) {
			hlist_del_init_rcu(&session->hlist);

			/* l2tp_session_delete may sleep/take other locks,
			 * so drop the hlist lock around it and restart
			 * the bucket walk afterwards — the list may have
			 * changed while unlocked.
			 */
			spin_unlock_bh(&tunnel->hlist_lock);
			l2tp_session_delete(session);
			spin_lock_bh(&tunnel->hlist_lock);

			goto again;
		}
	}
	spin_unlock_bh(&tunnel->hlist_lock);
}
1218
1219
/* UDP encap_destroy hook: tear down the tunnel attached to this socket,
 * if any.
 */
static void l2tp_udp_encap_destroy(struct sock *sk)
{
	struct l2tp_tunnel *tunnel;

	tunnel = l2tp_sk_to_tunnel(sk);
	if (!tunnel)
		return;

	l2tp_tunnel_delete(tunnel);
}
1227
1228
/* Deferred tunnel teardown, run from the workqueue: close all sessions,
 * release a kernel-created socket, unlink the tunnel from the per-net
 * list, then drop the remaining references.
 */
static void l2tp_tunnel_del_work(struct work_struct *work)
{
	struct l2tp_tunnel *tunnel = container_of(work, struct l2tp_tunnel,
						  del_work);
	struct sock *sk = tunnel->sock;
	struct socket *sock = sk->sk_socket;
	struct l2tp_net *pn;

	l2tp_tunnel_closeall(tunnel);

	/* Only sockets we created ourselves (fd < 0) are shut down and
	 * released here; userspace-owned sockets are left to userspace.
	 */
	if (tunnel->fd < 0) {
		if (sock) {
			kernel_sock_shutdown(sock, SHUT_RDWR);
			sock_release(sock);
		}
	}

	/* Remove from the per-net tunnel list */
	pn = l2tp_pernet(tunnel->l2tp_net);
	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
	list_del_rcu(&tunnel->list);
	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);

	/* drop the initial reference from l2tp_tunnel_create */
	l2tp_tunnel_dec_refcount(tunnel);

	/* drop the reference presumably held by the queued work —
	 * NOTE(review): the queueing site is outside this chunk; confirm.
	 */
	l2tp_tunnel_dec_refcount(tunnel);
}
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
/* Create a kernel socket for an unmanaged tunnel according to @cfg:
 * a connected UDP socket (v4 or v6) for UDP encapsulation, or a bound
 * and connected IPPROTO_L2TP raw-ish socket for IP encapsulation.
 * On success *sockp is set; on failure the socket (if any) is released
 * and *sockp is NULL.
 */
static int l2tp_tunnel_sock_create(struct net *net,
				   u32 tunnel_id,
				   u32 peer_tunnel_id,
				   struct l2tp_tunnel_cfg *cfg,
				   struct socket **sockp)
{
	int err = -EINVAL;
	struct socket *sock = NULL;
	struct udp_port_cfg udp_conf;

	switch (cfg->encap) {
	case L2TP_ENCAPTYPE_UDP:
		memset(&udp_conf, 0, sizeof(udp_conf));

#if IS_ENABLED(CONFIG_IPV6)
		if (cfg->local_ip6 && cfg->peer_ip6) {
			udp_conf.family = AF_INET6;
			memcpy(&udp_conf.local_ip6, cfg->local_ip6,
			       sizeof(udp_conf.local_ip6));
			memcpy(&udp_conf.peer_ip6, cfg->peer_ip6,
			       sizeof(udp_conf.peer_ip6));
			udp_conf.use_udp6_tx_checksums =
			    !cfg->udp6_zero_tx_checksums;
			udp_conf.use_udp6_rx_checksums =
			    !cfg->udp6_zero_rx_checksums;
		} else
#endif
		{
			udp_conf.family = AF_INET;
			udp_conf.local_ip = cfg->local_ip;
			udp_conf.peer_ip = cfg->peer_ip;
			udp_conf.use_udp_checksums = cfg->use_udp_checksums;
		}

		udp_conf.local_udp_port = htons(cfg->local_udp_port);
		udp_conf.peer_udp_port = htons(cfg->peer_udp_port);

		err = udp_sock_create(net, &udp_conf, &sock);
		if (err < 0)
			goto out;

		break;

	case L2TP_ENCAPTYPE_IP:
#if IS_ENABLED(CONFIG_IPV6)
		if (cfg->local_ip6 && cfg->peer_ip6) {
			struct sockaddr_l2tpip6 ip6_addr = {0};

			err = sock_create_kern(net, AF_INET6, SOCK_DGRAM,
					       IPPROTO_L2TP, &sock);
			if (err < 0)
				goto out;

			/* bind to the local address / tunnel id */
			ip6_addr.l2tp_family = AF_INET6;
			memcpy(&ip6_addr.l2tp_addr, cfg->local_ip6,
			       sizeof(ip6_addr.l2tp_addr));
			ip6_addr.l2tp_conn_id = tunnel_id;
			err = kernel_bind(sock, (struct sockaddr *)&ip6_addr,
					  sizeof(ip6_addr));
			if (err < 0)
				goto out;

			/* connect to the peer address / tunnel id */
			ip6_addr.l2tp_family = AF_INET6;
			memcpy(&ip6_addr.l2tp_addr, cfg->peer_ip6,
			       sizeof(ip6_addr.l2tp_addr));
			ip6_addr.l2tp_conn_id = peer_tunnel_id;
			err = kernel_connect(sock,
					     (struct sockaddr *)&ip6_addr,
					     sizeof(ip6_addr), 0);
			if (err < 0)
				goto out;
		} else
#endif
		{
			struct sockaddr_l2tpip ip_addr = {0};

			err = sock_create_kern(net, AF_INET, SOCK_DGRAM,
					       IPPROTO_L2TP, &sock);
			if (err < 0)
				goto out;

			/* bind to the local address / tunnel id */
			ip_addr.l2tp_family = AF_INET;
			ip_addr.l2tp_addr = cfg->local_ip;
			ip_addr.l2tp_conn_id = tunnel_id;
			err = kernel_bind(sock, (struct sockaddr *)&ip_addr,
					  sizeof(ip_addr));
			if (err < 0)
				goto out;

			/* connect to the peer address / tunnel id */
			ip_addr.l2tp_family = AF_INET;
			ip_addr.l2tp_addr = cfg->peer_ip;
			ip_addr.l2tp_conn_id = peer_tunnel_id;
			err = kernel_connect(sock, (struct sockaddr *)&ip_addr,
					     sizeof(ip_addr), 0);
			if (err < 0)
				goto out;
		}
		break;

	default:
		goto out;
	}

out:
	*sockp = sock;
	if (err < 0 && sock) {
		kernel_sock_shutdown(sock, SHUT_RDWR);
		sock_release(sock);
		*sockp = NULL;
	}

	return err;
}
1384
1385 static struct lock_class_key l2tp_socket_class;
1386
1387 int l2tp_tunnel_create(int fd, int version, u32 tunnel_id, u32 peer_tunnel_id,
1388 struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp)
1389 {
1390 struct l2tp_tunnel *tunnel = NULL;
1391 int err;
1392 enum l2tp_encap_type encap = L2TP_ENCAPTYPE_UDP;
1393
1394 if (cfg)
1395 encap = cfg->encap;
1396
1397 tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
1398 if (!tunnel) {
1399 err = -ENOMEM;
1400 goto err;
1401 }
1402
1403 tunnel->version = version;
1404 tunnel->tunnel_id = tunnel_id;
1405 tunnel->peer_tunnel_id = peer_tunnel_id;
1406
1407 tunnel->magic = L2TP_TUNNEL_MAGIC;
1408 sprintf(&tunnel->name[0], "tunl %u", tunnel_id);
1409 spin_lock_init(&tunnel->hlist_lock);
1410 tunnel->acpt_newsess = true;
1411
1412 tunnel->encap = encap;
1413
1414 refcount_set(&tunnel->ref_count, 1);
1415 tunnel->fd = fd;
1416
1417
1418 INIT_WORK(&tunnel->del_work, l2tp_tunnel_del_work);
1419
1420 INIT_LIST_HEAD(&tunnel->list);
1421
1422 err = 0;
1423 err:
1424 if (tunnelp)
1425 *tunnelp = tunnel;
1426
1427 return err;
1428 }
1429 EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
1430
1431 static int l2tp_validate_socket(const struct sock *sk, const struct net *net,
1432 enum l2tp_encap_type encap)
1433 {
1434 if (!net_eq(sock_net(sk), net))
1435 return -EINVAL;
1436
1437 if (sk->sk_type != SOCK_DGRAM)
1438 return -EPROTONOSUPPORT;
1439
1440 if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6)
1441 return -EPROTONOSUPPORT;
1442
1443 if ((encap == L2TP_ENCAPTYPE_UDP && sk->sk_protocol != IPPROTO_UDP) ||
1444 (encap == L2TP_ENCAPTYPE_IP && sk->sk_protocol != IPPROTO_L2TP))
1445 return -EPROTONOSUPPORT;
1446
1447 if (sk->sk_user_data)
1448 return -EBUSY;
1449
1450 return 0;
1451 }
1452
/* Associate @tunnel with a socket and make it live.
 *
 * If the tunnel was created without a userspace socket (fd < 0), an
 * unmanaged kernel socket is created and bound/connected here; otherwise
 * the fd supplied by userspace is looked up and validated.  The tunnel is
 * then added to the per-netns tunnel list (rejecting duplicate tunnel ids
 * with -EEXIST) and the socket is hooked up for L2TP encapsulation.
 *
 * Returns 0 on success or a negative errno; on failure the socket
 * reference taken here is released.
 */
int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
			 struct l2tp_tunnel_cfg *cfg)
{
	struct l2tp_tunnel *tunnel_walk;
	struct l2tp_net *pn;
	struct socket *sock;
	struct sock *sk;
	int ret;

	if (tunnel->fd < 0) {
		/* Kernel-managed socket: create it now. */
		ret = l2tp_tunnel_sock_create(net, tunnel->tunnel_id,
					      tunnel->peer_tunnel_id, cfg,
					      &sock);
		if (ret < 0)
			goto err;
	} else {
		/* Userspace-managed socket: take a reference on the fd. */
		sock = sockfd_lookup(tunnel->fd, &ret);
		if (!sock)
			goto err;

		ret = l2tp_validate_socket(sock->sk, net, tunnel->encap);
		if (ret < 0)
			goto err_sock;
	}

	tunnel->l2tp_net = net;
	pn = l2tp_pernet(net);

	/* Hold the struct sock so it outlives the struct socket / fd. */
	sk = sock->sk;
	sock_hold(sk);
	tunnel->sock = sk;

	/* Publish on the per-netns list unless the id is already taken. */
	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
	list_for_each_entry(tunnel_walk, &pn->l2tp_tunnel_list, list) {
		if (tunnel_walk->tunnel_id == tunnel->tunnel_id) {
			spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
			/* Undo the sock_hold() taken above. */
			sock_put(sk);
			ret = -EEXIST;
			goto err_sock;
		}
	}
	list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);

	if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
		/* Route UDP-encapsulated frames to l2tp_udp_encap_recv(). */
		struct udp_tunnel_sock_cfg udp_cfg = {
			.sk_user_data = tunnel,
			.encap_type = UDP_ENCAP_L2TPINUDP,
			.encap_rcv = l2tp_udp_encap_recv,
			.encap_destroy = l2tp_udp_encap_destroy,
		};

		setup_udp_tunnel_sock(net, sock, &udp_cfg);
	} else {
		sk->sk_user_data = tunnel;
	}

	/* Interpose our destructor so teardown runs when the sock dies. */
	tunnel->old_sk_destruct = sk->sk_destruct;
	sk->sk_destruct = &l2tp_tunnel_destruct;
	/* Separate lockdep class: this sock is now locked from encap-rx paths. */
	lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class,
				   "l2tp_sock");
	sk->sk_allocation = GFP_ATOMIC;

	trace_register_tunnel(tunnel);

	/* Drop the fd reference from sockfd_lookup(); we hold sk instead. */
	if (tunnel->fd >= 0)
		sockfd_put(sock);

	return 0;

err_sock:
	/* Release whichever kind of socket reference we acquired. */
	if (tunnel->fd < 0)
		sock_release(sock);
	else
		sockfd_put(sock);
err:
	return ret;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_register);
1532
1533
1534
1535 void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
1536 {
1537 if (!test_and_set_bit(0, &tunnel->dead)) {
1538 trace_delete_tunnel(tunnel);
1539 l2tp_tunnel_inc_refcount(tunnel);
1540 queue_work(l2tp_wq, &tunnel->del_work);
1541 }
1542 }
1543 EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
1544
1545 void l2tp_session_delete(struct l2tp_session *session)
1546 {
1547 if (test_and_set_bit(0, &session->dead))
1548 return;
1549
1550 trace_delete_session(session);
1551 l2tp_session_unhash(session);
1552 l2tp_session_queue_purge(session);
1553 if (session->session_close)
1554 (*session->session_close)(session);
1555
1556 l2tp_session_dec_refcount(session);
1557 }
1558 EXPORT_SYMBOL_GPL(l2tp_session_delete);
1559
1560
1561
1562
1563 void l2tp_session_set_header_len(struct l2tp_session *session, int version)
1564 {
1565 if (version == L2TP_HDR_VER_2) {
1566 session->hdr_len = 6;
1567 if (session->send_seq)
1568 session->hdr_len += 4;
1569 } else {
1570 session->hdr_len = 4 + session->cookie_len;
1571 session->hdr_len += l2tp_get_l2specific_len(session);
1572 if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP)
1573 session->hdr_len += 4;
1574 }
1575 }
1576 EXPORT_SYMBOL_GPL(l2tp_session_set_header_len);
1577
1578 struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id,
1579 u32 peer_session_id, struct l2tp_session_cfg *cfg)
1580 {
1581 struct l2tp_session *session;
1582
1583 session = kzalloc(sizeof(*session) + priv_size, GFP_KERNEL);
1584 if (session) {
1585 session->magic = L2TP_SESSION_MAGIC;
1586 session->tunnel = tunnel;
1587
1588 session->session_id = session_id;
1589 session->peer_session_id = peer_session_id;
1590 session->nr = 0;
1591 if (tunnel->version == L2TP_HDR_VER_2)
1592 session->nr_max = 0xffff;
1593 else
1594 session->nr_max = 0xffffff;
1595 session->nr_window_size = session->nr_max / 2;
1596 session->nr_oos_count_max = 4;
1597
1598
1599 session->reorder_skip = 1;
1600
1601 sprintf(&session->name[0], "sess %u/%u",
1602 tunnel->tunnel_id, session->session_id);
1603
1604 skb_queue_head_init(&session->reorder_q);
1605
1606 INIT_HLIST_NODE(&session->hlist);
1607 INIT_HLIST_NODE(&session->global_hlist);
1608
1609 if (cfg) {
1610 session->pwtype = cfg->pw_type;
1611 session->send_seq = cfg->send_seq;
1612 session->recv_seq = cfg->recv_seq;
1613 session->lns_mode = cfg->lns_mode;
1614 session->reorder_timeout = cfg->reorder_timeout;
1615 session->l2specific_type = cfg->l2specific_type;
1616 session->cookie_len = cfg->cookie_len;
1617 memcpy(&session->cookie[0], &cfg->cookie[0], cfg->cookie_len);
1618 session->peer_cookie_len = cfg->peer_cookie_len;
1619 memcpy(&session->peer_cookie[0], &cfg->peer_cookie[0], cfg->peer_cookie_len);
1620 }
1621
1622 l2tp_session_set_header_len(session, tunnel->version);
1623
1624 refcount_set(&session->ref_count, 1);
1625
1626 return session;
1627 }
1628
1629 return ERR_PTR(-ENOMEM);
1630 }
1631 EXPORT_SYMBOL_GPL(l2tp_session_create);
1632
1633
1634
1635
1636
1637 static __net_init int l2tp_init_net(struct net *net)
1638 {
1639 struct l2tp_net *pn = net_generic(net, l2tp_net_id);
1640 int hash;
1641
1642 INIT_LIST_HEAD(&pn->l2tp_tunnel_list);
1643 spin_lock_init(&pn->l2tp_tunnel_list_lock);
1644
1645 for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
1646 INIT_HLIST_HEAD(&pn->l2tp_session_hlist[hash]);
1647
1648 spin_lock_init(&pn->l2tp_session_hlist_lock);
1649
1650 return 0;
1651 }
1652
/* Per-netns teardown: delete every tunnel still registered in this
 * namespace and wait until all deferred work has completed.
 */
static __net_exit void l2tp_exit_net(struct net *net)
{
	struct l2tp_net *pn = l2tp_pernet(net);
	struct l2tp_tunnel *tunnel = NULL;
	int hash;

	/* Queue deletion work for each remaining tunnel under RCU; actual
	 * teardown happens asynchronously on l2tp_wq.
	 */
	rcu_read_lock_bh();
	list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
		l2tp_tunnel_delete(tunnel);
	}
	rcu_read_unlock_bh();

	/* Wait for the queued del_work items to run... */
	if (l2tp_wq)
		flush_workqueue(l2tp_wq);
	/* ...and for any in-flight RCU callbacks to drain. */
	rcu_barrier();

	/* Every session should have been removed by tunnel teardown. */
	for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
		WARN_ON_ONCE(!hlist_empty(&pn->l2tp_session_hlist[hash]));
}
1672
/* Per-network-namespace hooks; .id/.size make the core allocate a
 * struct l2tp_net for each netns, retrievable via net_generic().
 */
static struct pernet_operations l2tp_net_ops = {
	.init = l2tp_init_net,
	.exit = l2tp_exit_net,
	.id   = &l2tp_net_id,
	.size = sizeof(struct l2tp_net),
};
1679
1680 static int __init l2tp_init(void)
1681 {
1682 int rc = 0;
1683
1684 rc = register_pernet_device(&l2tp_net_ops);
1685 if (rc)
1686 goto out;
1687
1688 l2tp_wq = alloc_workqueue("l2tp", WQ_UNBOUND, 0);
1689 if (!l2tp_wq) {
1690 pr_err("alloc_workqueue failed\n");
1691 unregister_pernet_device(&l2tp_net_ops);
1692 rc = -ENOMEM;
1693 goto out;
1694 }
1695
1696 pr_info("L2TP core driver, %s\n", L2TP_DRV_VERSION);
1697
1698 out:
1699 return rc;
1700 }
1701
1702 static void __exit l2tp_exit(void)
1703 {
1704 unregister_pernet_device(&l2tp_net_ops);
1705 if (l2tp_wq) {
1706 destroy_workqueue(l2tp_wq);
1707 l2tp_wq = NULL;
1708 }
1709 }
1710
/* Standard module entry/exit hooks and metadata. */
module_init(l2tp_init);
module_exit(l2tp_exit);

MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
MODULE_DESCRIPTION("L2TP core");
MODULE_LICENSE("GPL");
MODULE_VERSION(L2TP_DRV_VERSION);