Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 /*
0003  *  Linux NET3: GRE over IP protocol decoder.
0004  *
0005  *  Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
0006  */
0007 
0008 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0009 
0010 #include <linux/capability.h>
0011 #include <linux/module.h>
0012 #include <linux/types.h>
0013 #include <linux/kernel.h>
0014 #include <linux/slab.h>
0015 #include <linux/uaccess.h>
0016 #include <linux/skbuff.h>
0017 #include <linux/netdevice.h>
0018 #include <linux/in.h>
0019 #include <linux/tcp.h>
0020 #include <linux/udp.h>
0021 #include <linux/if_arp.h>
0022 #include <linux/if_vlan.h>
0023 #include <linux/init.h>
0024 #include <linux/in6.h>
0025 #include <linux/inetdevice.h>
0026 #include <linux/igmp.h>
0027 #include <linux/netfilter_ipv4.h>
0028 #include <linux/etherdevice.h>
0029 #include <linux/if_ether.h>
0030 
0031 #include <net/sock.h>
0032 #include <net/ip.h>
0033 #include <net/icmp.h>
0034 #include <net/protocol.h>
0035 #include <net/ip_tunnels.h>
0036 #include <net/arp.h>
0037 #include <net/checksum.h>
0038 #include <net/dsfield.h>
0039 #include <net/inet_ecn.h>
0040 #include <net/xfrm.h>
0041 #include <net/net_namespace.h>
0042 #include <net/netns/generic.h>
0043 #include <net/rtnetlink.h>
0044 #include <net/gre.h>
0045 #include <net/dst_metadata.h>
0046 #include <net/erspan.h>
0047 
0048 /*
0049    Problems & solutions
0050    --------------------
0051 
0052    1. The most important issue is detecting local dead loops.
0053    They would cause complete host lockup in transmit, which
0054    would be "resolved" by stack overflow or, if queueing is enabled,
0055    with infinite looping in net_bh.
0056 
0057    We cannot track such dead loops during route installation,
0058    it is infeasible task. The most general solutions would be
0059    to keep skb->encapsulation counter (sort of local ttl),
0060    and silently drop packet when it expires. It is a good
0061    solution, but it supposes maintaining new variable in ALL
0062    skb, even if no tunneling is used.
0063 
0064    Current solution: xmit_recursion breaks dead loops. This is a percpu
0065    counter, since when we enter the first ndo_xmit(), cpu migration is
0066    forbidden. We force an exit if this counter reaches RECURSION_LIMIT
0067 
0068    2. Networking dead loops would not kill routers, but would really
0069    kill network. IP hop limit plays role of "t->recursion" in this case,
0070    if we copy it from packet being encapsulated to upper header.
0071    It is very good solution, but it introduces two problems:
0072 
0073    - Routing protocols, using packets with ttl=1 (OSPF, RIP2),
0074      do not work over tunnels.
0075    - traceroute does not work. I planned to relay ICMP from tunnel,
0076      so that this problem would be solved and traceroute output
0077      would be even more informative. This idea appeared to be wrong:
0078      only Linux complies to rfc1812 now (yes, guys, Linux is the only
0079      true router now :-)), all routers (at least, in neighbourhood of mine)
0080      return only 8 bytes of payload. It is the end.
0081 
0082    Hence, if we want that OSPF worked or traceroute said something reasonable,
0083    we should search for another solution.
0084 
0085    One of them is to parse packet trying to detect inner encapsulation
0086    made by our node. It is difficult or even impossible, especially,
0087    taking into account fragmentation. To be short, ttl is not a solution at all.
0088 
0089    Current solution: The solution was UNEXPECTEDLY SIMPLE.
0090    We force DF flag on tunnels with preconfigured hop limit,
0091    that is ALL. :-) Well, it does not remove the problem completely,
0092    but exponential growth of network traffic is changed to linear
0093    (branches, that exceed pmtu are pruned) and tunnel mtu
0094    rapidly degrades to value <68, where looping stops.
0095    Yes, it is not good if there exists a router in the loop,
0096    which does not force DF, even when encapsulating packets have DF set.
0097    But it is not our problem! Nobody could accuse us, we made
0098    all that we could make. Even if it is your gated who injected
0099    fatal route to network, even if it were you who configured
0100    fatal static route: you are innocent. :-)
0101 
0102    Alexey Kuznetsov.
0103  */
0104 
0105 static bool log_ecn_error = true;
0106 module_param(log_ecn_error, bool, 0644);
0107 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
0108 
0109 static struct rtnl_link_ops ipgre_link_ops __read_mostly;
0110 static const struct header_ops ipgre_header_ops;
0111 
0112 static int ipgre_tunnel_init(struct net_device *dev);
0113 static void erspan_build_header(struct sk_buff *skb,
0114                 u32 id, u32 index,
0115                 bool truncate, bool is_ipv4);
0116 
0117 static unsigned int ipgre_net_id __read_mostly;
0118 static unsigned int gre_tap_net_id __read_mostly;
0119 static unsigned int erspan_net_id __read_mostly;
0120 
0121 static int ipgre_err(struct sk_buff *skb, u32 info,
0122              const struct tnl_ptk_info *tpi)
0123 {
0124 
0125     /* All the routers (except for Linux) return only
0126        8 bytes of packet payload. It means, that precise relaying of
0127        ICMP in the real Internet is absolutely infeasible.
0128 
0129        Moreover, Cisco "wise men" put GRE key to the third word
0130        in GRE header. It makes impossible maintaining even soft
0131        state for keyed GRE tunnels with enabled checksum. Tell
0132        them "thank you".
0133 
0134        Well, I wonder, rfc1812 was written by Cisco employee,
0135        what the hell these idiots break standards established
0136        by themselves???
0137        */
0138     struct net *net = dev_net(skb->dev);
0139     struct ip_tunnel_net *itn;
0140     const struct iphdr *iph;
0141     const int type = icmp_hdr(skb)->type;
0142     const int code = icmp_hdr(skb)->code;
0143     unsigned int data_len = 0;
0144     struct ip_tunnel *t;
0145 
0146     if (tpi->proto == htons(ETH_P_TEB))
0147         itn = net_generic(net, gre_tap_net_id);
0148     else if (tpi->proto == htons(ETH_P_ERSPAN) ||
0149          tpi->proto == htons(ETH_P_ERSPAN2))
0150         itn = net_generic(net, erspan_net_id);
0151     else
0152         itn = net_generic(net, ipgre_net_id);
0153 
0154     iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
0155     t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
0156                  iph->daddr, iph->saddr, tpi->key);
0157 
0158     if (!t)
0159         return -ENOENT;
0160 
0161     switch (type) {
0162     default:
0163     case ICMP_PARAMETERPROB:
0164         return 0;
0165 
0166     case ICMP_DEST_UNREACH:
0167         switch (code) {
0168         case ICMP_SR_FAILED:
0169         case ICMP_PORT_UNREACH:
0170             /* Impossible event. */
0171             return 0;
0172         default:
0173             /* All others are translated to HOST_UNREACH.
0174                rfc2003 contains "deep thoughts" about NET_UNREACH,
0175                I believe they are just ether pollution. --ANK
0176              */
0177             break;
0178         }
0179         break;
0180 
0181     case ICMP_TIME_EXCEEDED:
0182         if (code != ICMP_EXC_TTL)
0183             return 0;
0184         data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */
0185         break;
0186 
0187     case ICMP_REDIRECT:
0188         break;
0189     }
0190 
0191 #if IS_ENABLED(CONFIG_IPV6)
0192        if (tpi->proto == htons(ETH_P_IPV6) &&
0193            !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len,
0194                        type, data_len))
0195                return 0;
0196 #endif
0197 
0198     if (t->parms.iph.daddr == 0 ||
0199         ipv4_is_multicast(t->parms.iph.daddr))
0200         return 0;
0201 
0202     if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
0203         return 0;
0204 
0205     if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
0206         t->err_count++;
0207     else
0208         t->err_count = 1;
0209     t->err_time = jiffies;
0210 
0211     return 0;
0212 }
0213 
0214 static void gre_err(struct sk_buff *skb, u32 info)
0215 {
0216     /* All the routers (except for Linux) return only
0217      * 8 bytes of packet payload. It means, that precise relaying of
0218      * ICMP in the real Internet is absolutely infeasible.
0219      *
0220      * Moreover, Cisco "wise men" put GRE key to the third word
0221      * in GRE header. It makes impossible maintaining even soft
0222      * state for keyed
0223      * GRE tunnels with enabled checksum. Tell them "thank you".
0224      *
0225      * Well, I wonder, rfc1812 was written by Cisco employee,
0226      * what the hell these idiots break standards established
0227      * by themselves???
0228      */
0229 
0230     const struct iphdr *iph = (struct iphdr *)skb->data;
0231     const int type = icmp_hdr(skb)->type;
0232     const int code = icmp_hdr(skb)->code;
0233     struct tnl_ptk_info tpi;
0234 
0235     if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IP),
0236                  iph->ihl * 4) < 0)
0237         return;
0238 
0239     if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
0240         ipv4_update_pmtu(skb, dev_net(skb->dev), info,
0241                  skb->dev->ifindex, IPPROTO_GRE);
0242         return;
0243     }
0244     if (type == ICMP_REDIRECT) {
0245         ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex,
0246                   IPPROTO_GRE);
0247         return;
0248     }
0249 
0250     ipgre_err(skb, info, &tpi);
0251 }
0252 
/* Distinguish ERSPAN type I from type II by the GRE header length.
 *
 * Both type I (version 0) and type II (version 1) use protocol 0x88BE,
 * but type I carries only the bare 4-byte GRE header, while type II
 * uses an 8-byte one.
 */
static bool is_erspan_type1(int gre_hdr_len)
{
	const int type1_hdr_len = 4;

	return gre_hdr_len == type1_hdr_len;
}
0261 
/* Receive one ERSPAN-encapsulated frame.
 *
 * @skb:         packet positioned after the outer IP header.
 * @tpi:         GRE fields parsed by gre_parse_header().
 * @gre_hdr_len: GRE header length (4 for type I, 8 for type II).
 *
 * Returns PACKET_RCVD when the packet was consumed (including internal
 * drops), PACKET_REJECT when no tunnel matched or metadata could not be
 * allocated.
 */
static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
		      int gre_hdr_len)
{
	struct net *net = dev_net(skb->dev);
	struct metadata_dst *tun_dst = NULL;
	struct erspan_base_hdr *ershdr;
	struct ip_tunnel_net *itn;
	struct ip_tunnel *tunnel;
	const struct iphdr *iph;
	struct erspan_md2 *md2;
	int ver;
	int len;

	itn = net_generic(net, erspan_net_id);
	iph = ip_hdr(skb);
	if (is_erspan_type1(gre_hdr_len)) {
		/* Type I: no ERSPAN header after GRE, and no GRE key. */
		ver = 0;
		tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
					  tpi->flags | TUNNEL_NO_KEY,
					  iph->saddr, iph->daddr, 0);
	} else {
		/* NOTE(review): ershdr->ver is read here, before the
		 * pskb_may_pull(skb, len) below — confirm the base header
		 * is guaranteed linear at this offset by the caller.
		 */
		ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
		ver = ershdr->ver;
		tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
					  tpi->flags | TUNNEL_KEY,
					  iph->saddr, iph->daddr, tpi->key);
	}

	if (tunnel) {
		/* Total encapsulation to strip: GRE header plus, for
		 * type II, the version-dependent ERSPAN header.
		 */
		if (is_erspan_type1(gre_hdr_len))
			len = gre_hdr_len;
		else
			len = gre_hdr_len + erspan_hdr_len(ver);

		if (unlikely(!pskb_may_pull(skb, len)))
			return PACKET_REJECT;

		if (__iptunnel_pull_header(skb,
					   len,
					   htons(ETH_P_TEB),
					   false, false) < 0)
			goto drop;

		if (tunnel->collect_md) {
			struct erspan_metadata *pkt_md, *md;
			struct ip_tunnel_info *info;
			unsigned char *gh;
			__be64 tun_id;
			__be16 flags;

			tpi->flags |= TUNNEL_KEY;
			flags = tpi->flags;
			tun_id = key32_to_tunnel_id(tpi->key);

			tun_dst = ip_tun_rx_dst(skb, flags,
						tun_id, sizeof(*md));
			if (!tun_dst)
				return PACKET_REJECT;

			/* skb can be uncloned in __iptunnel_pull_header, so
			 * old pkt_md is no longer valid and we need to reset
			 * it
			 */
			gh = skb_network_header(skb) +
			     skb_network_header_len(skb);
			pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
							    sizeof(*ershdr));
			/* Copy the on-wire ERSPAN metadata into the tunnel
			 * info options so collect_md consumers can read it.
			 */
			md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
			md->version = ver;
			md2 = &md->u.md2;
			memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
					    ERSPAN_V2_MDSIZE);

			info = &tun_dst->u.tun_info;
			info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
			info->options_len = sizeof(*md);
		}

		skb_reset_mac_header(skb);
		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		return PACKET_RCVD;
	}
	return PACKET_REJECT;

drop:
	kfree_skb(skb);
	return PACKET_RCVD;
}
0350 
/* Try to deliver a GRE packet to a tunnel in the given table.
 *
 * @skb:       packet positioned after the outer IP header.
 * @tpi:       parsed GRE header fields.
 * @itn:       per-netns tunnel table to search.
 * @hdr_len:   GRE header length to strip.
 * @raw_proto: passed through to __iptunnel_pull_header(); set on the
 *             second-chance lookup in ipgre_rcv().
 *
 * Returns PACKET_RCVD when consumed (including internal drops),
 * PACKET_REJECT on metadata allocation failure, PACKET_NEXT when no
 * tunnel in this table matched (caller may retry another table).
 */
static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
		       struct ip_tunnel_net *itn, int hdr_len, bool raw_proto)
{
	struct metadata_dst *tun_dst = NULL;
	const struct iphdr *iph;
	struct ip_tunnel *tunnel;

	iph = ip_hdr(skb);
	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
				  iph->saddr, iph->daddr, tpi->key);

	if (tunnel) {
		const struct iphdr *tnl_params;

		if (__iptunnel_pull_header(skb, hdr_len, tpi->proto,
					   raw_proto, false) < 0)
			goto drop;

		/* Special case for ipgre_header_parse(), which expects the
		 * mac_header to point to the outer IP header.
		 */
		if (tunnel->dev->header_ops == &ipgre_header_ops)
			skb_pop_mac_header(skb);
		else
			skb_reset_mac_header(skb);

		/* Wildcard-remote or collect_md tunnels need per-packet
		 * metadata describing the actual outer addresses/key.
		 */
		tnl_params = &tunnel->parms.iph;
		if (tunnel->collect_md || tnl_params->daddr == 0) {
			__be16 flags;
			__be64 tun_id;

			flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
			tun_id = key32_to_tunnel_id(tpi->key);
			tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
			if (!tun_dst)
				return PACKET_REJECT;
		}

		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		return PACKET_RCVD;
	}
	return PACKET_NEXT;

drop:
	kfree_skb(skb);
	return PACKET_RCVD;
}
0398 
0399 static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
0400              int hdr_len)
0401 {
0402     struct net *net = dev_net(skb->dev);
0403     struct ip_tunnel_net *itn;
0404     int res;
0405 
0406     if (tpi->proto == htons(ETH_P_TEB))
0407         itn = net_generic(net, gre_tap_net_id);
0408     else
0409         itn = net_generic(net, ipgre_net_id);
0410 
0411     res = __ipgre_rcv(skb, tpi, itn, hdr_len, false);
0412     if (res == PACKET_NEXT && tpi->proto == htons(ETH_P_TEB)) {
0413         /* ipgre tunnels in collect metadata mode should receive
0414          * also ETH_P_TEB traffic.
0415          */
0416         itn = net_generic(net, ipgre_net_id);
0417         res = __ipgre_rcv(skb, tpi, itn, hdr_len, true);
0418     }
0419     return res;
0420 }
0421 
/* Main receive handler registered for IPPROTO_GRE.
 *
 * Parses the GRE header, then routes the packet to the ERSPAN or plain
 * GRE receive paths. Unclaimed packets trigger an ICMP port-unreachable
 * and are dropped. Always returns 0 (packet consumed either way).
 */
static int gre_rcv(struct sk_buff *skb)
{
	struct tnl_ptk_info tpi;
	bool csum_err = false;
	int hdr_len;

#ifdef CONFIG_NET_IPGRE_BROADCAST
	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
		/* Looped back packet, drop it! */
		if (rt_is_output_route(skb_rtable(skb)))
			goto drop;
	}
#endif

	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
	if (hdr_len < 0)
		goto drop;

	/* ERSPAN shares IPPROTO_GRE but has its own receive path. */
	if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
		     tpi.proto == htons(ETH_P_ERSPAN2))) {
		if (erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
			return 0;
		goto out;
	}

	if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
		return 0;

out:
	/* No tunnel claimed the packet: tell the sender, then drop. */
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
drop:
	kfree_skb(skb);
	return 0;
}
0456 
/* Push the GRE header for a statically-configured tunnel and hand the
 * packet to the generic IPv4 tunnel transmit path.
 *
 * @skb:        packet to encapsulate.
 * @dev:        transmitting tunnel device.
 * @tnl_params: outer IP header template (device config or prebuilt
 *              header in the skb for header_ops devices).
 * @proto:      GRE protocol field (inner EtherType).
 *
 * The sequence number is taken with an atomic fetch-and-increment only
 * when TUNNEL_SEQ is configured, so concurrent transmitters never emit
 * duplicate sequence numbers.
 */
static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
		       const struct iphdr *tnl_params,
		       __be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	__be16 flags = tunnel->parms.o_flags;

	/* Push GRE header. */
	gre_build_header(skb, tunnel->tun_hlen,
			 flags, proto, tunnel->parms.o_key,
			 (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);

	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
}
0471 
0472 static int gre_handle_offloads(struct sk_buff *skb, bool csum)
0473 {
0474     return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
0475 }
0476 
/* Transmit on a collect_md (flow-based) GRE device: all tunnel
 * parameters come from the per-skb tunnel metadata instead of the
 * device configuration. Drops the packet (counting tx_dropped) on any
 * failure.
 */
static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
			__be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	int tunnel_hlen;
	__be16 flags;

	/* Only IPv4 TX metadata can feed an IPv4 GRE header. */
	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto err_free_skb;

	key = &tun_info->key;
	tunnel_hlen = gre_calc_hlen(key->tun_flags);

	if (skb_cow_head(skb, dev->needed_headroom))
		goto err_free_skb;

	/* Push Tunnel header. */
	if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))
		goto err_free_skb;

	/* Only CSUM/KEY/SEQ are meaningful as GRE header flags here. */
	flags = tun_info->key.tun_flags &
		(TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
	gre_build_header(skb, tunnel_hlen, flags, proto,
			 tunnel_id_to_key32(tun_info->key.tun_id),
			 (flags & TUNNEL_SEQ) ? htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);

	ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);

	return;

err_free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
}
0515 
/* Transmit on a collect_md (flow-based) ERSPAN device. The ERSPAN
 * version and session metadata come from the per-skb tunnel info
 * options (TUNNEL_ERSPAN_OPT). Drops the packet (counting tx_dropped)
 * on any failure or unknown version.
 */
static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	struct erspan_metadata *md;
	bool truncate = false;
	__be16 proto;
	int tunnel_hlen;
	int version;
	int nhoff;

	/* Require IPv4 TX metadata carrying ERSPAN options of at least
	 * sizeof(*md).
	 */
	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto err_free_skb;

	key = &tun_info->key;
	if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
		goto err_free_skb;
	if (tun_info->options_len < sizeof(*md))
		goto err_free_skb;
	md = ip_tunnel_info_opts(tun_info);

	/* ERSPAN has fixed 8 byte GRE header */
	version = md->version;
	tunnel_hlen = 8 + erspan_hdr_len(version);

	if (skb_cow_head(skb, dev->needed_headroom))
		goto err_free_skb;

	if (gre_handle_offloads(skb, false))
		goto err_free_skb;

	/* ERSPAN truncates oversized frames rather than fragmenting and
	 * flags the truncation in its header.
	 */
	if (skb->len > dev->mtu + dev->hard_header_len) {
		pskb_trim(skb, dev->mtu + dev->hard_header_len);
		truncate = true;
	}

	/* Also mark as truncated when the L3 length claims more payload
	 * than the skb actually carries (already-truncated mirror).
	 */
	nhoff = skb_network_header(skb) - skb_mac_header(skb);
	if (skb->protocol == htons(ETH_P_IP) &&
	    (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
		truncate = true;

	if (skb->protocol == htons(ETH_P_IPV6)) {
		int thoff;

		if (skb_transport_header_was_set(skb))
			thoff = skb_transport_header(skb) - skb_mac_header(skb);
		else
			thoff = nhoff + sizeof(struct ipv6hdr);
		if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff)
			truncate = true;
	}

	/* Build the version-specific ERSPAN header; v1 and v2 use
	 * different EtherTypes in the GRE protocol field.
	 */
	if (version == 1) {
		erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
				    ntohl(md->u.index), truncate, true);
		proto = htons(ETH_P_ERSPAN);
	} else if (version == 2) {
		erspan_build_header_v2(skb,
				       ntohl(tunnel_id_to_key32(key->tun_id)),
				       md->u.md2.dir,
				       get_hwid(&md->u.md2),
				       truncate, true);
		proto = htons(ETH_P_ERSPAN2);
	} else {
		goto err_free_skb;
	}

	gre_build_header(skb, 8, TUNNEL_SEQ,
			 proto, 0, htonl(atomic_fetch_inc(&tunnel->o_seqno)));

	ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);

	return;

err_free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
}
0597 
/* ndo_fill_metadata_dst: resolve the local (source) address a packet
 * with this tunnel metadata would use, by performing a route lookup
 * toward the metadata destination, and store it back in the metadata.
 *
 * Returns 0 on success, -EINVAL for non-IPv4 metadata, or the route
 * lookup error.
 */
static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	const struct ip_tunnel_key *key;
	struct rtable *rt;
	struct flowi4 fl4;

	if (ip_tunnel_info_af(info) != AF_INET)
		return -EINVAL;

	/* Build the same flow the transmit path would use (ECN bits
	 * masked out of the TOS) and route it.
	 */
	key = &info->key;
	ip_tunnel_init_flow(&fl4, IPPROTO_GRE, key->u.ipv4.dst, key->u.ipv4.src,
			    tunnel_id_to_key32(key->tun_id),
			    key->tos & ~INET_ECN_MASK, dev_net(dev), 0,
			    skb->mark, skb_get_hash(skb), key->flow_flags);
	rt = ip_route_output_key(dev_net(dev), &fl4);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	/* Only the chosen source address is needed; release the route. */
	ip_rt_put(rt);
	info->key.u.ipv4.src = fl4.saddr;
	return 0;
}
0621 
/* ndo_start_xmit for layer-3 GRE devices.
 *
 * Three configurations are handled: collect_md (metadata-based, via
 * gre_fb_xmit()), header_ops devices where the outer IP+GRE header was
 * already built by ipgre_header() and sits at skb->data, and ordinary
 * devices using the configured tunnel parameters. Always returns
 * NETDEV_TX_OK; failed packets are freed and counted in tx_dropped.
 */
static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
			      struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *tnl_params;

	if (!pskb_inet_may_pull(skb))
		goto free_skb;

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev, skb->protocol);
		return NETDEV_TX_OK;
	}

	if (dev->header_ops) {
		if (skb_cow_head(skb, 0))
			goto free_skb;

		/* The prebuilt outer IP header serves as the template. */
		tnl_params = (const struct iphdr *)skb->data;

		/* Pull skb since ip_tunnel_xmit() needs skb->data pointing
		 * to gre header.
		 */
		skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
		skb_reset_mac_header(skb);

		/* The checksum start must not land inside the header we
		 * just pulled off.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_start(skb) < skb->data)
			goto free_skb;
	} else {
		if (skb_cow_head(skb, dev->needed_headroom))
			goto free_skb;

		tnl_params = &tunnel->parms.iph;
	}

	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
		goto free_skb;

	__gre_xmit(skb, dev, tnl_params, skb->protocol);
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
0669 
/* ndo_start_xmit for configured (non-metadata) ERSPAN devices.
 *
 * Pushes the version-specific ERSPAN header (none for version 0), then
 * the GRE header via __gre_xmit(). Oversized frames are truncated and
 * flagged rather than fragmented. Always returns NETDEV_TX_OK; failed
 * packets are freed and counted in tx_dropped.
 */
static netdev_tx_t erspan_xmit(struct sk_buff *skb,
			       struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	bool truncate = false;
	__be16 proto;

	if (!pskb_inet_may_pull(skb))
		goto free_skb;

	if (tunnel->collect_md) {
		erspan_fb_xmit(skb, dev);
		return NETDEV_TX_OK;
	}

	if (gre_handle_offloads(skb, false))
		goto free_skb;

	if (skb_cow_head(skb, dev->needed_headroom))
		goto free_skb;

	if (skb->len > dev->mtu + dev->hard_header_len) {
		pskb_trim(skb, dev->mtu + dev->hard_header_len);
		truncate = true;
	}

	/* Push ERSPAN header */
	if (tunnel->erspan_ver == 0) {
		/* Type I: bare GRE header only; sequence numbers are not
		 * used, so drop TUNNEL_SEQ from the device flags.
		 */
		proto = htons(ETH_P_ERSPAN);
		tunnel->parms.o_flags &= ~TUNNEL_SEQ;
	} else if (tunnel->erspan_ver == 1) {
		erspan_build_header(skb, ntohl(tunnel->parms.o_key),
				    tunnel->index,
				    truncate, true);
		proto = htons(ETH_P_ERSPAN);
	} else if (tunnel->erspan_ver == 2) {
		erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
				       tunnel->dir, tunnel->hwid,
				       truncate, true);
		proto = htons(ETH_P_ERSPAN2);
	} else {
		goto free_skb;
	}

	/* The session id already went into the ERSPAN header; do not
	 * also emit a GRE key field.
	 */
	tunnel->parms.o_flags &= ~TUNNEL_KEY;
	__gre_xmit(skb, dev, &tunnel->parms.iph, proto);
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
0723 
0724 static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
0725                 struct net_device *dev)
0726 {
0727     struct ip_tunnel *tunnel = netdev_priv(dev);
0728 
0729     if (!pskb_inet_may_pull(skb))
0730         goto free_skb;
0731 
0732     if (tunnel->collect_md) {
0733         gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
0734         return NETDEV_TX_OK;
0735     }
0736 
0737     if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
0738         goto free_skb;
0739 
0740     if (skb_cow_head(skb, dev->needed_headroom))
0741         goto free_skb;
0742 
0743     __gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
0744     return NETDEV_TX_OK;
0745 
0746 free_skb:
0747     kfree_skb(skb);
0748     dev->stats.tx_dropped++;
0749     return NETDEV_TX_OK;
0750 }
0751 
/* Recompute derived header lengths after the tunnel's output flags
 * changed (e.g. key/csum/seq toggled via netlink or ioctl), adjust the
 * device MTU/headroom accordingly, and enable or disable software GSO:
 * sequence numbers (and checksums combined with an encap layer) cannot
 * be generated correctly per-segment by GSO.
 */
static void ipgre_link_update(struct net_device *dev, bool set_mtu)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	__be16 flags;
	int len;

	/* len becomes the delta between the old and new GRE header size. */
	len = tunnel->tun_hlen;
	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
	len = tunnel->tun_hlen - len;
	tunnel->hlen = tunnel->hlen + len;

	if (dev->header_ops)
		dev->hard_header_len += len;
	else
		dev->needed_headroom += len;

	/* 68 is the historical IPv4 minimum MTU. */
	if (set_mtu)
		dev->mtu = max_t(int, dev->mtu - len, 68);

	flags = tunnel->parms.o_flags;

	if (flags & TUNNEL_SEQ ||
	    (flags & TUNNEL_CSUM && tunnel->encap.type != TUNNEL_ENCAP_NONE)) {
		dev->features &= ~NETIF_F_GSO_SOFTWARE;
		dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
	} else {
		dev->features |= NETIF_F_GSO_SOFTWARE;
		dev->hw_features |= NETIF_F_GSO_SOFTWARE;
	}
}
0782 
/* ndo_tunnel_ctl: handle the legacy SIOC*TUNNEL ioctls for GRE devices.
 *
 * Validates user-supplied parameters for add/change, converts the GRE
 * wire-format flags to internal tunnel flags for the generic handler,
 * and converts them back before returning the struct to user space.
 * Returns 0 on success or a negative errno.
 */
static int ipgre_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p,
			    int cmd)
{
	int err;

	/* Reject anything that is not a plain IPv4/GRE header template,
	 * or that requests GRE features we do not implement (routing,
	 * nonzero version).
	 */
	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
		if (p->iph.version != 4 || p->iph.protocol != IPPROTO_GRE ||
		    p->iph.ihl != 5 || (p->iph.frag_off & htons(~IP_DF)) ||
		    ((p->i_flags | p->o_flags) & (GRE_VERSION | GRE_ROUTING)))
			return -EINVAL;
	}

	p->i_flags = gre_flags_to_tnl_flags(p->i_flags);
	p->o_flags = gre_flags_to_tnl_flags(p->o_flags);

	err = ip_tunnel_ctl(dev, p, cmd);
	if (err)
		return err;

	if (cmd == SIOCCHGTUNNEL) {
		struct ip_tunnel *t = netdev_priv(dev);

		t->parms.i_flags = p->i_flags;
		t->parms.o_flags = p->o_flags;

		/* ERSPAN devices keep a fixed header layout; only plain
		 * GRE/gretap need their derived lengths recomputed.
		 */
		if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
			ipgre_link_update(dev, true);
	}

	/* Convert back to wire format for the copy-out to user space. */
	p->i_flags = gre_tnl_flags_to_gre_flags(p->i_flags);
	p->o_flags = gre_tnl_flags_to_gre_flags(p->o_flags);
	return 0;
}
0816 
0817 /* Nice toy. Unfortunately, useless in real life :-)
0818    It allows to construct virtual multiprotocol broadcast "LAN"
0819    over the Internet, provided multicast routing is tuned.
0820 
0821 
0822    I have no idea whether this bicycle was invented before me,
0823    so that I had to set ARPHRD_IPGRE to a random value.
0824    I have an impression, that Cisco could make something similar,
0825    but this feature is apparently missing in IOS<=11.2(8).
0826 
0827    I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
0828    with broadcast 224.66.66.66. If you have access to mbone, play with me :-)
0829 
0830    ping -t 255 224.66.66.66
0831 
0832    If nobody answers, mbone does not work.
0833 
0834    ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
0835    ip addr add 10.66.66.<somewhat>/24 dev Universe
0836    ifconfig Universe up
0837    ifconfig Universe add fe80::<Your_real_addr>/10
0838    ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
0839    ftp 10.66.66.66
0840    ...
0841    ftp fec0:6666:6666::193.233.7.65
0842    ...
0843  */
/* header_ops .create: prebuild the outer IPv4 + GRE header in front of
 * the payload (used by e.g. NBMA-style setups, see comment above).
 *
 * @daddr/@saddr are 4-byte IPv4 addresses overriding the configured
 * tunnel endpoints when non-NULL. Returns the positive header length
 * when a destination is known, or its negative when the destination is
 * still unresolved (standard hard_header convention).
 */
static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
			unsigned short type,
			const void *daddr, const void *saddr, unsigned int len)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct iphdr *iph;
	struct gre_base_hdr *greh;

	/* Reserve room for outer IP header + full GRE header. */
	iph = skb_push(skb, t->hlen + sizeof(*iph));
	greh = (struct gre_base_hdr *)(iph+1);
	greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
	greh->protocol = htons(type);

	/* Start from the configured outer header template. */
	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));

	/* Set the source hardware address. */
	if (saddr)
		memcpy(&iph->saddr, saddr, 4);
	if (daddr)
		memcpy(&iph->daddr, daddr, 4);
	if (iph->daddr)
		return t->hlen + sizeof(*iph);

	return -(t->hlen + sizeof(*iph));
}
0869 
0870 static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
0871 {
0872     const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);
0873     memcpy(haddr, &iph->saddr, 4);
0874     return 4;
0875 }
0876 
/* Link-layer header operations for GRE devices created with an
 * explicit hardware address (see ipgre_header()/ipgre_header_parse()).
 */
static const struct header_ops ipgre_header_ops = {
	.create	= ipgre_header,
	.parse	= ipgre_header_parse,
};
0881 
0882 #ifdef CONFIG_NET_IPGRE_BROADCAST
/* ndo_open for broadcast-capable GRE devices: if the tunnel remote is a
 * multicast group, join that group on the underlying output device so
 * encapsulated multicast traffic is actually received.
 *
 * Returns 0 on success, -EADDRNOTAVAIL when no route or no inet device
 * exists for the group.
 */
static int ipgre_open(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr)) {
		struct flowi4 fl4;
		struct rtable *rt;

		/* Route toward the multicast group to find the real
		 * output device to join on.
		 */
		rt = ip_route_output_gre(t->net, &fl4,
					 t->parms.iph.daddr,
					 t->parms.iph.saddr,
					 t->parms.o_key,
					 RT_TOS(t->parms.iph.tos),
					 t->parms.link);
		if (IS_ERR(rt))
			return -EADDRNOTAVAIL;
		dev = rt->dst.dev;
		ip_rt_put(rt);
		if (!__in_dev_get_rtnl(dev))
			return -EADDRNOTAVAIL;
		/* Remember the joined device for ipgre_close(). */
		t->mlink = dev->ifindex;
		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
	}
	return 0;
}
0908 
0909 static int ipgre_close(struct net_device *dev)
0910 {
0911     struct ip_tunnel *t = netdev_priv(dev);
0912 
0913     if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
0914         struct in_device *in_dev;
0915         in_dev = inetdev_by_index(t->net, t->mlink);
0916         if (in_dev)
0917             ip_mc_dec_group(in_dev, t->parms.iph.daddr);
0918     }
0919     return 0;
0920 }
0921 #endif
0922 
/* Device operations for plain layer-3 "gre" devices.  open/stop are
 * only needed to manage the multicast membership in broadcast mode.
 */
static const struct net_device_ops ipgre_netdev_ops = {
    .ndo_init       = ipgre_tunnel_init,
    .ndo_uninit     = ip_tunnel_uninit,
#ifdef CONFIG_NET_IPGRE_BROADCAST
    .ndo_open       = ipgre_open,
    .ndo_stop       = ipgre_close,
#endif
    .ndo_start_xmit     = ipgre_xmit,
    .ndo_siocdevprivate = ip_tunnel_siocdevprivate,
    .ndo_change_mtu     = ip_tunnel_change_mtu,
    .ndo_get_stats64    = dev_get_tstats64,
    .ndo_get_iflink     = ip_tunnel_get_iflink,
    .ndo_tunnel_ctl     = ipgre_tunnel_ctl,
};
0937 
/* Offload features every GRE flavour advertises unconditionally;
 * GSO is added separately in __gre_tunnel_init() when possible.
 */
#define GRE_FEATURES (NETIF_F_SG |      \
              NETIF_F_FRAGLIST |    \
              NETIF_F_HIGHDMA |     \
              NETIF_F_HW_CSUM)
0942 
/* rtnl ->setup for the layer-3 "gre" kind (ARPHRD_IPGRE devices). */
static void ipgre_tunnel_setup(struct net_device *dev)
{
    dev->netdev_ops     = &ipgre_netdev_ops;
    dev->type       = ARPHRD_IPGRE;
    ip_tunnel_setup(dev, ipgre_net_id);
}
0949 
/* Shared init for "gre" and "gretap": compute header lengths from the
 * configured output flags and advertise the matching offload features.
 */
static void __gre_tunnel_init(struct net_device *dev)
{
    struct ip_tunnel *tunnel;
    __be16 flags;

    tunnel = netdev_priv(dev);
    /* GRE header length depends on which of key/csum/seq are enabled. */
    tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
    tunnel->parms.iph.protocol = IPPROTO_GRE;

    /* Total tunnel overhead: GRE header plus any FOU/GUE encap header. */
    tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
    dev->needed_headroom = tunnel->hlen + sizeof(tunnel->parms.iph);

    dev->features       |= GRE_FEATURES | NETIF_F_LLTX;
    dev->hw_features    |= GRE_FEATURES;

    flags = tunnel->parms.o_flags;

    /* TCP offload with GRE SEQ is not supported, nor can we support 2
     * levels of outer headers requiring an update.
     */
    if (flags & TUNNEL_SEQ)
        return;
    if (flags & TUNNEL_CSUM && tunnel->encap.type != TUNNEL_ENCAP_NONE)
        return;

    /* Safe to do software GSO for this configuration. */
    dev->features |= NETIF_F_GSO_SOFTWARE;
    dev->hw_features |= NETIF_F_GSO_SOFTWARE;
}
0978 
/* ndo_init for plain "gre" devices: seed device addresses from the
 * outer IP endpoints and install header_ops when a fixed destination
 * (point-to-point or multicast broadcast) is configured.
 */
static int ipgre_tunnel_init(struct net_device *dev)
{
    struct ip_tunnel *tunnel = netdev_priv(dev);
    struct iphdr *iph = &tunnel->parms.iph;

    __gre_tunnel_init(dev);

    /* dev_addr/broadcast mirror the 4-byte outer IPv4 endpoints. */
    __dev_addr_set(dev, &iph->saddr, 4);
    memcpy(dev->broadcast, &iph->daddr, 4);

    dev->flags      = IFF_NOARP;
    netif_keep_dst(dev);
    dev->addr_len       = 4;

    if (iph->daddr && !tunnel->collect_md) {
#ifdef CONFIG_NET_IPGRE_BROADCAST
        if (ipv4_is_multicast(iph->daddr)) {
            /* Multicast "broadcast" GRE requires a local address. */
            if (!iph->saddr)
                return -EINVAL;
            dev->flags = IFF_BROADCAST;
            dev->header_ops = &ipgre_header_ops;
            /* header_ops builds the full header, so account for it
             * in hard_header_len and drop needed_headroom.
             */
            dev->hard_header_len = tunnel->hlen + sizeof(*iph);
            dev->needed_headroom = 0;
        }
#endif
    } else if (!tunnel->collect_md) {
        dev->header_ops = &ipgre_header_ops;
        dev->hard_header_len = tunnel->hlen + sizeof(*iph);
        dev->needed_headroom = 0;
    }

    return ip_tunnel_init(dev);
}
1012 
/* Hooks registered with the GRE demux for receive and ICMP errors. */
static const struct gre_protocol ipgre_protocol = {
    .handler     = gre_rcv,
    .err_handler = gre_err,
};
1017 
/* Per-netns init for the "gre" kind (NULL: no fallback device name —
 * see ip_tunnel_init_net()).
 */
static int __net_init ipgre_init_net(struct net *net)
{
    return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
}
1022 
/* Batched per-netns teardown of all "gre" devices. */
static void __net_exit ipgre_exit_batch_net(struct list_head *list_net)
{
    ip_tunnel_delete_nets(list_net, ipgre_net_id, &ipgre_link_ops);
}
1027 
/* Per-network-namespace state registration for "gre" tunnels. */
static struct pernet_operations ipgre_net_ops = {
    .init = ipgre_init_net,
    .exit_batch = ipgre_exit_batch_net,
    .id   = &ipgre_net_id,
    .size = sizeof(struct ip_tunnel_net),
};
1034 
1035 static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
1036                  struct netlink_ext_ack *extack)
1037 {
1038     __be16 flags;
1039 
1040     if (!data)
1041         return 0;
1042 
1043     flags = 0;
1044     if (data[IFLA_GRE_IFLAGS])
1045         flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1046     if (data[IFLA_GRE_OFLAGS])
1047         flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1048     if (flags & (GRE_VERSION|GRE_ROUTING))
1049         return -EINVAL;
1050 
1051     if (data[IFLA_GRE_COLLECT_METADATA] &&
1052         data[IFLA_GRE_ENCAP_TYPE] &&
1053         nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)
1054         return -EINVAL;
1055 
1056     return 0;
1057 }
1058 
1059 static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
1060                   struct netlink_ext_ack *extack)
1061 {
1062     __be32 daddr;
1063 
1064     if (tb[IFLA_ADDRESS]) {
1065         if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1066             return -EINVAL;
1067         if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1068             return -EADDRNOTAVAIL;
1069     }
1070 
1071     if (!data)
1072         goto out;
1073 
1074     if (data[IFLA_GRE_REMOTE]) {
1075         memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
1076         if (!daddr)
1077             return -EINVAL;
1078     }
1079 
1080 out:
1081     return ipgre_tunnel_validate(tb, data, extack);
1082 }
1083 
/* netlink validation for "erspan": gretap checks first, then the
 * version-specific GRE flag and session-ID constraints.
 */
static int erspan_validate(struct nlattr *tb[], struct nlattr *data[],
               struct netlink_ext_ack *extack)
{
    __be16 flags = 0;
    int ret;

    if (!data)
        return 0;

    ret = ipgre_tap_validate(tb, data, extack);
    if (ret)
        return ret;

    /* Version 0 needs none of the checks below. */
    if (data[IFLA_GRE_ERSPAN_VER] &&
        nla_get_u8(data[IFLA_GRE_ERSPAN_VER]) == 0)
        return 0;

    /* ERSPAN type II/III should only have GRE sequence and key flag */
    if (data[IFLA_GRE_OFLAGS])
        flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
    if (data[IFLA_GRE_IFLAGS])
        flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
    if (!data[IFLA_GRE_COLLECT_METADATA] &&
        flags != (GRE_SEQ | GRE_KEY))
        return -EINVAL;

    /* ERSPAN Session ID only has 10-bit. Since we reuse
     * 32-bit key field as ID, check it's range.
     */
    if (data[IFLA_GRE_IKEY] &&
        (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
        return -EINVAL;

    if (data[IFLA_GRE_OKEY] &&
        (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
        return -EINVAL;

    return 0;
}
1123 
/* Translate IFLA_GRE_* attributes into *parms and *fwmark.  Note this
 * also mutates the tunnel itself (collect_md, ignore_df, dev->type).
 * Returns 0, or -EINVAL when pmtudisc and ignore_df conflict.
 */
static int ipgre_netlink_parms(struct net_device *dev,
                struct nlattr *data[],
                struct nlattr *tb[],
                struct ip_tunnel_parm *parms,
                __u32 *fwmark)
{
    struct ip_tunnel *t = netdev_priv(dev);

    memset(parms, 0, sizeof(*parms));

    parms->iph.protocol = IPPROTO_GRE;

    if (!data)
        return 0;

    if (data[IFLA_GRE_LINK])
        parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

    if (data[IFLA_GRE_IFLAGS])
        parms->i_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_IFLAGS]));

    if (data[IFLA_GRE_OFLAGS])
        parms->o_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_OFLAGS]));

    if (data[IFLA_GRE_IKEY])
        parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

    if (data[IFLA_GRE_OKEY])
        parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

    if (data[IFLA_GRE_LOCAL])
        parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);

    if (data[IFLA_GRE_REMOTE])
        parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);

    if (data[IFLA_GRE_TTL])
        parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);

    if (data[IFLA_GRE_TOS])
        parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);

    /* PMTU discovery defaults to on (DF set) and is incompatible with
     * a previously enabled ignore_df.
     */
    if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC])) {
        if (t->ignore_df)
            return -EINVAL;
        parms->iph.frag_off = htons(IP_DF);
    }

    if (data[IFLA_GRE_COLLECT_METADATA]) {
        t->collect_md = true;
        /* Metadata-mode devices have no fixed link-layer type. */
        if (dev->type == ARPHRD_IPGRE)
            dev->type = ARPHRD_NONE;
    }

    /* Conversely, ignore_df may only be enabled while DF is clear. */
    if (data[IFLA_GRE_IGNORE_DF]) {
        if (nla_get_u8(data[IFLA_GRE_IGNORE_DF])
          && (parms->iph.frag_off & htons(IP_DF)))
            return -EINVAL;
        t->ignore_df = !!nla_get_u8(data[IFLA_GRE_IGNORE_DF]);
    }

    if (data[IFLA_GRE_FWMARK])
        *fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);

    return 0;
}
1190 
/* Parse the common gre attributes, then the ERSPAN-specific ones:
 * version (0-2), v1 session index, v2 direction and hardware ID.
 * Version-specific values are stored directly on the tunnel.
 */
static int erspan_netlink_parms(struct net_device *dev,
                struct nlattr *data[],
                struct nlattr *tb[],
                struct ip_tunnel_parm *parms,
                __u32 *fwmark)
{
    struct ip_tunnel *t = netdev_priv(dev);
    int err;

    err = ipgre_netlink_parms(dev, data, tb, parms, fwmark);
    if (err)
        return err;
    if (!data)
        return 0;

    if (data[IFLA_GRE_ERSPAN_VER]) {
        t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);

        if (t->erspan_ver > 2)
            return -EINVAL;
    }

    if (t->erspan_ver == 1) {
        /* v1: session index, range-checked against INDEX_MASK. */
        if (data[IFLA_GRE_ERSPAN_INDEX]) {
            t->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
            if (t->index & ~INDEX_MASK)
                return -EINVAL;
        }
    } else if (t->erspan_ver == 2) {
        if (data[IFLA_GRE_ERSPAN_DIR]) {
            t->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
            if (t->dir & ~(DIR_MASK >> DIR_OFFSET))
                return -EINVAL;
        }
        if (data[IFLA_GRE_ERSPAN_HWID]) {
            t->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
            if (t->hwid & ~(HWID_MASK >> HWID_OFFSET))
                return -EINVAL;
        }
    }

    return 0;
}
1234 
1235 /* This function returns true when ENCAP attributes are present in the nl msg */
1236 static bool ipgre_netlink_encap_parms(struct nlattr *data[],
1237                       struct ip_tunnel_encap *ipencap)
1238 {
1239     bool ret = false;
1240 
1241     memset(ipencap, 0, sizeof(*ipencap));
1242 
1243     if (!data)
1244         return ret;
1245 
1246     if (data[IFLA_GRE_ENCAP_TYPE]) {
1247         ret = true;
1248         ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
1249     }
1250 
1251     if (data[IFLA_GRE_ENCAP_FLAGS]) {
1252         ret = true;
1253         ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
1254     }
1255 
1256     if (data[IFLA_GRE_ENCAP_SPORT]) {
1257         ret = true;
1258         ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
1259     }
1260 
1261     if (data[IFLA_GRE_ENCAP_DPORT]) {
1262         ret = true;
1263         ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
1264     }
1265 
1266     return ret;
1267 }
1268 
/* ndo_init for "gretap": common GRE init plus live-address-change. */
static int gre_tap_init(struct net_device *dev)
{
    __gre_tunnel_init(dev);
    dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
    netif_keep_dst(dev);

    return ip_tunnel_init(dev);
}
1277 
/* Device operations for Ethernet-over-GRE ("gretap") devices. */
static const struct net_device_ops gre_tap_netdev_ops = {
    .ndo_init       = gre_tap_init,
    .ndo_uninit     = ip_tunnel_uninit,
    .ndo_start_xmit     = gre_tap_xmit,
    .ndo_set_mac_address    = eth_mac_addr,
    .ndo_validate_addr  = eth_validate_addr,
    .ndo_change_mtu     = ip_tunnel_change_mtu,
    .ndo_get_stats64    = dev_get_tstats64,
    .ndo_get_iflink     = ip_tunnel_get_iflink,
    .ndo_fill_metadata_dst  = gre_fill_metadata_dst,
};
1289 
1290 static int erspan_tunnel_init(struct net_device *dev)
1291 {
1292     struct ip_tunnel *tunnel = netdev_priv(dev);
1293 
1294     if (tunnel->erspan_ver == 0)
1295         tunnel->tun_hlen = 4; /* 4-byte GRE hdr. */
1296     else
1297         tunnel->tun_hlen = 8; /* 8-byte GRE hdr. */
1298 
1299     tunnel->parms.iph.protocol = IPPROTO_GRE;
1300     tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
1301                erspan_hdr_len(tunnel->erspan_ver);
1302 
1303     dev->features       |= GRE_FEATURES;
1304     dev->hw_features    |= GRE_FEATURES;
1305     dev->priv_flags     |= IFF_LIVE_ADDR_CHANGE;
1306     netif_keep_dst(dev);
1307 
1308     return ip_tunnel_init(dev);
1309 }
1310 
/* Device operations for ERSPAN mirror-session devices. */
static const struct net_device_ops erspan_netdev_ops = {
    .ndo_init       = erspan_tunnel_init,
    .ndo_uninit     = ip_tunnel_uninit,
    .ndo_start_xmit     = erspan_xmit,
    .ndo_set_mac_address    = eth_mac_addr,
    .ndo_validate_addr  = eth_validate_addr,
    .ndo_change_mtu     = ip_tunnel_change_mtu,
    .ndo_get_stats64    = dev_get_tstats64,
    .ndo_get_iflink     = ip_tunnel_get_iflink,
    .ndo_fill_metadata_dst  = gre_fill_metadata_dst,
};
1322 
/* rtnl ->setup for "gretap": an Ethernet-like device carried in GRE. */
static void ipgre_tap_setup(struct net_device *dev)
{
    ether_setup(dev);
    /* NOTE(review): 0 appears to lift the ether_setup() MTU ceiling;
     * the effective bound comes from ip_tunnel_change_mtu() — confirm.
     */
    dev->max_mtu = 0;
    dev->netdev_ops = &gre_tap_netdev_ops;
    dev->priv_flags &= ~IFF_TX_SKB_SHARING;
    dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
    ip_tunnel_setup(dev, gre_tap_net_id);
}
1332 
1333 static int
1334 ipgre_newlink_encap_setup(struct net_device *dev, struct nlattr *data[])
1335 {
1336     struct ip_tunnel_encap ipencap;
1337 
1338     if (ipgre_netlink_encap_parms(data, &ipencap)) {
1339         struct ip_tunnel *t = netdev_priv(dev);
1340         int err = ip_tunnel_encap_setup(t, &ipencap);
1341 
1342         if (err < 0)
1343             return err;
1344     }
1345 
1346     return 0;
1347 }
1348 
1349 static int ipgre_newlink(struct net *src_net, struct net_device *dev,
1350              struct nlattr *tb[], struct nlattr *data[],
1351              struct netlink_ext_ack *extack)
1352 {
1353     struct ip_tunnel_parm p;
1354     __u32 fwmark = 0;
1355     int err;
1356 
1357     err = ipgre_newlink_encap_setup(dev, data);
1358     if (err)
1359         return err;
1360 
1361     err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
1362     if (err < 0)
1363         return err;
1364     return ip_tunnel_newlink(dev, tb, &p, fwmark);
1365 }
1366 
1367 static int erspan_newlink(struct net *src_net, struct net_device *dev,
1368               struct nlattr *tb[], struct nlattr *data[],
1369               struct netlink_ext_ack *extack)
1370 {
1371     struct ip_tunnel_parm p;
1372     __u32 fwmark = 0;
1373     int err;
1374 
1375     err = ipgre_newlink_encap_setup(dev, data);
1376     if (err)
1377         return err;
1378 
1379     err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
1380     if (err)
1381         return err;
1382     return ip_tunnel_newlink(dev, tb, &p, fwmark);
1383 }
1384 
/* rtnl ->changelink for gre/gretap: re-parse parameters, apply them,
 * then refresh header sizes/MTU via ipgre_link_update().
 */
static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
                struct nlattr *data[],
                struct netlink_ext_ack *extack)
{
    struct ip_tunnel *t = netdev_priv(dev);
    __u32 fwmark = t->fwmark;
    struct ip_tunnel_parm p;
    int err;

    err = ipgre_newlink_encap_setup(dev, data);
    if (err)
        return err;

    err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
    if (err < 0)
        return err;

    err = ip_tunnel_changelink(dev, tb, &p, fwmark);
    if (err < 0)
        return err;

    /* NOTE(review): flags are propagated here by hand — presumably
     * ip_tunnel_changelink() does not copy them; confirm.
     */
    t->parms.i_flags = p.i_flags;
    t->parms.o_flags = p.o_flags;

    /* Preserve the MTU unless the user pinned one via IFLA_MTU. */
    ipgre_link_update(dev, !tb[IFLA_MTU]);

    return 0;
}
1413 
/* rtnl ->changelink for "erspan".  Unlike ipgre_changelink() it does
 * not call ipgre_link_update(); NOTE(review): presumably intentional
 * since the erspan header sizes are fixed per version — confirm.
 */
static int erspan_changelink(struct net_device *dev, struct nlattr *tb[],
                 struct nlattr *data[],
                 struct netlink_ext_ack *extack)
{
    struct ip_tunnel *t = netdev_priv(dev);
    __u32 fwmark = t->fwmark;
    struct ip_tunnel_parm p;
    int err;

    err = ipgre_newlink_encap_setup(dev, data);
    if (err)
        return err;

    err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
    if (err < 0)
        return err;

    err = ip_tunnel_changelink(dev, tb, &p, fwmark);
    if (err < 0)
        return err;

    t->parms.i_flags = p.i_flags;
    t->parms.o_flags = p.o_flags;

    return 0;
}
1440 
/* Worst-case netlink attribute space for ipgre_fill_info(); keep in
 * sync with the attributes emitted there.
 */
static size_t ipgre_get_size(const struct net_device *dev)
{
    return
        /* IFLA_GRE_LINK */
        nla_total_size(4) +
        /* IFLA_GRE_IFLAGS */
        nla_total_size(2) +
        /* IFLA_GRE_OFLAGS */
        nla_total_size(2) +
        /* IFLA_GRE_IKEY */
        nla_total_size(4) +
        /* IFLA_GRE_OKEY */
        nla_total_size(4) +
        /* IFLA_GRE_LOCAL */
        nla_total_size(4) +
        /* IFLA_GRE_REMOTE */
        nla_total_size(4) +
        /* IFLA_GRE_TTL */
        nla_total_size(1) +
        /* IFLA_GRE_TOS */
        nla_total_size(1) +
        /* IFLA_GRE_PMTUDISC */
        nla_total_size(1) +
        /* IFLA_GRE_ENCAP_TYPE */
        nla_total_size(2) +
        /* IFLA_GRE_ENCAP_FLAGS */
        nla_total_size(2) +
        /* IFLA_GRE_ENCAP_SPORT */
        nla_total_size(2) +
        /* IFLA_GRE_ENCAP_DPORT */
        nla_total_size(2) +
        /* IFLA_GRE_COLLECT_METADATA */
        nla_total_size(0) +
        /* IFLA_GRE_IGNORE_DF */
        nla_total_size(1) +
        /* IFLA_GRE_FWMARK */
        nla_total_size(4) +
        /* IFLA_GRE_ERSPAN_INDEX */
        nla_total_size(4) +
        /* IFLA_GRE_ERSPAN_VER */
        nla_total_size(1) +
        /* IFLA_GRE_ERSPAN_DIR */
        nla_total_size(1) +
        /* IFLA_GRE_ERSPAN_HWID */
        nla_total_size(2) +
        0;
}
1488 
/* Dump the tunnel configuration over netlink; the counterpart of
 * ipgre_netlink_parms()/erspan_netlink_parms().  Shared by all three
 * link kinds, hence the erspan_ver guard around the ERSPAN attributes.
 */
static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
    struct ip_tunnel *t = netdev_priv(dev);
    struct ip_tunnel_parm *p = &t->parms;
    __be16 o_flags = p->o_flags;

    if (t->erspan_ver <= 2) {
        /* Report the key flag that non-metadata ERSPAN adds on top
         * of the user-configured o_flags.
         */
        if (t->erspan_ver != 0 && !t->collect_md)
            o_flags |= TUNNEL_KEY;

        if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
            goto nla_put_failure;

        if (t->erspan_ver == 1) {
            if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
                goto nla_put_failure;
        } else if (t->erspan_ver == 2) {
            if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
                goto nla_put_failure;
            if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
                goto nla_put_failure;
        }
    }

    if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
        nla_put_be16(skb, IFLA_GRE_IFLAGS,
             gre_tnl_flags_to_gre_flags(p->i_flags)) ||
        nla_put_be16(skb, IFLA_GRE_OFLAGS,
             gre_tnl_flags_to_gre_flags(o_flags)) ||
        nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
        nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
        nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
        nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
        nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
        nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
        nla_put_u8(skb, IFLA_GRE_PMTUDISC,
               !!(p->iph.frag_off & htons(IP_DF))) ||
        nla_put_u32(skb, IFLA_GRE_FWMARK, t->fwmark))
        goto nla_put_failure;

    if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
            t->encap.type) ||
        nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
             t->encap.sport) ||
        nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
             t->encap.dport) ||
        nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
            t->encap.flags))
        goto nla_put_failure;

    if (nla_put_u8(skb, IFLA_GRE_IGNORE_DF, t->ignore_df))
        goto nla_put_failure;

    if (t->collect_md) {
        if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
            goto nla_put_failure;
    }

    return 0;

nla_put_failure:
    return -EMSGSIZE;
}
1552 
/* rtnl ->setup for "erspan" devices. */
static void erspan_setup(struct net_device *dev)
{
    struct ip_tunnel *t = netdev_priv(dev);

    ether_setup(dev);
    dev->max_mtu = 0;
    dev->netdev_ops = &erspan_netdev_ops;
    dev->priv_flags &= ~IFF_TX_SKB_SHARING;
    dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
    ip_tunnel_setup(dev, erspan_net_id);
    /* Default version unless IFLA_GRE_ERSPAN_VER overrides it. */
    t->erspan_ver = 1;
}
1565 
/* Netlink attribute policy shared by the gre, gretap and erspan kinds. */
static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
    [IFLA_GRE_LINK]     = { .type = NLA_U32 },
    [IFLA_GRE_IFLAGS]   = { .type = NLA_U16 },
    [IFLA_GRE_OFLAGS]   = { .type = NLA_U16 },
    [IFLA_GRE_IKEY]     = { .type = NLA_U32 },
    [IFLA_GRE_OKEY]     = { .type = NLA_U32 },
    [IFLA_GRE_LOCAL]    = { .len = sizeof_field(struct iphdr, saddr) },
    [IFLA_GRE_REMOTE]   = { .len = sizeof_field(struct iphdr, daddr) },
    [IFLA_GRE_TTL]      = { .type = NLA_U8 },
    [IFLA_GRE_TOS]      = { .type = NLA_U8 },
    [IFLA_GRE_PMTUDISC] = { .type = NLA_U8 },
    [IFLA_GRE_ENCAP_TYPE]   = { .type = NLA_U16 },
    [IFLA_GRE_ENCAP_FLAGS]  = { .type = NLA_U16 },
    [IFLA_GRE_ENCAP_SPORT]  = { .type = NLA_U16 },
    [IFLA_GRE_ENCAP_DPORT]  = { .type = NLA_U16 },
    [IFLA_GRE_COLLECT_METADATA] = { .type = NLA_FLAG },
    [IFLA_GRE_IGNORE_DF]    = { .type = NLA_U8 },
    [IFLA_GRE_FWMARK]   = { .type = NLA_U32 },
    [IFLA_GRE_ERSPAN_INDEX] = { .type = NLA_U32 },
    [IFLA_GRE_ERSPAN_VER]   = { .type = NLA_U8 },
    [IFLA_GRE_ERSPAN_DIR]   = { .type = NLA_U8 },
    [IFLA_GRE_ERSPAN_HWID]  = { .type = NLA_U16 },
};
1589 
/* rtnl registration for the layer-3 "gre" kind. */
static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
    .kind       = "gre",
    .maxtype    = IFLA_GRE_MAX,
    .policy     = ipgre_policy,
    .priv_size  = sizeof(struct ip_tunnel),
    .setup      = ipgre_tunnel_setup,
    .validate   = ipgre_tunnel_validate,
    .newlink    = ipgre_newlink,
    .changelink = ipgre_changelink,
    .dellink    = ip_tunnel_dellink,
    .get_size   = ipgre_get_size,
    .fill_info  = ipgre_fill_info,
    .get_link_net   = ip_tunnel_get_link_net,
};
1604 
/* rtnl registration for the Ethernet-over-GRE "gretap" kind. */
static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
    .kind       = "gretap",
    .maxtype    = IFLA_GRE_MAX,
    .policy     = ipgre_policy,
    .priv_size  = sizeof(struct ip_tunnel),
    .setup      = ipgre_tap_setup,
    .validate   = ipgre_tap_validate,
    .newlink    = ipgre_newlink,
    .changelink = ipgre_changelink,
    .dellink    = ip_tunnel_dellink,
    .get_size   = ipgre_get_size,
    .fill_info  = ipgre_fill_info,
    .get_link_net   = ip_tunnel_get_link_net,
};
1619 
/* rtnl registration for the "erspan" mirror-session kind. */
static struct rtnl_link_ops erspan_link_ops __read_mostly = {
    .kind       = "erspan",
    .maxtype    = IFLA_GRE_MAX,
    .policy     = ipgre_policy,
    .priv_size  = sizeof(struct ip_tunnel),
    .setup      = erspan_setup,
    .validate   = erspan_validate,
    .newlink    = erspan_newlink,
    .changelink = erspan_changelink,
    .dellink    = ip_tunnel_dellink,
    .get_size   = ipgre_get_size,
    .fill_info  = ipgre_fill_info,
    .get_link_net   = ip_tunnel_get_link_net,
};
1634 
/* Create a flow-based (collect_md) gretap device, e.g. for openvswitch.
 * Returns the device or an ERR_PTR().  Note the two distinct error
 * paths: before registration the device is freed directly, after it
 * the dellink + unregister path is used.
 */
struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
                    u8 name_assign_type)
{
    struct nlattr *tb[IFLA_MAX + 1];
    struct net_device *dev;
    LIST_HEAD(list_kill);
    struct ip_tunnel *t;
    int err;

    memset(&tb, 0, sizeof(tb));

    dev = rtnl_create_link(net, name, name_assign_type,
                   &ipgre_tap_ops, tb, NULL);
    if (IS_ERR(dev))
        return dev;

    /* Configure flow based GRE device. */
    t = netdev_priv(dev);
    t->collect_md = true;

    err = ipgre_newlink(net, dev, tb, NULL, NULL);
    if (err < 0) {
        /* newlink failed: presumably never registered, so a plain
         * free is enough — NOTE(review) confirm vs ip_tunnel_newlink.
         */
        free_netdev(dev);
        return ERR_PTR(err);
    }

    /* openvswitch users expect packet sizes to be unrestricted,
     * so set the largest MTU we can.
     */
    err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
    if (err)
        goto out;

    err = rtnl_configure_link(dev, NULL);
    if (err < 0)
        goto out;

    return dev;
out:
    ip_tunnel_dellink(dev, &list_kill);
    unregister_netdevice_many(&list_kill);
    return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(gretap_fb_dev_create);
1679 
/* Per-netns init for "gretap", including the fallback gretap0 device. */
static int __net_init ipgre_tap_init_net(struct net *net)
{
    return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
}
1684 
/* Batched per-netns teardown of all "gretap" devices. */
static void __net_exit ipgre_tap_exit_batch_net(struct list_head *list_net)
{
    ip_tunnel_delete_nets(list_net, gre_tap_net_id, &ipgre_tap_ops);
}
1689 
/* Per-network-namespace state registration for "gretap" tunnels. */
static struct pernet_operations ipgre_tap_net_ops = {
    .init = ipgre_tap_init_net,
    .exit_batch = ipgre_tap_exit_batch_net,
    .id   = &gre_tap_net_id,
    .size = sizeof(struct ip_tunnel_net),
};
1696 
/* Per-netns init for "erspan", including the fallback erspan0 device. */
static int __net_init erspan_init_net(struct net *net)
{
    return ip_tunnel_init_net(net, erspan_net_id,
                  &erspan_link_ops, "erspan0");
}
1702 
/* Batched per-netns teardown of all "erspan" devices. */
static void __net_exit erspan_exit_batch_net(struct list_head *net_list)
{
    ip_tunnel_delete_nets(net_list, erspan_net_id, &erspan_link_ops);
}
1707 
/* Per-network-namespace state registration for "erspan" tunnels. */
static struct pernet_operations erspan_net_ops = {
    .init = erspan_init_net,
    .exit_batch = erspan_exit_batch_net,
    .id   = &erspan_net_id,
    .size = sizeof(struct ip_tunnel_net),
};
1714 
/* Module init: register the three pernet ops, hook the CISCO GRE
 * protocol demux, then register the rtnl link kinds.  The goto chain
 * unwinds every prior step in exact reverse order on failure.
 */
static int __init ipgre_init(void)
{
    int err;

    pr_info("GRE over IPv4 tunneling driver\n");

    err = register_pernet_device(&ipgre_net_ops);
    if (err < 0)
        return err;

    err = register_pernet_device(&ipgre_tap_net_ops);
    if (err < 0)
        goto pnet_tap_failed;

    err = register_pernet_device(&erspan_net_ops);
    if (err < 0)
        goto pnet_erspan_failed;

    err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
    if (err < 0) {
        pr_info("%s: can't add protocol\n", __func__);
        goto add_proto_failed;
    }

    err = rtnl_link_register(&ipgre_link_ops);
    if (err < 0)
        goto rtnl_link_failed;

    err = rtnl_link_register(&ipgre_tap_ops);
    if (err < 0)
        goto tap_ops_failed;

    err = rtnl_link_register(&erspan_link_ops);
    if (err < 0)
        goto erspan_link_failed;

    return 0;

erspan_link_failed:
    rtnl_link_unregister(&ipgre_tap_ops);
tap_ops_failed:
    rtnl_link_unregister(&ipgre_link_ops);
rtnl_link_failed:
    gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
add_proto_failed:
    unregister_pernet_device(&erspan_net_ops);
pnet_erspan_failed:
    unregister_pernet_device(&ipgre_tap_net_ops);
pnet_tap_failed:
    unregister_pernet_device(&ipgre_net_ops);
    return err;
}
1767 
/* Module exit: drop the rtnl kinds and protocol hook before the pernet
 * state.  NOTE(review): the pernet unregister order is not the exact
 * reverse of ipgre_init() (erspan last); the three appear independent,
 * so this looks harmless — confirm.
 */
static void __exit ipgre_fini(void)
{
    rtnl_link_unregister(&ipgre_tap_ops);
    rtnl_link_unregister(&ipgre_link_ops);
    rtnl_link_unregister(&erspan_link_ops);
    gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
    unregister_pernet_device(&ipgre_tap_net_ops);
    unregister_pernet_device(&ipgre_net_ops);
    unregister_pernet_device(&erspan_net_ops);
}
1778 
module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_LICENSE("GPL");
/* Autoload on "ip link add type gre|gretap|erspan" ... */
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");
MODULE_ALIAS_RTNL_LINK("erspan");
/* ... and on requests for the per-netns fallback device names. */
MODULE_ALIAS_NETDEV("gre0");
MODULE_ALIAS_NETDEV("gretap0");
MODULE_ALIAS_NETDEV("erspan0");