Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 /*
0003  *  IPV4 GSO/GRO offload support
0004  *  Linux INET implementation
0005  *
0006  *  GRE GSO support
0007  */
0008 
0009 #include <linux/skbuff.h>
0010 #include <linux/init.h>
0011 #include <net/protocol.h>
0012 #include <net/gre.h>
0013 #include <net/gro.h>
0014 
/*
 * gre_gso_segment - GSO callback: segment a GRE-encapsulated super-packet.
 *
 * The inner packet is segmented by the lower-layer GSO handler, then the
 * outer (tunnel + mac) headers are rebuilt in front of every resulting
 * segment, refreshing the GRE checksum where the C flag requires one.
 *
 * Returns the segment list, or an ERR_PTR on failure; on failure the skb
 * is unwound back to its original header layout.
 */
static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
				       netdev_features_t features)
{
	/* Length of the tunnel header: GRE base header plus options. */
	int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
	bool need_csum, offload_csum, gso_partial, need_ipsec;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	/* Outer-header state, saved so we can unwind or rebuild it below. */
	u16 mac_offset = skb->mac_header;
	__be16 protocol = skb->protocol;
	u16 mac_len = skb->mac_len;
	int gre_offset, outer_hlen;

	if (!skb->encapsulation)
		goto out;

	/* Tunnel header must at least cover the fixed GRE base header. */
	if (unlikely(tnl_hlen < sizeof(struct gre_base_hdr)))
		goto out;

	if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
		goto out;

	/* Setup inner skb: strip the tunnel header so the inner packet can
	 * be segmented as if it were a plain, non-encapsulated skb.
	 */
	skb->encapsulation = 0;
	SKB_GSO_CB(skb)->encap_level = 0;
	__skb_pull(skb, tnl_hlen);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb_inner_network_offset(skb));
	skb->mac_len = skb_inner_network_offset(skb);
	skb->protocol = skb->inner_protocol;

	/* C flag set: every segment needs a freshly computed GRE checksum. */
	need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM);
	skb->encap_hdr_csum = need_csum;

	features &= skb->dev->hw_enc_features;
	if (need_csum)
		features &= ~NETIF_F_SCTP_CRC;

	need_ipsec = skb_dst(skb) && dst_xfrm(skb_dst(skb));
	/* Try to offload checksum if possible; never when an xfrm transform
	 * is attached, since it will rewrite the packet after us.
	 */
	offload_csum = !!(need_csum && !need_ipsec &&
			  (skb->dev->features & NETIF_F_HW_CSUM));

	/* segment inner packet. */
	segs = skb_mac_gso_segment(skb, features);
	if (IS_ERR_OR_NULL(segs)) {
		/* Restore the original outer-header layout before failing. */
		skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
				     mac_len);
		goto out;
	}

	gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);

	outer_hlen = skb_tnl_header_len(skb);
	gre_offset = outer_hlen - tnl_hlen;
	skb = segs;
	do {
		struct gre_base_hdr *greh;
		__sum16 *pcsum;

		/* Set up inner headers if we are offloading inner checksum */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_inner_headers(skb);
			skb->encapsulation = 1;
		}

		/* Restore the outer protocol/mac state and push the tunnel
		 * headers back in front of this segment.
		 */
		skb->mac_len = mac_len;
		skb->protocol = protocol;

		__skb_push(skb, outer_hlen);
		skb_reset_mac_header(skb);
		skb_set_network_header(skb, mac_len);
		skb_set_transport_header(skb, gre_offset);

		if (!need_csum)
			continue;

		greh = (struct gre_base_hdr *)skb_transport_header(skb);
		/* Checksum field immediately follows the base header. */
		pcsum = (__sum16 *)(greh + 1);

		if (gso_partial && skb_is_gso(skb)) {
			unsigned int partial_adj;

			/* Adjust checksum to account for the fact that
			 * the partial checksum is based on actual size
			 * whereas headers should be based on MSS size.
			 */
			partial_adj = skb->len + skb_headroom(skb) -
				      SKB_GSO_CB(skb)->data_offset -
				      skb_shinfo(skb)->gso_size;
			*pcsum = ~csum_fold((__force __wsum)htonl(partial_adj));
		} else {
			*pcsum = 0;
		}

		/* Zero the 16 bits following the checksum (reserved per
		 * RFC 2784) before computing/offloading the checksum.
		 */
		*(pcsum + 1) = 0;
		if (skb->encapsulation || !offload_csum) {
			/* Compute the GRE checksum in software. */
			*pcsum = gso_make_checksum(skb, 0);
		} else {
			/* Let the device fill it in via csum_start/offset. */
			skb->ip_summed = CHECKSUM_PARTIAL;
			skb->csum_start = skb_transport_header(skb) - skb->head;
			skb->csum_offset = sizeof(*greh);
		}
	} while ((skb = skb->next));
out:
	return segs;
}
0120 
/*
 * gre_gro_receive - GRO callback: try to aggregate a GRE packet.
 *
 * Validates the GRE header (version 0, only C/K flags), matches the packet
 * against the held flows on @head, and hands the inner payload to the GRO
 * handler of the encapsulated protocol.  @flush stays set (forcing a flush)
 * on any path that bails out before the inner handler is invoked.
 */
static struct sk_buff *gre_gro_receive(struct list_head *head,
				       struct sk_buff *skb)
{
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	const struct gre_base_hdr *greh;
	unsigned int hlen, grehlen;
	unsigned int off;
	int flush = 1;
	struct packet_offload *ptype;
	__be16 type;

	/* Aggregate only one level of encapsulation. */
	if (NAPI_GRO_CB(skb)->encap_mark)
		goto out;

	NAPI_GRO_CB(skb)->encap_mark = 1;

	/* Make sure the fixed GRE base header is directly accessible. */
	off = skb_gro_offset(skb);
	hlen = off + sizeof(*greh);
	greh = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		greh = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!greh))
			goto out;
	}

	/* Only support version 0 and K (key), C (csum) flags. Note that
	 * although the support for the S (seq#) flag can be added easily
	 * for GRO, this is problematic for GSO hence can not be enabled
	 * here because a GRO pkt may end up in the forwarding path, thus
	 * requiring GSO support to break it up correctly.
	 */
	if ((greh->flags & ~(GRE_KEY|GRE_CSUM)) != 0)
		goto out;

	/* We can only support GRE_CSUM if we can track the location of
	 * the GRE header.  In the case of FOU/GUE we cannot because the
	 * outer UDP header displaces the GRE header leaving us in a state
	 * of limbo.
	 */
	if ((greh->flags & GRE_CSUM) && NAPI_GRO_CB(skb)->is_fou)
		goto out;

	type = greh->protocol;

	/* Find the GRO receive handler for the encapsulated protocol. */
	ptype = gro_find_receive_by_type(type);
	if (!ptype)
		goto out;

	/* Total GRE header length: base plus optional csum and key words. */
	grehlen = GRE_HEADER_SECTION;

	if (greh->flags & GRE_KEY)
		grehlen += GRE_HEADER_SECTION;

	if (greh->flags & GRE_CSUM)
		grehlen += GRE_HEADER_SECTION;

	/* Re-pull now that the full header length (with options) is known. */
	hlen = off + grehlen;
	if (skb_gro_header_hard(skb, hlen)) {
		greh = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!greh))
			goto out;
	}

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if ((greh->flags & GRE_CSUM) && !NAPI_GRO_CB(skb)->flush) {
		if (skb_gro_checksum_simple_validate(skb))
			goto out;

		skb_gro_checksum_try_convert(skb, IPPROTO_GRE,
					     null_compute_pseudo);
	}

	/* Invalidate same_flow on every held packet from a different
	 * tunnel, so only true tunnel-mates can be merged below.
	 */
	list_for_each_entry(p, head, list) {
		const struct gre_base_hdr *greh2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		/* The following checks are needed to ensure only pkts
		 * from the same tunnel are considered for aggregation.
		 * The criteria for "the same tunnel" includes:
		 * 1) same version (we only support version 0 here)
		 * 2) same protocol (we only support ETH_P_IP for now)
		 * 3) same set of flags
		 * 4) same key if the key field is present.
		 */
		greh2 = (struct gre_base_hdr *)(p->data + off);

		if (greh2->flags != greh->flags ||
		    greh2->protocol != greh->protocol) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
		if (greh->flags & GRE_KEY) {
			/* compare keys */
			if (*(__be32 *)(greh2+1) != *(__be32 *)(greh+1)) {
				NAPI_GRO_CB(p)->same_flow = 0;
				continue;
			}
		}
	}

	/* Advance past the GRE header and hand off the inner packet. */
	skb_gro_pull(skb, grehlen);

	/* Adjust NAPI_GRO_CB(skb)->csum to account for the skb_gro_pull() */
	skb_gro_postpull_rcsum(skb, greh, grehlen);

	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
	flush = 0;

out:
	skb_gro_flush_final(skb, pp, flush);

	return pp;
}
0237 
0238 static int gre_gro_complete(struct sk_buff *skb, int nhoff)
0239 {
0240     struct gre_base_hdr *greh = (struct gre_base_hdr *)(skb->data + nhoff);
0241     struct packet_offload *ptype;
0242     unsigned int grehlen = sizeof(*greh);
0243     int err = -ENOENT;
0244     __be16 type;
0245 
0246     skb->encapsulation = 1;
0247     skb_shinfo(skb)->gso_type = SKB_GSO_GRE;
0248 
0249     type = greh->protocol;
0250     if (greh->flags & GRE_KEY)
0251         grehlen += GRE_HEADER_SECTION;
0252 
0253     if (greh->flags & GRE_CSUM)
0254         grehlen += GRE_HEADER_SECTION;
0255 
0256     ptype = gro_find_complete_by_type(type);
0257     if (ptype)
0258         err = ptype->callbacks.gro_complete(skb, nhoff + grehlen);
0259 
0260     skb_set_inner_mac_header(skb, nhoff + grehlen);
0261 
0262     return err;
0263 }
0264 
/* Offload callback table registered below for IPPROTO_GRE. */
static const struct net_offload gre_offload = {
	.callbacks = {
		.gso_segment = gre_gso_segment,
		.gro_receive = gre_gro_receive,
		.gro_complete = gre_gro_complete,
	},
};
0272 
0273 static int __init gre_offload_init(void)
0274 {
0275     int err;
0276 
0277     err = inet_add_offload(&gre_offload, IPPROTO_GRE);
0278 #if IS_ENABLED(CONFIG_IPV6)
0279     if (err)
0280         return err;
0281 
0282     err = inet6_add_offload(&gre_offload, IPPROTO_GRE);
0283     if (err)
0284         inet_del_offload(&gre_offload, IPPROTO_GRE);
0285 #endif
0286 
0287     return err;
0288 }
0289 device_initcall(gre_offload_init);