Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 #include <linux/kernel.h>
0003 #include <linux/skbuff.h>
0004 #include <linux/export.h>
0005 #include <linux/ip.h>
0006 #include <linux/ipv6.h>
0007 #include <linux/if_vlan.h>
0008 #include <linux/filter.h>
0009 #include <net/dsa.h>
0010 #include <net/dst_metadata.h>
0011 #include <net/ip.h>
0012 #include <net/ipv6.h>
0013 #include <net/gre.h>
0014 #include <net/pptp.h>
0015 #include <net/tipc.h>
0016 #include <linux/igmp.h>
0017 #include <linux/icmp.h>
0018 #include <linux/sctp.h>
0019 #include <linux/dccp.h>
0020 #include <linux/if_tunnel.h>
0021 #include <linux/if_pppox.h>
0022 #include <linux/ppp_defs.h>
0023 #include <linux/stddef.h>
0024 #include <linux/if_ether.h>
0025 #include <linux/if_hsr.h>
0026 #include <linux/mpls.h>
0027 #include <linux/tcp.h>
0028 #include <linux/ptp_classify.h>
0029 #include <net/flow_dissector.h>
0030 #include <scsi/fc/fc_fcoe.h>
0031 #include <uapi/linux/batadv_packet.h>
0032 #include <linux/bpf.h>
0033 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
0034 #include <net/netfilter/nf_conntrack_core.h>
0035 #include <net/netfilter/nf_conntrack_labels.h>
0036 #endif
0037 #include <linux/bpf-netns.h>
0038 
0039 static void dissector_set_key(struct flow_dissector *flow_dissector,
0040                   enum flow_dissector_key_id key_id)
0041 {
0042     flow_dissector->used_keys |= (1 << key_id);
0043 }
0044 
0045 void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
0046                  const struct flow_dissector_key *key,
0047                  unsigned int key_count)
0048 {
0049     unsigned int i;
0050 
0051     memset(flow_dissector, 0, sizeof(*flow_dissector));
0052 
0053     for (i = 0; i < key_count; i++, key++) {
0054         /* User should make sure that every key target offset is within
0055          * boundaries of unsigned short.
0056          */
0057         BUG_ON(key->offset > USHRT_MAX);
0058         BUG_ON(dissector_uses_key(flow_dissector,
0059                       key->key_id));
0060 
0061         dissector_set_key(flow_dissector, key->key_id);
0062         flow_dissector->offset[key->key_id] = key->offset;
0063     }
0064 
0065     /* Ensure that the dissector always includes control and basic key.
0066      * That way we are able to avoid handling lack of these in fast path.
0067      */
0068     BUG_ON(!dissector_uses_key(flow_dissector,
0069                    FLOW_DISSECTOR_KEY_CONTROL));
0070     BUG_ON(!dissector_uses_key(flow_dissector,
0071                    FLOW_DISSECTOR_KEY_BASIC));
0072 }
0073 EXPORT_SYMBOL(skb_flow_dissector_init);
0074 
0075 #ifdef CONFIG_BPF_SYSCALL
0076 int flow_dissector_bpf_prog_attach_check(struct net *net,
0077                      struct bpf_prog *prog)
0078 {
0079     enum netns_bpf_attach_type type = NETNS_BPF_FLOW_DISSECTOR;
0080 
0081     if (net == &init_net) {
0082         /* BPF flow dissector in the root namespace overrides
0083          * any per-net-namespace one. When attaching to root,
0084          * make sure we don't have any BPF program attached
0085          * to the non-root namespaces.
0086          */
0087         struct net *ns;
0088 
0089         for_each_net(ns) {
0090             if (ns == &init_net)
0091                 continue;
0092             if (rcu_access_pointer(ns->bpf.run_array[type]))
0093                 return -EEXIST;
0094         }
0095     } else {
0096         /* Make sure root flow dissector is not attached
0097          * when attaching to the non-root namespace.
0098          */
0099         if (rcu_access_pointer(init_net.bpf.run_array[type]))
0100             return -EEXIST;
0101     }
0102 
0103     return 0;
0104 }
0105 #endif /* CONFIG_BPF_SYSCALL */
0106 
0107 /**
0108  * __skb_flow_get_ports - extract the upper layer ports and return them
0109  * @skb: sk_buff to extract the ports from
0110  * @thoff: transport header offset
0111  * @ip_proto: protocol for which to get port offset
0112  * @data: raw buffer pointer to the packet, if NULL use skb->data
0113  * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
0114  *
0115  * The function will try to retrieve the ports at offset thoff + poff where poff
0116  * is the protocol port offset returned from proto_ports_offset
0117  */
0118 __be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
0119                 const void *data, int hlen)
0120 {
0121     int poff = proto_ports_offset(ip_proto);
0122 
0123     if (!data) {
0124         data = skb->data;
0125         hlen = skb_headlen(skb);
0126     }
0127 
0128     if (poff >= 0) {
0129         __be32 *ports, _ports;
0130 
0131         ports = __skb_header_pointer(skb, thoff + poff,
0132                          sizeof(_ports), data, hlen, &_ports);
0133         if (ports)
0134             return *ports;
0135     }
0136 
0137     return 0;
0138 }
0139 EXPORT_SYMBOL(__skb_flow_get_ports);
0140 
0141 static bool icmp_has_id(u8 type)
0142 {
0143     switch (type) {
0144     case ICMP_ECHO:
0145     case ICMP_ECHOREPLY:
0146     case ICMP_TIMESTAMP:
0147     case ICMP_TIMESTAMPREPLY:
0148     case ICMPV6_ECHO_REQUEST:
0149     case ICMPV6_ECHO_REPLY:
0150         return true;
0151     }
0152 
0153     return false;
0154 }
0155 
0156 /**
0157  * skb_flow_get_icmp_tci - extract ICMP(6) Type, Code and Identifier fields
0158  * @skb: sk_buff to extract from
0159  * @key_icmp: struct flow_dissector_key_icmp to fill
0160  * @data: raw buffer pointer to the packet
0161  * @thoff: offset to extract at
0162  * @hlen: packet header length
0163  */
0164 void skb_flow_get_icmp_tci(const struct sk_buff *skb,
0165                struct flow_dissector_key_icmp *key_icmp,
0166                const void *data, int thoff, int hlen)
0167 {
0168     struct icmphdr *ih, _ih;
0169 
0170     ih = __skb_header_pointer(skb, thoff, sizeof(_ih), data, hlen, &_ih);
0171     if (!ih)
0172         return;
0173 
0174     key_icmp->type = ih->type;
0175     key_icmp->code = ih->code;
0176 
0177     /* As we use 0 to signal that the Id field is not present,
0178      * avoid confusion with packets without such field
0179      */
0180     if (icmp_has_id(ih->type))
0181         key_icmp->id = ih->un.echo.id ? ntohs(ih->un.echo.id) : 1;
0182     else
0183         key_icmp->id = 0;
0184 }
0185 EXPORT_SYMBOL(skb_flow_get_icmp_tci);
0186 
0187 /* If FLOW_DISSECTOR_KEY_ICMP is set, dissect an ICMP packet
0188  * using skb_flow_get_icmp_tci().
0189  */
0190 static void __skb_flow_dissect_icmp(const struct sk_buff *skb,
0191                     struct flow_dissector *flow_dissector,
0192                     void *target_container, const void *data,
0193                     int thoff, int hlen)
0194 {
0195     struct flow_dissector_key_icmp *key_icmp;
0196 
0197     if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ICMP))
0198         return;
0199 
0200     key_icmp = skb_flow_dissector_target(flow_dissector,
0201                          FLOW_DISSECTOR_KEY_ICMP,
0202                          target_container);
0203 
0204     skb_flow_get_icmp_tci(skb, key_icmp, data, thoff, hlen);
0205 }
0206 
0207 void skb_flow_dissect_meta(const struct sk_buff *skb,
0208                struct flow_dissector *flow_dissector,
0209                void *target_container)
0210 {
0211     struct flow_dissector_key_meta *meta;
0212 
0213     if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_META))
0214         return;
0215 
0216     meta = skb_flow_dissector_target(flow_dissector,
0217                      FLOW_DISSECTOR_KEY_META,
0218                      target_container);
0219     meta->ingress_ifindex = skb->skb_iif;
0220 }
0221 EXPORT_SYMBOL(skb_flow_dissect_meta);
0222 
0223 static void
0224 skb_flow_dissect_set_enc_addr_type(enum flow_dissector_key_id type,
0225                    struct flow_dissector *flow_dissector,
0226                    void *target_container)
0227 {
0228     struct flow_dissector_key_control *ctrl;
0229 
0230     if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL))
0231         return;
0232 
0233     ctrl = skb_flow_dissector_target(flow_dissector,
0234                      FLOW_DISSECTOR_KEY_ENC_CONTROL,
0235                      target_container);
0236     ctrl->addr_type = type;
0237 }
0238 
0239 void
0240 skb_flow_dissect_ct(const struct sk_buff *skb,
0241             struct flow_dissector *flow_dissector,
0242             void *target_container, u16 *ctinfo_map,
0243             size_t mapsize, bool post_ct, u16 zone)
0244 {
0245 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
0246     struct flow_dissector_key_ct *key;
0247     enum ip_conntrack_info ctinfo;
0248     struct nf_conn_labels *cl;
0249     struct nf_conn *ct;
0250 
0251     if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_CT))
0252         return;
0253 
0254     ct = nf_ct_get(skb, &ctinfo);
0255     if (!ct && !post_ct)
0256         return;
0257 
0258     key = skb_flow_dissector_target(flow_dissector,
0259                     FLOW_DISSECTOR_KEY_CT,
0260                     target_container);
0261 
0262     if (!ct) {
0263         key->ct_state = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
0264                 TCA_FLOWER_KEY_CT_FLAGS_INVALID;
0265         key->ct_zone = zone;
0266         return;
0267     }
0268 
0269     if (ctinfo < mapsize)
0270         key->ct_state = ctinfo_map[ctinfo];
0271 #if IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)
0272     key->ct_zone = ct->zone.id;
0273 #endif
0274 #if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
0275     key->ct_mark = ct->mark;
0276 #endif
0277 
0278     cl = nf_ct_labels_find(ct);
0279     if (cl)
0280         memcpy(key->ct_labels, cl->bits, sizeof(key->ct_labels));
0281 #endif /* CONFIG_NF_CONNTRACK */
0282 }
0283 EXPORT_SYMBOL(skb_flow_dissect_ct);
0284 
0285 void
0286 skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
0287                  struct flow_dissector *flow_dissector,
0288                  void *target_container)
0289 {
0290     struct ip_tunnel_info *info;
0291     struct ip_tunnel_key *key;
0292 
0293     /* A quick check to see if there might be something to do. */
0294     if (!dissector_uses_key(flow_dissector,
0295                 FLOW_DISSECTOR_KEY_ENC_KEYID) &&
0296         !dissector_uses_key(flow_dissector,
0297                 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) &&
0298         !dissector_uses_key(flow_dissector,
0299                 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) &&
0300         !dissector_uses_key(flow_dissector,
0301                 FLOW_DISSECTOR_KEY_ENC_CONTROL) &&
0302         !dissector_uses_key(flow_dissector,
0303                 FLOW_DISSECTOR_KEY_ENC_PORTS) &&
0304         !dissector_uses_key(flow_dissector,
0305                 FLOW_DISSECTOR_KEY_ENC_IP) &&
0306         !dissector_uses_key(flow_dissector,
0307                 FLOW_DISSECTOR_KEY_ENC_OPTS))
0308         return;
0309 
0310     info = skb_tunnel_info(skb);
0311     if (!info)
0312         return;
0313 
0314     key = &info->key;
0315 
0316     switch (ip_tunnel_info_af(info)) {
0317     case AF_INET:
0318         skb_flow_dissect_set_enc_addr_type(FLOW_DISSECTOR_KEY_IPV4_ADDRS,
0319                            flow_dissector,
0320                            target_container);
0321         if (dissector_uses_key(flow_dissector,
0322                        FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
0323             struct flow_dissector_key_ipv4_addrs *ipv4;
0324 
0325             ipv4 = skb_flow_dissector_target(flow_dissector,
0326                              FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
0327                              target_container);
0328             ipv4->src = key->u.ipv4.src;
0329             ipv4->dst = key->u.ipv4.dst;
0330         }
0331         break;
0332     case AF_INET6:
0333         skb_flow_dissect_set_enc_addr_type(FLOW_DISSECTOR_KEY_IPV6_ADDRS,
0334                            flow_dissector,
0335                            target_container);
0336         if (dissector_uses_key(flow_dissector,
0337                        FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
0338             struct flow_dissector_key_ipv6_addrs *ipv6;
0339 
0340             ipv6 = skb_flow_dissector_target(flow_dissector,
0341                              FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
0342                              target_container);
0343             ipv6->src = key->u.ipv6.src;
0344             ipv6->dst = key->u.ipv6.dst;
0345         }
0346         break;
0347     }
0348 
0349     if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
0350         struct flow_dissector_key_keyid *keyid;
0351 
0352         keyid = skb_flow_dissector_target(flow_dissector,
0353                           FLOW_DISSECTOR_KEY_ENC_KEYID,
0354                           target_container);
0355         keyid->keyid = tunnel_id_to_key32(key->tun_id);
0356     }
0357 
0358     if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
0359         struct flow_dissector_key_ports *tp;
0360 
0361         tp = skb_flow_dissector_target(flow_dissector,
0362                            FLOW_DISSECTOR_KEY_ENC_PORTS,
0363                            target_container);
0364         tp->src = key->tp_src;
0365         tp->dst = key->tp_dst;
0366     }
0367 
0368     if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_IP)) {
0369         struct flow_dissector_key_ip *ip;
0370 
0371         ip = skb_flow_dissector_target(flow_dissector,
0372                            FLOW_DISSECTOR_KEY_ENC_IP,
0373                            target_container);
0374         ip->tos = key->tos;
0375         ip->ttl = key->ttl;
0376     }
0377 
0378     if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
0379         struct flow_dissector_key_enc_opts *enc_opt;
0380 
0381         enc_opt = skb_flow_dissector_target(flow_dissector,
0382                             FLOW_DISSECTOR_KEY_ENC_OPTS,
0383                             target_container);
0384 
0385         if (info->options_len) {
0386             enc_opt->len = info->options_len;
0387             ip_tunnel_info_opts_get(enc_opt->data, info);
0388             enc_opt->dst_opt_type = info->key.tun_flags &
0389                         TUNNEL_OPTIONS_PRESENT;
0390         }
0391     }
0392 }
0393 EXPORT_SYMBOL(skb_flow_dissect_tunnel_info);
0394 
0395 void skb_flow_dissect_hash(const struct sk_buff *skb,
0396                struct flow_dissector *flow_dissector,
0397                void *target_container)
0398 {
0399     struct flow_dissector_key_hash *key;
0400 
0401     if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_HASH))
0402         return;
0403 
0404     key = skb_flow_dissector_target(flow_dissector,
0405                     FLOW_DISSECTOR_KEY_HASH,
0406                     target_container);
0407 
0408     key->hash = skb_get_hash_raw(skb);
0409 }
0410 EXPORT_SYMBOL(skb_flow_dissect_hash);
0411 
0412 static enum flow_dissect_ret
0413 __skb_flow_dissect_mpls(const struct sk_buff *skb,
0414             struct flow_dissector *flow_dissector,
0415             void *target_container, const void *data, int nhoff,
0416             int hlen, int lse_index, bool *entropy_label)
0417 {
0418     struct mpls_label *hdr, _hdr;
0419     u32 entry, label, bos;
0420 
0421     if (!dissector_uses_key(flow_dissector,
0422                 FLOW_DISSECTOR_KEY_MPLS_ENTROPY) &&
0423         !dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_MPLS))
0424         return FLOW_DISSECT_RET_OUT_GOOD;
0425 
0426     if (lse_index >= FLOW_DIS_MPLS_MAX)
0427         return FLOW_DISSECT_RET_OUT_GOOD;
0428 
0429     hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
0430                    hlen, &_hdr);
0431     if (!hdr)
0432         return FLOW_DISSECT_RET_OUT_BAD;
0433 
0434     entry = ntohl(hdr->entry);
0435     label = (entry & MPLS_LS_LABEL_MASK) >> MPLS_LS_LABEL_SHIFT;
0436     bos = (entry & MPLS_LS_S_MASK) >> MPLS_LS_S_SHIFT;
0437 
0438     if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_MPLS)) {
0439         struct flow_dissector_key_mpls *key_mpls;
0440         struct flow_dissector_mpls_lse *lse;
0441 
0442         key_mpls = skb_flow_dissector_target(flow_dissector,
0443                              FLOW_DISSECTOR_KEY_MPLS,
0444                              target_container);
0445         lse = &key_mpls->ls[lse_index];
0446 
0447         lse->mpls_ttl = (entry & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT;
0448         lse->mpls_bos = bos;
0449         lse->mpls_tc = (entry & MPLS_LS_TC_MASK) >> MPLS_LS_TC_SHIFT;
0450         lse->mpls_label = label;
0451         dissector_set_mpls_lse(key_mpls, lse_index);
0452     }
0453 
0454     if (*entropy_label &&
0455         dissector_uses_key(flow_dissector,
0456                    FLOW_DISSECTOR_KEY_MPLS_ENTROPY)) {
0457         struct flow_dissector_key_keyid *key_keyid;
0458 
0459         key_keyid = skb_flow_dissector_target(flow_dissector,
0460                               FLOW_DISSECTOR_KEY_MPLS_ENTROPY,
0461                               target_container);
0462         key_keyid->keyid = cpu_to_be32(label);
0463     }
0464 
0465     *entropy_label = label == MPLS_LABEL_ENTROPY;
0466 
0467     return bos ? FLOW_DISSECT_RET_OUT_GOOD : FLOW_DISSECT_RET_PROTO_AGAIN;
0468 }
0469 
0470 static enum flow_dissect_ret
0471 __skb_flow_dissect_arp(const struct sk_buff *skb,
0472                struct flow_dissector *flow_dissector,
0473                void *target_container, const void *data,
0474                int nhoff, int hlen)
0475 {
0476     struct flow_dissector_key_arp *key_arp;
0477     struct {
0478         unsigned char ar_sha[ETH_ALEN];
0479         unsigned char ar_sip[4];
0480         unsigned char ar_tha[ETH_ALEN];
0481         unsigned char ar_tip[4];
0482     } *arp_eth, _arp_eth;
0483     const struct arphdr *arp;
0484     struct arphdr _arp;
0485 
0486     if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ARP))
0487         return FLOW_DISSECT_RET_OUT_GOOD;
0488 
0489     arp = __skb_header_pointer(skb, nhoff, sizeof(_arp), data,
0490                    hlen, &_arp);
0491     if (!arp)
0492         return FLOW_DISSECT_RET_OUT_BAD;
0493 
0494     if (arp->ar_hrd != htons(ARPHRD_ETHER) ||
0495         arp->ar_pro != htons(ETH_P_IP) ||
0496         arp->ar_hln != ETH_ALEN ||
0497         arp->ar_pln != 4 ||
0498         (arp->ar_op != htons(ARPOP_REPLY) &&
0499          arp->ar_op != htons(ARPOP_REQUEST)))
0500         return FLOW_DISSECT_RET_OUT_BAD;
0501 
0502     arp_eth = __skb_header_pointer(skb, nhoff + sizeof(_arp),
0503                        sizeof(_arp_eth), data,
0504                        hlen, &_arp_eth);
0505     if (!arp_eth)
0506         return FLOW_DISSECT_RET_OUT_BAD;
0507 
0508     key_arp = skb_flow_dissector_target(flow_dissector,
0509                         FLOW_DISSECTOR_KEY_ARP,
0510                         target_container);
0511 
0512     memcpy(&key_arp->sip, arp_eth->ar_sip, sizeof(key_arp->sip));
0513     memcpy(&key_arp->tip, arp_eth->ar_tip, sizeof(key_arp->tip));
0514 
0515     /* Only store the lower byte of the opcode;
0516      * this covers ARPOP_REPLY and ARPOP_REQUEST.
0517      */
0518     key_arp->op = ntohs(arp->ar_op) & 0xff;
0519 
0520     ether_addr_copy(key_arp->sha, arp_eth->ar_sha);
0521     ether_addr_copy(key_arp->tha, arp_eth->ar_tha);
0522 
0523     return FLOW_DISSECT_RET_OUT_GOOD;
0524 }
0525 
0526 static enum flow_dissect_ret
0527 __skb_flow_dissect_gre(const struct sk_buff *skb,
0528                struct flow_dissector_key_control *key_control,
0529                struct flow_dissector *flow_dissector,
0530                void *target_container, const void *data,
0531                __be16 *p_proto, int *p_nhoff, int *p_hlen,
0532                unsigned int flags)
0533 {
0534     struct flow_dissector_key_keyid *key_keyid;
0535     struct gre_base_hdr *hdr, _hdr;
0536     int offset = 0;
0537     u16 gre_ver;
0538 
0539     hdr = __skb_header_pointer(skb, *p_nhoff, sizeof(_hdr),
0540                    data, *p_hlen, &_hdr);
0541     if (!hdr)
0542         return FLOW_DISSECT_RET_OUT_BAD;
0543 
0544     /* Only look inside GRE without routing */
0545     if (hdr->flags & GRE_ROUTING)
0546         return FLOW_DISSECT_RET_OUT_GOOD;
0547 
0548     /* Only look inside GRE for version 0 and 1 */
0549     gre_ver = ntohs(hdr->flags & GRE_VERSION);
0550     if (gre_ver > 1)
0551         return FLOW_DISSECT_RET_OUT_GOOD;
0552 
0553     *p_proto = hdr->protocol;
0554     if (gre_ver) {
0555         /* Version1 must be PPTP, and check the flags */
0556         if (!(*p_proto == GRE_PROTO_PPP && (hdr->flags & GRE_KEY)))
0557             return FLOW_DISSECT_RET_OUT_GOOD;
0558     }
0559 
0560     offset += sizeof(struct gre_base_hdr);
0561 
0562     if (hdr->flags & GRE_CSUM)
0563         offset += sizeof_field(struct gre_full_hdr, csum) +
0564               sizeof_field(struct gre_full_hdr, reserved1);
0565 
0566     if (hdr->flags & GRE_KEY) {
0567         const __be32 *keyid;
0568         __be32 _keyid;
0569 
0570         keyid = __skb_header_pointer(skb, *p_nhoff + offset,
0571                          sizeof(_keyid),
0572                          data, *p_hlen, &_keyid);
0573         if (!keyid)
0574             return FLOW_DISSECT_RET_OUT_BAD;
0575 
0576         if (dissector_uses_key(flow_dissector,
0577                        FLOW_DISSECTOR_KEY_GRE_KEYID)) {
0578             key_keyid = skb_flow_dissector_target(flow_dissector,
0579                                   FLOW_DISSECTOR_KEY_GRE_KEYID,
0580                                   target_container);
0581             if (gre_ver == 0)
0582                 key_keyid->keyid = *keyid;
0583             else
0584                 key_keyid->keyid = *keyid & GRE_PPTP_KEY_MASK;
0585         }
0586         offset += sizeof_field(struct gre_full_hdr, key);
0587     }
0588 
0589     if (hdr->flags & GRE_SEQ)
0590         offset += sizeof_field(struct pptp_gre_header, seq);
0591 
0592     if (gre_ver == 0) {
0593         if (*p_proto == htons(ETH_P_TEB)) {
0594             const struct ethhdr *eth;
0595             struct ethhdr _eth;
0596 
0597             eth = __skb_header_pointer(skb, *p_nhoff + offset,
0598                            sizeof(_eth),
0599                            data, *p_hlen, &_eth);
0600             if (!eth)
0601                 return FLOW_DISSECT_RET_OUT_BAD;
0602             *p_proto = eth->h_proto;
0603             offset += sizeof(*eth);
0604 
0605             /* Cap headers that we access via pointers at the
0606              * end of the Ethernet header as our maximum alignment
0607              * at that point is only 2 bytes.
0608              */
0609             if (NET_IP_ALIGN)
0610                 *p_hlen = *p_nhoff + offset;
0611         }
0612     } else { /* version 1, must be PPTP */
0613         u8 _ppp_hdr[PPP_HDRLEN];
0614         u8 *ppp_hdr;
0615 
0616         if (hdr->flags & GRE_ACK)
0617             offset += sizeof_field(struct pptp_gre_header, ack);
0618 
0619         ppp_hdr = __skb_header_pointer(skb, *p_nhoff + offset,
0620                            sizeof(_ppp_hdr),
0621                            data, *p_hlen, _ppp_hdr);
0622         if (!ppp_hdr)
0623             return FLOW_DISSECT_RET_OUT_BAD;
0624 
0625         switch (PPP_PROTOCOL(ppp_hdr)) {
0626         case PPP_IP:
0627             *p_proto = htons(ETH_P_IP);
0628             break;
0629         case PPP_IPV6:
0630             *p_proto = htons(ETH_P_IPV6);
0631             break;
0632         default:
0633             /* Could probably catch some more like MPLS */
0634             break;
0635         }
0636 
0637         offset += PPP_HDRLEN;
0638     }
0639 
0640     *p_nhoff += offset;
0641     key_control->flags |= FLOW_DIS_ENCAPSULATION;
0642     if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
0643         return FLOW_DISSECT_RET_OUT_GOOD;
0644 
0645     return FLOW_DISSECT_RET_PROTO_AGAIN;
0646 }
0647 
0648 /**
0649  * __skb_flow_dissect_batadv() - dissect batman-adv header
0650  * @skb: sk_buff to with the batman-adv header
0651  * @key_control: flow dissectors control key
0652  * @data: raw buffer pointer to the packet, if NULL use skb->data
0653  * @p_proto: pointer used to update the protocol to process next
0654  * @p_nhoff: pointer used to update inner network header offset
0655  * @hlen: packet header length
0656  * @flags: any combination of FLOW_DISSECTOR_F_*
0657  *
0658  * ETH_P_BATMAN packets are tried to be dissected. Only
0659  * &struct batadv_unicast packets are actually processed because they contain an
0660  * inner ethernet header and are usually followed by actual network header. This
0661  * allows the flow dissector to continue processing the packet.
0662  *
0663  * Return: FLOW_DISSECT_RET_PROTO_AGAIN when &struct batadv_unicast was found,
0664  *  FLOW_DISSECT_RET_OUT_GOOD when dissector should stop after encapsulation,
0665  *  otherwise FLOW_DISSECT_RET_OUT_BAD
0666  */
0667 static enum flow_dissect_ret
0668 __skb_flow_dissect_batadv(const struct sk_buff *skb,
0669               struct flow_dissector_key_control *key_control,
0670               const void *data, __be16 *p_proto, int *p_nhoff,
0671               int hlen, unsigned int flags)
0672 {
0673     struct {
0674         struct batadv_unicast_packet batadv_unicast;
0675         struct ethhdr eth;
0676     } *hdr, _hdr;
0677 
0678     hdr = __skb_header_pointer(skb, *p_nhoff, sizeof(_hdr), data, hlen,
0679                    &_hdr);
0680     if (!hdr)
0681         return FLOW_DISSECT_RET_OUT_BAD;
0682 
0683     if (hdr->batadv_unicast.version != BATADV_COMPAT_VERSION)
0684         return FLOW_DISSECT_RET_OUT_BAD;
0685 
0686     if (hdr->batadv_unicast.packet_type != BATADV_UNICAST)
0687         return FLOW_DISSECT_RET_OUT_BAD;
0688 
0689     *p_proto = hdr->eth.h_proto;
0690     *p_nhoff += sizeof(*hdr);
0691 
0692     key_control->flags |= FLOW_DIS_ENCAPSULATION;
0693     if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
0694         return FLOW_DISSECT_RET_OUT_GOOD;
0695 
0696     return FLOW_DISSECT_RET_PROTO_AGAIN;
0697 }
0698 
0699 static void
0700 __skb_flow_dissect_tcp(const struct sk_buff *skb,
0701                struct flow_dissector *flow_dissector,
0702                void *target_container, const void *data,
0703                int thoff, int hlen)
0704 {
0705     struct flow_dissector_key_tcp *key_tcp;
0706     struct tcphdr *th, _th;
0707 
0708     if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_TCP))
0709         return;
0710 
0711     th = __skb_header_pointer(skb, thoff, sizeof(_th), data, hlen, &_th);
0712     if (!th)
0713         return;
0714 
0715     if (unlikely(__tcp_hdrlen(th) < sizeof(_th)))
0716         return;
0717 
0718     key_tcp = skb_flow_dissector_target(flow_dissector,
0719                         FLOW_DISSECTOR_KEY_TCP,
0720                         target_container);
0721     key_tcp->flags = (*(__be16 *) &tcp_flag_word(th) & htons(0x0FFF));
0722 }
0723 
0724 static void
0725 __skb_flow_dissect_ports(const struct sk_buff *skb,
0726              struct flow_dissector *flow_dissector,
0727              void *target_container, const void *data,
0728              int nhoff, u8 ip_proto, int hlen)
0729 {
0730     enum flow_dissector_key_id dissector_ports = FLOW_DISSECTOR_KEY_MAX;
0731     struct flow_dissector_key_ports *key_ports;
0732 
0733     if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS))
0734         dissector_ports = FLOW_DISSECTOR_KEY_PORTS;
0735     else if (dissector_uses_key(flow_dissector,
0736                     FLOW_DISSECTOR_KEY_PORTS_RANGE))
0737         dissector_ports = FLOW_DISSECTOR_KEY_PORTS_RANGE;
0738 
0739     if (dissector_ports == FLOW_DISSECTOR_KEY_MAX)
0740         return;
0741 
0742     key_ports = skb_flow_dissector_target(flow_dissector,
0743                           dissector_ports,
0744                           target_container);
0745     key_ports->ports = __skb_flow_get_ports(skb, nhoff, ip_proto,
0746                         data, hlen);
0747 }
0748 
0749 static void
0750 __skb_flow_dissect_ipv4(const struct sk_buff *skb,
0751             struct flow_dissector *flow_dissector,
0752             void *target_container, const void *data,
0753             const struct iphdr *iph)
0754 {
0755     struct flow_dissector_key_ip *key_ip;
0756 
0757     if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IP))
0758         return;
0759 
0760     key_ip = skb_flow_dissector_target(flow_dissector,
0761                        FLOW_DISSECTOR_KEY_IP,
0762                        target_container);
0763     key_ip->tos = iph->tos;
0764     key_ip->ttl = iph->ttl;
0765 }
0766 
0767 static void
0768 __skb_flow_dissect_ipv6(const struct sk_buff *skb,
0769             struct flow_dissector *flow_dissector,
0770             void *target_container, const void *data,
0771             const struct ipv6hdr *iph)
0772 {
0773     struct flow_dissector_key_ip *key_ip;
0774 
0775     if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IP))
0776         return;
0777 
0778     key_ip = skb_flow_dissector_target(flow_dissector,
0779                        FLOW_DISSECTOR_KEY_IP,
0780                        target_container);
0781     key_ip->tos = ipv6_get_dsfield(iph);
0782     key_ip->ttl = iph->hop_limit;
0783 }
0784 
0785 /* Maximum number of protocol headers that can be parsed in
0786  * __skb_flow_dissect
0787  */
0788 #define MAX_FLOW_DISSECT_HDRS   15
0789 
0790 static bool skb_flow_dissect_allowed(int *num_hdrs)
0791 {
0792     ++*num_hdrs;
0793 
0794     return (*num_hdrs <= MAX_FLOW_DISSECT_HDRS);
0795 }
0796 
0797 static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys,
0798                      struct flow_dissector *flow_dissector,
0799                      void *target_container)
0800 {
0801     struct flow_dissector_key_ports *key_ports = NULL;
0802     struct flow_dissector_key_control *key_control;
0803     struct flow_dissector_key_basic *key_basic;
0804     struct flow_dissector_key_addrs *key_addrs;
0805     struct flow_dissector_key_tags *key_tags;
0806 
0807     key_control = skb_flow_dissector_target(flow_dissector,
0808                         FLOW_DISSECTOR_KEY_CONTROL,
0809                         target_container);
0810     key_control->thoff = flow_keys->thoff;
0811     if (flow_keys->is_frag)
0812         key_control->flags |= FLOW_DIS_IS_FRAGMENT;
0813     if (flow_keys->is_first_frag)
0814         key_control->flags |= FLOW_DIS_FIRST_FRAG;
0815     if (flow_keys->is_encap)
0816         key_control->flags |= FLOW_DIS_ENCAPSULATION;
0817 
0818     key_basic = skb_flow_dissector_target(flow_dissector,
0819                           FLOW_DISSECTOR_KEY_BASIC,
0820                           target_container);
0821     key_basic->n_proto = flow_keys->n_proto;
0822     key_basic->ip_proto = flow_keys->ip_proto;
0823 
0824     if (flow_keys->addr_proto == ETH_P_IP &&
0825         dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
0826         key_addrs = skb_flow_dissector_target(flow_dissector,
0827                               FLOW_DISSECTOR_KEY_IPV4_ADDRS,
0828                               target_container);
0829         key_addrs->v4addrs.src = flow_keys->ipv4_src;
0830         key_addrs->v4addrs.dst = flow_keys->ipv4_dst;
0831         key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
0832     } else if (flow_keys->addr_proto == ETH_P_IPV6 &&
0833            dissector_uses_key(flow_dissector,
0834                       FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
0835         key_addrs = skb_flow_dissector_target(flow_dissector,
0836                               FLOW_DISSECTOR_KEY_IPV6_ADDRS,
0837                               target_container);
0838         memcpy(&key_addrs->v6addrs.src, &flow_keys->ipv6_src,
0839                sizeof(key_addrs->v6addrs.src));
0840         memcpy(&key_addrs->v6addrs.dst, &flow_keys->ipv6_dst,
0841                sizeof(key_addrs->v6addrs.dst));
0842         key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
0843     }
0844 
0845     if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS))
0846         key_ports = skb_flow_dissector_target(flow_dissector,
0847                               FLOW_DISSECTOR_KEY_PORTS,
0848                               target_container);
0849     else if (dissector_uses_key(flow_dissector,
0850                     FLOW_DISSECTOR_KEY_PORTS_RANGE))
0851         key_ports = skb_flow_dissector_target(flow_dissector,
0852                               FLOW_DISSECTOR_KEY_PORTS_RANGE,
0853                               target_container);
0854 
0855     if (key_ports) {
0856         key_ports->src = flow_keys->sport;
0857         key_ports->dst = flow_keys->dport;
0858     }
0859 
0860     if (dissector_uses_key(flow_dissector,
0861                    FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
0862         key_tags = skb_flow_dissector_target(flow_dissector,
0863                              FLOW_DISSECTOR_KEY_FLOW_LABEL,
0864                              target_container);
0865         key_tags->flow_label = ntohl(flow_keys->flow_label);
0866     }
0867 }
0868 
0869 bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx,
0870               __be16 proto, int nhoff, int hlen, unsigned int flags)
0871 {
0872     struct bpf_flow_keys *flow_keys = ctx->flow_keys;
0873     u32 result;
0874 
0875     /* Pass parameters to the BPF program */
0876     memset(flow_keys, 0, sizeof(*flow_keys));
0877     flow_keys->n_proto = proto;
0878     flow_keys->nhoff = nhoff;
0879     flow_keys->thoff = flow_keys->nhoff;
0880 
0881     BUILD_BUG_ON((int)BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG !=
0882              (int)FLOW_DISSECTOR_F_PARSE_1ST_FRAG);
0883     BUILD_BUG_ON((int)BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL !=
0884              (int)FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
0885     BUILD_BUG_ON((int)BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP !=
0886              (int)FLOW_DISSECTOR_F_STOP_AT_ENCAP);
0887     flow_keys->flags = flags;
0888 
0889     result = bpf_prog_run_pin_on_cpu(prog, ctx);
0890 
0891     flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff, nhoff, hlen);
0892     flow_keys->thoff = clamp_t(u16, flow_keys->thoff,
0893                    flow_keys->nhoff, hlen);
0894 
0895     return result == BPF_OK;
0896 }
0897 
0898 static bool is_pppoe_ses_hdr_valid(const struct pppoe_hdr *hdr)
0899 {
0900     return hdr->ver == 1 && hdr->type == 1 && hdr->code == 0;
0901 }
0902 
0903 /**
0904  * __skb_flow_dissect - extract the flow_keys struct and return it
0905  * @net: associated network namespace, derived from @skb if NULL
0906  * @skb: sk_buff to extract the flow from, can be NULL if the rest are specified
0907  * @flow_dissector: list of keys to dissect
0908  * @target_container: target structure to put dissected values into
0909  * @data: raw buffer pointer to the packet, if NULL use skb->data
0910  * @proto: protocol for which to get the flow, if @data is NULL use skb->protocol
0911  * @nhoff: network header offset, if @data is NULL use skb_network_offset(skb)
0912  * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
0913  * @flags: flags that control the dissection process, e.g.
0914  *         FLOW_DISSECTOR_F_STOP_AT_ENCAP.
0915  *
0916  * The function will try to retrieve individual keys into target specified
0917  * by flow_dissector from either the skbuff or a raw buffer specified by the
0918  * rest parameters.
0919  *
0920  * Caller must take care of zeroing target container memory.
0921  */
0922 bool __skb_flow_dissect(const struct net *net,
0923             const struct sk_buff *skb,
0924             struct flow_dissector *flow_dissector,
0925             void *target_container, const void *data,
0926             __be16 proto, int nhoff, int hlen, unsigned int flags)
0927 {
0928     struct flow_dissector_key_control *key_control;
0929     struct flow_dissector_key_basic *key_basic;
0930     struct flow_dissector_key_addrs *key_addrs;
0931     struct flow_dissector_key_tags *key_tags;
0932     struct flow_dissector_key_vlan *key_vlan;
0933     enum flow_dissect_ret fdret;
0934     enum flow_dissector_key_id dissector_vlan = FLOW_DISSECTOR_KEY_MAX;
0935     bool mpls_el = false;
0936     int mpls_lse = 0;
0937     int num_hdrs = 0;
0938     u8 ip_proto = 0;
0939     bool ret;
0940 
0941     if (!data) {
0942         data = skb->data;
0943         proto = skb_vlan_tag_present(skb) ?
0944              skb->vlan_proto : skb->protocol;
0945         nhoff = skb_network_offset(skb);
0946         hlen = skb_headlen(skb);
0947 #if IS_ENABLED(CONFIG_NET_DSA)
0948         if (unlikely(skb->dev && netdev_uses_dsa(skb->dev) &&
0949                  proto == htons(ETH_P_XDSA))) {
0950             const struct dsa_device_ops *ops;
0951             int offset = 0;
0952 
0953             ops = skb->dev->dsa_ptr->tag_ops;
0954             /* Only DSA header taggers break flow dissection */
0955             if (ops->needed_headroom) {
0956                 if (ops->flow_dissect)
0957                     ops->flow_dissect(skb, &proto, &offset);
0958                 else
0959                     dsa_tag_generic_flow_dissect(skb,
0960                                      &proto,
0961                                      &offset);
0962                 hlen -= offset;
0963                 nhoff += offset;
0964             }
0965         }
0966 #endif
0967     }
0968 
0969     /* It is ensured by skb_flow_dissector_init() that control key will
0970      * be always present.
0971      */
0972     key_control = skb_flow_dissector_target(flow_dissector,
0973                         FLOW_DISSECTOR_KEY_CONTROL,
0974                         target_container);
0975 
0976     /* It is ensured by skb_flow_dissector_init() that basic key will
0977      * be always present.
0978      */
0979     key_basic = skb_flow_dissector_target(flow_dissector,
0980                           FLOW_DISSECTOR_KEY_BASIC,
0981                           target_container);
0982 
0983     if (skb) {
0984         if (!net) {
0985             if (skb->dev)
0986                 net = dev_net(skb->dev);
0987             else if (skb->sk)
0988                 net = sock_net(skb->sk);
0989         }
0990     }
0991 
0992     WARN_ON_ONCE(!net);
0993     if (net) {
0994         enum netns_bpf_attach_type type = NETNS_BPF_FLOW_DISSECTOR;
0995         struct bpf_prog_array *run_array;
0996 
0997         rcu_read_lock();
0998         run_array = rcu_dereference(init_net.bpf.run_array[type]);
0999         if (!run_array)
1000             run_array = rcu_dereference(net->bpf.run_array[type]);
1001 
1002         if (run_array) {
1003             struct bpf_flow_keys flow_keys;
1004             struct bpf_flow_dissector ctx = {
1005                 .flow_keys = &flow_keys,
1006                 .data = data,
1007                 .data_end = data + hlen,
1008             };
1009             __be16 n_proto = proto;
1010             struct bpf_prog *prog;
1011 
1012             if (skb) {
1013                 ctx.skb = skb;
1014                 /* we can't use 'proto' in the skb case
1015                  * because it might be set to skb->vlan_proto
1016                  * which has been pulled from the data
1017                  */
1018                 n_proto = skb->protocol;
1019             }
1020 
1021             prog = READ_ONCE(run_array->items[0].prog);
1022             ret = bpf_flow_dissect(prog, &ctx, n_proto, nhoff,
1023                            hlen, flags);
1024             __skb_flow_bpf_to_target(&flow_keys, flow_dissector,
1025                          target_container);
1026             rcu_read_unlock();
1027             return ret;
1028         }
1029         rcu_read_unlock();
1030     }
1031 
1032     if (dissector_uses_key(flow_dissector,
1033                    FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
1034         struct ethhdr *eth = eth_hdr(skb);
1035         struct flow_dissector_key_eth_addrs *key_eth_addrs;
1036 
1037         key_eth_addrs = skb_flow_dissector_target(flow_dissector,
1038                               FLOW_DISSECTOR_KEY_ETH_ADDRS,
1039                               target_container);
1040         memcpy(key_eth_addrs, eth, sizeof(*key_eth_addrs));
1041     }
1042 
1043     if (dissector_uses_key(flow_dissector,
1044                    FLOW_DISSECTOR_KEY_NUM_OF_VLANS)) {
1045         struct flow_dissector_key_num_of_vlans *key_num_of_vlans;
1046 
1047         key_num_of_vlans = skb_flow_dissector_target(flow_dissector,
1048                                  FLOW_DISSECTOR_KEY_NUM_OF_VLANS,
1049                                  target_container);
1050         key_num_of_vlans->num_of_vlans = 0;
1051     }
1052 
1053 proto_again:
1054     fdret = FLOW_DISSECT_RET_CONTINUE;
1055 
1056     switch (proto) {
1057     case htons(ETH_P_IP): {
1058         const struct iphdr *iph;
1059         struct iphdr _iph;
1060 
1061         iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
1062         if (!iph || iph->ihl < 5) {
1063             fdret = FLOW_DISSECT_RET_OUT_BAD;
1064             break;
1065         }
1066 
1067         nhoff += iph->ihl * 4;
1068 
1069         ip_proto = iph->protocol;
1070 
1071         if (dissector_uses_key(flow_dissector,
1072                        FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
1073             key_addrs = skb_flow_dissector_target(flow_dissector,
1074                                   FLOW_DISSECTOR_KEY_IPV4_ADDRS,
1075                                   target_container);
1076 
1077             memcpy(&key_addrs->v4addrs.src, &iph->saddr,
1078                    sizeof(key_addrs->v4addrs.src));
1079             memcpy(&key_addrs->v4addrs.dst, &iph->daddr,
1080                    sizeof(key_addrs->v4addrs.dst));
1081             key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1082         }
1083 
1084         __skb_flow_dissect_ipv4(skb, flow_dissector,
1085                     target_container, data, iph);
1086 
1087         if (ip_is_fragment(iph)) {
1088             key_control->flags |= FLOW_DIS_IS_FRAGMENT;
1089 
1090             if (iph->frag_off & htons(IP_OFFSET)) {
1091                 fdret = FLOW_DISSECT_RET_OUT_GOOD;
1092                 break;
1093             } else {
1094                 key_control->flags |= FLOW_DIS_FIRST_FRAG;
1095                 if (!(flags &
1096                       FLOW_DISSECTOR_F_PARSE_1ST_FRAG)) {
1097                     fdret = FLOW_DISSECT_RET_OUT_GOOD;
1098                     break;
1099                 }
1100             }
1101         }
1102 
1103         break;
1104     }
1105     case htons(ETH_P_IPV6): {
1106         const struct ipv6hdr *iph;
1107         struct ipv6hdr _iph;
1108 
1109         iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
1110         if (!iph) {
1111             fdret = FLOW_DISSECT_RET_OUT_BAD;
1112             break;
1113         }
1114 
1115         ip_proto = iph->nexthdr;
1116         nhoff += sizeof(struct ipv6hdr);
1117 
1118         if (dissector_uses_key(flow_dissector,
1119                        FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
1120             key_addrs = skb_flow_dissector_target(flow_dissector,
1121                                   FLOW_DISSECTOR_KEY_IPV6_ADDRS,
1122                                   target_container);
1123 
1124             memcpy(&key_addrs->v6addrs.src, &iph->saddr,
1125                    sizeof(key_addrs->v6addrs.src));
1126             memcpy(&key_addrs->v6addrs.dst, &iph->daddr,
1127                    sizeof(key_addrs->v6addrs.dst));
1128             key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1129         }
1130 
1131         if ((dissector_uses_key(flow_dissector,
1132                     FLOW_DISSECTOR_KEY_FLOW_LABEL) ||
1133              (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)) &&
1134             ip6_flowlabel(iph)) {
1135             __be32 flow_label = ip6_flowlabel(iph);
1136 
1137             if (dissector_uses_key(flow_dissector,
1138                            FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
1139                 key_tags = skb_flow_dissector_target(flow_dissector,
1140                                      FLOW_DISSECTOR_KEY_FLOW_LABEL,
1141                                      target_container);
1142                 key_tags->flow_label = ntohl(flow_label);
1143             }
1144             if (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL) {
1145                 fdret = FLOW_DISSECT_RET_OUT_GOOD;
1146                 break;
1147             }
1148         }
1149 
1150         __skb_flow_dissect_ipv6(skb, flow_dissector,
1151                     target_container, data, iph);
1152 
1153         break;
1154     }
1155     case htons(ETH_P_8021AD):
1156     case htons(ETH_P_8021Q): {
1157         const struct vlan_hdr *vlan = NULL;
1158         struct vlan_hdr _vlan;
1159         __be16 saved_vlan_tpid = proto;
1160 
1161         if (dissector_vlan == FLOW_DISSECTOR_KEY_MAX &&
1162             skb && skb_vlan_tag_present(skb)) {
1163             proto = skb->protocol;
1164         } else {
1165             vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan),
1166                             data, hlen, &_vlan);
1167             if (!vlan) {
1168                 fdret = FLOW_DISSECT_RET_OUT_BAD;
1169                 break;
1170             }
1171 
1172             proto = vlan->h_vlan_encapsulated_proto;
1173             nhoff += sizeof(*vlan);
1174         }
1175 
1176         if (dissector_uses_key(flow_dissector,
1177                        FLOW_DISSECTOR_KEY_NUM_OF_VLANS)) {
1178             struct flow_dissector_key_num_of_vlans *key_nvs;
1179 
1180             key_nvs = skb_flow_dissector_target(flow_dissector,
1181                                 FLOW_DISSECTOR_KEY_NUM_OF_VLANS,
1182                                 target_container);
1183             key_nvs->num_of_vlans++;
1184         }
1185 
1186         if (dissector_vlan == FLOW_DISSECTOR_KEY_MAX) {
1187             dissector_vlan = FLOW_DISSECTOR_KEY_VLAN;
1188         } else if (dissector_vlan == FLOW_DISSECTOR_KEY_VLAN) {
1189             dissector_vlan = FLOW_DISSECTOR_KEY_CVLAN;
1190         } else {
1191             fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
1192             break;
1193         }
1194 
1195         if (dissector_uses_key(flow_dissector, dissector_vlan)) {
1196             key_vlan = skb_flow_dissector_target(flow_dissector,
1197                                  dissector_vlan,
1198                                  target_container);
1199 
1200             if (!vlan) {
1201                 key_vlan->vlan_id = skb_vlan_tag_get_id(skb);
1202                 key_vlan->vlan_priority = skb_vlan_tag_get_prio(skb);
1203             } else {
1204                 key_vlan->vlan_id = ntohs(vlan->h_vlan_TCI) &
1205                     VLAN_VID_MASK;
1206                 key_vlan->vlan_priority =
1207                     (ntohs(vlan->h_vlan_TCI) &
1208                      VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
1209             }
1210             key_vlan->vlan_tpid = saved_vlan_tpid;
1211             key_vlan->vlan_eth_type = proto;
1212         }
1213 
1214         fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
1215         break;
1216     }
1217     case htons(ETH_P_PPP_SES): {
1218         struct {
1219             struct pppoe_hdr hdr;
1220             __be16 proto;
1221         } *hdr, _hdr;
1222         u16 ppp_proto;
1223 
1224         hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
1225         if (!hdr) {
1226             fdret = FLOW_DISSECT_RET_OUT_BAD;
1227             break;
1228         }
1229 
1230         if (!is_pppoe_ses_hdr_valid(&hdr->hdr)) {
1231             fdret = FLOW_DISSECT_RET_OUT_BAD;
1232             break;
1233         }
1234 
1235         /* least significant bit of the most significant octet
1236          * indicates if protocol field was compressed
1237          */
1238         ppp_proto = ntohs(hdr->proto);
1239         if (ppp_proto & 0x0100) {
1240             ppp_proto = ppp_proto >> 8;
1241             nhoff += PPPOE_SES_HLEN - 1;
1242         } else {
1243             nhoff += PPPOE_SES_HLEN;
1244         }
1245 
1246         if (ppp_proto == PPP_IP) {
1247             proto = htons(ETH_P_IP);
1248             fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
1249         } else if (ppp_proto == PPP_IPV6) {
1250             proto = htons(ETH_P_IPV6);
1251             fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
1252         } else if (ppp_proto == PPP_MPLS_UC) {
1253             proto = htons(ETH_P_MPLS_UC);
1254             fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
1255         } else if (ppp_proto == PPP_MPLS_MC) {
1256             proto = htons(ETH_P_MPLS_MC);
1257             fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
1258         } else if (ppp_proto_is_valid(ppp_proto)) {
1259             fdret = FLOW_DISSECT_RET_OUT_GOOD;
1260         } else {
1261             fdret = FLOW_DISSECT_RET_OUT_BAD;
1262             break;
1263         }
1264 
1265         if (dissector_uses_key(flow_dissector,
1266                        FLOW_DISSECTOR_KEY_PPPOE)) {
1267             struct flow_dissector_key_pppoe *key_pppoe;
1268 
1269             key_pppoe = skb_flow_dissector_target(flow_dissector,
1270                                   FLOW_DISSECTOR_KEY_PPPOE,
1271                                   target_container);
1272             key_pppoe->session_id = hdr->hdr.sid;
1273             key_pppoe->ppp_proto = htons(ppp_proto);
1274             key_pppoe->type = htons(ETH_P_PPP_SES);
1275         }
1276         break;
1277     }
1278     case htons(ETH_P_TIPC): {
1279         struct tipc_basic_hdr *hdr, _hdr;
1280 
1281         hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr),
1282                        data, hlen, &_hdr);
1283         if (!hdr) {
1284             fdret = FLOW_DISSECT_RET_OUT_BAD;
1285             break;
1286         }
1287 
1288         if (dissector_uses_key(flow_dissector,
1289                        FLOW_DISSECTOR_KEY_TIPC)) {
1290             key_addrs = skb_flow_dissector_target(flow_dissector,
1291                                   FLOW_DISSECTOR_KEY_TIPC,
1292                                   target_container);
1293             key_addrs->tipckey.key = tipc_hdr_rps_key(hdr);
1294             key_control->addr_type = FLOW_DISSECTOR_KEY_TIPC;
1295         }
1296         fdret = FLOW_DISSECT_RET_OUT_GOOD;
1297         break;
1298     }
1299 
1300     case htons(ETH_P_MPLS_UC):
1301     case htons(ETH_P_MPLS_MC):
1302         fdret = __skb_flow_dissect_mpls(skb, flow_dissector,
1303                         target_container, data,
1304                         nhoff, hlen, mpls_lse,
1305                         &mpls_el);
1306         nhoff += sizeof(struct mpls_label);
1307         mpls_lse++;
1308         break;
1309     case htons(ETH_P_FCOE):
1310         if ((hlen - nhoff) < FCOE_HEADER_LEN) {
1311             fdret = FLOW_DISSECT_RET_OUT_BAD;
1312             break;
1313         }
1314 
1315         nhoff += FCOE_HEADER_LEN;
1316         fdret = FLOW_DISSECT_RET_OUT_GOOD;
1317         break;
1318 
1319     case htons(ETH_P_ARP):
1320     case htons(ETH_P_RARP):
1321         fdret = __skb_flow_dissect_arp(skb, flow_dissector,
1322                            target_container, data,
1323                            nhoff, hlen);
1324         break;
1325 
1326     case htons(ETH_P_BATMAN):
1327         fdret = __skb_flow_dissect_batadv(skb, key_control, data,
1328                           &proto, &nhoff, hlen, flags);
1329         break;
1330 
1331     case htons(ETH_P_1588): {
1332         struct ptp_header *hdr, _hdr;
1333 
1334         hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
1335                        hlen, &_hdr);
1336         if (!hdr) {
1337             fdret = FLOW_DISSECT_RET_OUT_BAD;
1338             break;
1339         }
1340 
1341         nhoff += ntohs(hdr->message_length);
1342         fdret = FLOW_DISSECT_RET_OUT_GOOD;
1343         break;
1344     }
1345 
1346     case htons(ETH_P_PRP):
1347     case htons(ETH_P_HSR): {
1348         struct hsr_tag *hdr, _hdr;
1349 
1350         hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen,
1351                        &_hdr);
1352         if (!hdr) {
1353             fdret = FLOW_DISSECT_RET_OUT_BAD;
1354             break;
1355         }
1356 
1357         proto = hdr->encap_proto;
1358         nhoff += HSR_HLEN;
1359         fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
1360         break;
1361     }
1362 
1363     default:
1364         fdret = FLOW_DISSECT_RET_OUT_BAD;
1365         break;
1366     }
1367 
1368     /* Process result of proto processing */
1369     switch (fdret) {
1370     case FLOW_DISSECT_RET_OUT_GOOD:
1371         goto out_good;
1372     case FLOW_DISSECT_RET_PROTO_AGAIN:
1373         if (skb_flow_dissect_allowed(&num_hdrs))
1374             goto proto_again;
1375         goto out_good;
1376     case FLOW_DISSECT_RET_CONTINUE:
1377     case FLOW_DISSECT_RET_IPPROTO_AGAIN:
1378         break;
1379     case FLOW_DISSECT_RET_OUT_BAD:
1380     default:
1381         goto out_bad;
1382     }
1383 
1384 ip_proto_again:
1385     fdret = FLOW_DISSECT_RET_CONTINUE;
1386 
1387     switch (ip_proto) {
1388     case IPPROTO_GRE:
1389         if (flags & FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP) {
1390             fdret = FLOW_DISSECT_RET_OUT_GOOD;
1391             break;
1392         }
1393 
1394         fdret = __skb_flow_dissect_gre(skb, key_control, flow_dissector,
1395                            target_container, data,
1396                            &proto, &nhoff, &hlen, flags);
1397         break;
1398 
1399     case NEXTHDR_HOP:
1400     case NEXTHDR_ROUTING:
1401     case NEXTHDR_DEST: {
1402         u8 _opthdr[2], *opthdr;
1403 
1404         if (proto != htons(ETH_P_IPV6))
1405             break;
1406 
1407         opthdr = __skb_header_pointer(skb, nhoff, sizeof(_opthdr),
1408                           data, hlen, &_opthdr);
1409         if (!opthdr) {
1410             fdret = FLOW_DISSECT_RET_OUT_BAD;
1411             break;
1412         }
1413 
1414         ip_proto = opthdr[0];
1415         nhoff += (opthdr[1] + 1) << 3;
1416 
1417         fdret = FLOW_DISSECT_RET_IPPROTO_AGAIN;
1418         break;
1419     }
1420     case NEXTHDR_FRAGMENT: {
1421         struct frag_hdr _fh, *fh;
1422 
1423         if (proto != htons(ETH_P_IPV6))
1424             break;
1425 
1426         fh = __skb_header_pointer(skb, nhoff, sizeof(_fh),
1427                       data, hlen, &_fh);
1428 
1429         if (!fh) {
1430             fdret = FLOW_DISSECT_RET_OUT_BAD;
1431             break;
1432         }
1433 
1434         key_control->flags |= FLOW_DIS_IS_FRAGMENT;
1435 
1436         nhoff += sizeof(_fh);
1437         ip_proto = fh->nexthdr;
1438 
1439         if (!(fh->frag_off & htons(IP6_OFFSET))) {
1440             key_control->flags |= FLOW_DIS_FIRST_FRAG;
1441             if (flags & FLOW_DISSECTOR_F_PARSE_1ST_FRAG) {
1442                 fdret = FLOW_DISSECT_RET_IPPROTO_AGAIN;
1443                 break;
1444             }
1445         }
1446 
1447         fdret = FLOW_DISSECT_RET_OUT_GOOD;
1448         break;
1449     }
1450     case IPPROTO_IPIP:
1451         if (flags & FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP) {
1452             fdret = FLOW_DISSECT_RET_OUT_GOOD;
1453             break;
1454         }
1455 
1456         proto = htons(ETH_P_IP);
1457 
1458         key_control->flags |= FLOW_DIS_ENCAPSULATION;
1459         if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP) {
1460             fdret = FLOW_DISSECT_RET_OUT_GOOD;
1461             break;
1462         }
1463 
1464         fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
1465         break;
1466 
1467     case IPPROTO_IPV6:
1468         if (flags & FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP) {
1469             fdret = FLOW_DISSECT_RET_OUT_GOOD;
1470             break;
1471         }
1472 
1473         proto = htons(ETH_P_IPV6);
1474 
1475         key_control->flags |= FLOW_DIS_ENCAPSULATION;
1476         if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP) {
1477             fdret = FLOW_DISSECT_RET_OUT_GOOD;
1478             break;
1479         }
1480 
1481         fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
1482         break;
1483 
1484 
1485     case IPPROTO_MPLS:
1486         proto = htons(ETH_P_MPLS_UC);
1487         fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
1488         break;
1489 
1490     case IPPROTO_TCP:
1491         __skb_flow_dissect_tcp(skb, flow_dissector, target_container,
1492                        data, nhoff, hlen);
1493         break;
1494 
1495     case IPPROTO_ICMP:
1496     case IPPROTO_ICMPV6:
1497         __skb_flow_dissect_icmp(skb, flow_dissector, target_container,
1498                     data, nhoff, hlen);
1499         break;
1500 
1501     default:
1502         break;
1503     }
1504 
1505     if (!(key_control->flags & FLOW_DIS_IS_FRAGMENT))
1506         __skb_flow_dissect_ports(skb, flow_dissector, target_container,
1507                      data, nhoff, ip_proto, hlen);
1508 
1509     /* Process result of IP proto processing */
1510     switch (fdret) {
1511     case FLOW_DISSECT_RET_PROTO_AGAIN:
1512         if (skb_flow_dissect_allowed(&num_hdrs))
1513             goto proto_again;
1514         break;
1515     case FLOW_DISSECT_RET_IPPROTO_AGAIN:
1516         if (skb_flow_dissect_allowed(&num_hdrs))
1517             goto ip_proto_again;
1518         break;
1519     case FLOW_DISSECT_RET_OUT_GOOD:
1520     case FLOW_DISSECT_RET_CONTINUE:
1521         break;
1522     case FLOW_DISSECT_RET_OUT_BAD:
1523     default:
1524         goto out_bad;
1525     }
1526 
1527 out_good:
1528     ret = true;
1529 
1530 out:
1531     key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
1532     key_basic->n_proto = proto;
1533     key_basic->ip_proto = ip_proto;
1534 
1535     return ret;
1536 
1537 out_bad:
1538     ret = false;
1539     goto out;
1540 }
1541 EXPORT_SYMBOL(__skb_flow_dissect);
1542 
1543 static siphash_aligned_key_t hashrnd;
1544 static __always_inline void __flow_hash_secret_init(void)
1545 {
1546     net_get_random_once(&hashrnd, sizeof(hashrnd));
1547 }
1548 
1549 static const void *flow_keys_hash_start(const struct flow_keys *flow)
1550 {
1551     BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % SIPHASH_ALIGNMENT);
1552     return &flow->FLOW_KEYS_HASH_START_FIELD;
1553 }
1554 
1555 static inline size_t flow_keys_hash_length(const struct flow_keys *flow)
1556 {
1557     size_t diff = FLOW_KEYS_HASH_OFFSET + sizeof(flow->addrs);
1558 
1559     BUILD_BUG_ON((sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) % sizeof(u32));
1560 
1561     switch (flow->control.addr_type) {
1562     case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
1563         diff -= sizeof(flow->addrs.v4addrs);
1564         break;
1565     case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
1566         diff -= sizeof(flow->addrs.v6addrs);
1567         break;
1568     case FLOW_DISSECTOR_KEY_TIPC:
1569         diff -= sizeof(flow->addrs.tipckey);
1570         break;
1571     }
1572     return sizeof(*flow) - diff;
1573 }
1574 
1575 __be32 flow_get_u32_src(const struct flow_keys *flow)
1576 {
1577     switch (flow->control.addr_type) {
1578     case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
1579         return flow->addrs.v4addrs.src;
1580     case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
1581         return (__force __be32)ipv6_addr_hash(
1582             &flow->addrs.v6addrs.src);
1583     case FLOW_DISSECTOR_KEY_TIPC:
1584         return flow->addrs.tipckey.key;
1585     default:
1586         return 0;
1587     }
1588 }
1589 EXPORT_SYMBOL(flow_get_u32_src);
1590 
1591 __be32 flow_get_u32_dst(const struct flow_keys *flow)
1592 {
1593     switch (flow->control.addr_type) {
1594     case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
1595         return flow->addrs.v4addrs.dst;
1596     case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
1597         return (__force __be32)ipv6_addr_hash(
1598             &flow->addrs.v6addrs.dst);
1599     default:
1600         return 0;
1601     }
1602 }
1603 EXPORT_SYMBOL(flow_get_u32_dst);
1604 
1605 /* Sort the source and destination IP and the ports,
1606  * to have consistent hash within the two directions
1607  */
1608 static inline void __flow_hash_consistentify(struct flow_keys *keys)
1609 {
1610     int addr_diff, i;
1611 
1612     switch (keys->control.addr_type) {
1613     case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
1614         if ((__force u32)keys->addrs.v4addrs.dst <
1615             (__force u32)keys->addrs.v4addrs.src)
1616             swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst);
1617 
1618         if ((__force u16)keys->ports.dst <
1619             (__force u16)keys->ports.src) {
1620             swap(keys->ports.src, keys->ports.dst);
1621         }
1622         break;
1623     case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
1624         addr_diff = memcmp(&keys->addrs.v6addrs.dst,
1625                    &keys->addrs.v6addrs.src,
1626                    sizeof(keys->addrs.v6addrs.dst));
1627         if (addr_diff < 0) {
1628             for (i = 0; i < 4; i++)
1629                 swap(keys->addrs.v6addrs.src.s6_addr32[i],
1630                      keys->addrs.v6addrs.dst.s6_addr32[i]);
1631         }
1632         if ((__force u16)keys->ports.dst <
1633             (__force u16)keys->ports.src) {
1634             swap(keys->ports.src, keys->ports.dst);
1635         }
1636         break;
1637     }
1638 }
1639 
1640 static inline u32 __flow_hash_from_keys(struct flow_keys *keys,
1641                     const siphash_key_t *keyval)
1642 {
1643     u32 hash;
1644 
1645     __flow_hash_consistentify(keys);
1646 
1647     hash = siphash(flow_keys_hash_start(keys),
1648                flow_keys_hash_length(keys), keyval);
1649     if (!hash)
1650         hash = 1;
1651 
1652     return hash;
1653 }
1654 
1655 u32 flow_hash_from_keys(struct flow_keys *keys)
1656 {
1657     __flow_hash_secret_init();
1658     return __flow_hash_from_keys(keys, &hashrnd);
1659 }
1660 EXPORT_SYMBOL(flow_hash_from_keys);
1661 
1662 static inline u32 ___skb_get_hash(const struct sk_buff *skb,
1663                   struct flow_keys *keys,
1664                   const siphash_key_t *keyval)
1665 {
1666     skb_flow_dissect_flow_keys(skb, keys,
1667                    FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
1668 
1669     return __flow_hash_from_keys(keys, keyval);
1670 }
1671 
1672 struct _flow_keys_digest_data {
1673     __be16  n_proto;
1674     u8  ip_proto;
1675     u8  padding;
1676     __be32  ports;
1677     __be32  src;
1678     __be32  dst;
1679 };
1680 
1681 void make_flow_keys_digest(struct flow_keys_digest *digest,
1682                const struct flow_keys *flow)
1683 {
1684     struct _flow_keys_digest_data *data =
1685         (struct _flow_keys_digest_data *)digest;
1686 
1687     BUILD_BUG_ON(sizeof(*data) > sizeof(*digest));
1688 
1689     memset(digest, 0, sizeof(*digest));
1690 
1691     data->n_proto = flow->basic.n_proto;
1692     data->ip_proto = flow->basic.ip_proto;
1693     data->ports = flow->ports.ports;
1694     data->src = flow->addrs.v4addrs.src;
1695     data->dst = flow->addrs.v4addrs.dst;
1696 }
1697 EXPORT_SYMBOL(make_flow_keys_digest);
1698 
1699 static struct flow_dissector flow_keys_dissector_symmetric __read_mostly;
1700 
1701 u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
1702 {
1703     struct flow_keys keys;
1704 
1705     __flow_hash_secret_init();
1706 
1707     memset(&keys, 0, sizeof(keys));
1708     __skb_flow_dissect(NULL, skb, &flow_keys_dissector_symmetric,
1709                &keys, NULL, 0, 0, 0,
1710                FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
1711 
1712     return __flow_hash_from_keys(&keys, &hashrnd);
1713 }
1714 EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
1715 
1716 /**
1717  * __skb_get_hash: calculate a flow hash
1718  * @skb: sk_buff to calculate flow hash from
1719  *
1720  * This function calculates a flow hash based on src/dst addresses
1721  * and src/dst port numbers.  Sets hash in skb to non-zero hash value
1722  * on success, zero indicates no valid hash.  Also, sets l4_hash in skb
1723  * if hash is a canonical 4-tuple hash over transport ports.
1724  */
1725 void __skb_get_hash(struct sk_buff *skb)
1726 {
1727     struct flow_keys keys;
1728     u32 hash;
1729 
1730     __flow_hash_secret_init();
1731 
1732     hash = ___skb_get_hash(skb, &keys, &hashrnd);
1733 
1734     __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
1735 }
1736 EXPORT_SYMBOL(__skb_get_hash);
1737 
1738 __u32 skb_get_hash_perturb(const struct sk_buff *skb,
1739                const siphash_key_t *perturb)
1740 {
1741     struct flow_keys keys;
1742 
1743     return ___skb_get_hash(skb, &keys, perturb);
1744 }
1745 EXPORT_SYMBOL(skb_get_hash_perturb);
1746 
1747 u32 __skb_get_poff(const struct sk_buff *skb, const void *data,
1748            const struct flow_keys_basic *keys, int hlen)
1749 {
1750     u32 poff = keys->control.thoff;
1751 
1752     /* skip L4 headers for fragments after the first */
1753     if ((keys->control.flags & FLOW_DIS_IS_FRAGMENT) &&
1754         !(keys->control.flags & FLOW_DIS_FIRST_FRAG))
1755         return poff;
1756 
1757     switch (keys->basic.ip_proto) {
1758     case IPPROTO_TCP: {
1759         /* access doff as u8 to avoid unaligned access */
1760         const u8 *doff;
1761         u8 _doff;
1762 
1763         doff = __skb_header_pointer(skb, poff + 12, sizeof(_doff),
1764                         data, hlen, &_doff);
1765         if (!doff)
1766             return poff;
1767 
1768         poff += max_t(u32, sizeof(struct tcphdr), (*doff & 0xF0) >> 2);
1769         break;
1770     }
1771     case IPPROTO_UDP:
1772     case IPPROTO_UDPLITE:
1773         poff += sizeof(struct udphdr);
1774         break;
1775     /* For the rest, we do not really care about header
1776      * extensions at this point for now.
1777      */
1778     case IPPROTO_ICMP:
1779         poff += sizeof(struct icmphdr);
1780         break;
1781     case IPPROTO_ICMPV6:
1782         poff += sizeof(struct icmp6hdr);
1783         break;
1784     case IPPROTO_IGMP:
1785         poff += sizeof(struct igmphdr);
1786         break;
1787     case IPPROTO_DCCP:
1788         poff += sizeof(struct dccp_hdr);
1789         break;
1790     case IPPROTO_SCTP:
1791         poff += sizeof(struct sctphdr);
1792         break;
1793     }
1794 
1795     return poff;
1796 }
1797 
1798 /**
1799  * skb_get_poff - get the offset to the payload
1800  * @skb: sk_buff to get the payload offset from
1801  *
1802  * The function will get the offset to the payload as far as it could
1803  * be dissected.  The main user is currently BPF, so that we can dynamically
1804  * truncate packets without needing to push actual payload to the user
1805  * space and can analyze headers only, instead.
1806  */
1807 u32 skb_get_poff(const struct sk_buff *skb)
1808 {
1809     struct flow_keys_basic keys;
1810 
1811     if (!skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
1812                           NULL, 0, 0, 0, 0))
1813         return 0;
1814 
1815     return __skb_get_poff(skb, skb->data, &keys, skb_headlen(skb));
1816 }
1817 
1818 __u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys)
1819 {
1820     memset(keys, 0, sizeof(*keys));
1821 
1822     memcpy(&keys->addrs.v6addrs.src, &fl6->saddr,
1823         sizeof(keys->addrs.v6addrs.src));
1824     memcpy(&keys->addrs.v6addrs.dst, &fl6->daddr,
1825         sizeof(keys->addrs.v6addrs.dst));
1826     keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1827     keys->ports.src = fl6->fl6_sport;
1828     keys->ports.dst = fl6->fl6_dport;
1829     keys->keyid.keyid = fl6->fl6_gre_key;
1830     keys->tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
1831     keys->basic.ip_proto = fl6->flowi6_proto;
1832 
1833     return flow_hash_from_keys(keys);
1834 }
1835 EXPORT_SYMBOL(__get_hash_from_flowi6);
1836 
1837 static const struct flow_dissector_key flow_keys_dissector_keys[] = {
1838     {
1839         .key_id = FLOW_DISSECTOR_KEY_CONTROL,
1840         .offset = offsetof(struct flow_keys, control),
1841     },
1842     {
1843         .key_id = FLOW_DISSECTOR_KEY_BASIC,
1844         .offset = offsetof(struct flow_keys, basic),
1845     },
1846     {
1847         .key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
1848         .offset = offsetof(struct flow_keys, addrs.v4addrs),
1849     },
1850     {
1851         .key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
1852         .offset = offsetof(struct flow_keys, addrs.v6addrs),
1853     },
1854     {
1855         .key_id = FLOW_DISSECTOR_KEY_TIPC,
1856         .offset = offsetof(struct flow_keys, addrs.tipckey),
1857     },
1858     {
1859         .key_id = FLOW_DISSECTOR_KEY_PORTS,
1860         .offset = offsetof(struct flow_keys, ports),
1861     },
1862     {
1863         .key_id = FLOW_DISSECTOR_KEY_VLAN,
1864         .offset = offsetof(struct flow_keys, vlan),
1865     },
1866     {
1867         .key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL,
1868         .offset = offsetof(struct flow_keys, tags),
1869     },
1870     {
1871         .key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
1872         .offset = offsetof(struct flow_keys, keyid),
1873     },
1874 };
1875 
1876 static const struct flow_dissector_key flow_keys_dissector_symmetric_keys[] = {
1877     {
1878         .key_id = FLOW_DISSECTOR_KEY_CONTROL,
1879         .offset = offsetof(struct flow_keys, control),
1880     },
1881     {
1882         .key_id = FLOW_DISSECTOR_KEY_BASIC,
1883         .offset = offsetof(struct flow_keys, basic),
1884     },
1885     {
1886         .key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
1887         .offset = offsetof(struct flow_keys, addrs.v4addrs),
1888     },
1889     {
1890         .key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
1891         .offset = offsetof(struct flow_keys, addrs.v6addrs),
1892     },
1893     {
1894         .key_id = FLOW_DISSECTOR_KEY_PORTS,
1895         .offset = offsetof(struct flow_keys, ports),
1896     },
1897 };
1898 
1899 static const struct flow_dissector_key flow_keys_basic_dissector_keys[] = {
1900     {
1901         .key_id = FLOW_DISSECTOR_KEY_CONTROL,
1902         .offset = offsetof(struct flow_keys, control),
1903     },
1904     {
1905         .key_id = FLOW_DISSECTOR_KEY_BASIC,
1906         .offset = offsetof(struct flow_keys, basic),
1907     },
1908 };
1909 
1910 struct flow_dissector flow_keys_dissector __read_mostly;
1911 EXPORT_SYMBOL(flow_keys_dissector);
1912 
1913 struct flow_dissector flow_keys_basic_dissector __read_mostly;
1914 EXPORT_SYMBOL(flow_keys_basic_dissector);
1915 
1916 static int __init init_default_flow_dissectors(void)
1917 {
1918     skb_flow_dissector_init(&flow_keys_dissector,
1919                 flow_keys_dissector_keys,
1920                 ARRAY_SIZE(flow_keys_dissector_keys));
1921     skb_flow_dissector_init(&flow_keys_dissector_symmetric,
1922                 flow_keys_dissector_symmetric_keys,
1923                 ARRAY_SIZE(flow_keys_dissector_symmetric_keys));
1924     skb_flow_dissector_init(&flow_keys_basic_dissector,
1925                 flow_keys_basic_dissector_keys,
1926                 ARRAY_SIZE(flow_keys_basic_dissector_keys));
1927     return 0;
1928 }
1929 core_initcall(init_default_flow_dissectors);