0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 /*
0003  * Checksum updating actions
0004  *
0005  * Copyright (c) 2010 Gregoire Baron <baronchon@n7mm.org>
0006  */
0007 
0008 #include <linux/types.h>
0009 #include <linux/init.h>
0010 #include <linux/kernel.h>
0011 #include <linux/module.h>
0012 #include <linux/spinlock.h>
0013 
0014 #include <linux/netlink.h>
0015 #include <net/netlink.h>
0016 #include <linux/rtnetlink.h>
0017 
0018 #include <linux/skbuff.h>
0019 
0020 #include <net/ip.h>
0021 #include <net/ipv6.h>
0022 #include <net/icmp.h>
0023 #include <linux/icmpv6.h>
0024 #include <linux/igmp.h>
0025 #include <net/tcp.h>
0026 #include <net/udp.h>
0027 #include <net/ip6_checksum.h>
0028 #include <net/sctp/checksum.h>
0029 
0030 #include <net/act_api.h>
0031 #include <net/pkt_cls.h>
0032 
0033 #include <linux/tc_act/tc_csum.h>
0034 #include <net/tc_act/tc_csum.h>
0035 
/* Netlink policy: TCA_CSUM_PARMS must carry an exact struct tc_csum. */
static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
	[TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
};

/* Per-netns pernet id for this action's tc_action_net (see csum_net_ops). */
static unsigned int csum_net_id;
static struct tc_action_ops act_csum_ops;
0042 
/* Create or reconfigure one csum action instance from netlink attributes.
 *
 * Returns ACT_P_CREATED when a new action was allocated, 0 when an
 * existing action was bound or replaced in place, or a negative errno.
 */
static int tcf_csum_init(struct net *net, struct nlattr *nla,
			 struct nlattr *est, struct tc_action **a,
			 struct tcf_proto *tp,
			 u32 flags, struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct tcf_csum_params *params_new;
	struct nlattr *tb[TCA_CSUM_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_csum *parm;
	struct tcf_csum *p;
	int ret = 0, err;
	u32 index;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_CSUM_MAX, nla, csum_policy,
					  NULL);
	if (err < 0)
		return err;

	if (tb[TCA_CSUM_PARMS] == NULL)
		return -EINVAL;
	parm = nla_data(tb[TCA_CSUM_PARMS]);
	index = parm->index;
	/* Look up by index; 0 means "allocate a fresh index". */
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (!err) {
		/* No action with this index yet: create one. */
		ret = tcf_idr_create_from_flags(tn, index, est, a,
						&act_csum_ops, bind, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else if (err > 0) {
		if (bind)/* dont override defaults */
			return 0;
		if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
			/* Exists, and caller did not ask to replace it. */
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	} else {
		return err;
	}

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	p = to_tcf_csum(*a);

	/* Parameters are swapped under RCU so the datapath stays lockless. */
	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
	if (unlikely(!params_new)) {
		err = -ENOMEM;
		goto put_chain;
	}
	params_new->update_flags = parm->update_flags;

	spin_lock_bh(&p->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	/* After the swap, params_new holds the OLD params (may be NULL). */
	params_new = rcu_replace_pointer(p->params, params_new,
					 lockdep_is_held(&p->tcf_lock));
	spin_unlock_bh(&p->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (params_new)
		kfree_rcu(params_new, rcu);

	return ret;
put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}
0122 
0123 /**
0124  * tcf_csum_skb_nextlayer - Get next layer pointer
0125  * @skb: sk_buff to use
0126  * @ihl: previous summed headers length
0127  * @ipl: complete packet length
0128  * @jhl: next header length
0129  *
0130  * Check the expected next layer availability in the specified sk_buff.
0131  * Return the next layer pointer if pass, NULL otherwise.
0132  */
0133 static void *tcf_csum_skb_nextlayer(struct sk_buff *skb,
0134                     unsigned int ihl, unsigned int ipl,
0135                     unsigned int jhl)
0136 {
0137     int ntkoff = skb_network_offset(skb);
0138     int hl = ihl + jhl;
0139 
0140     if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) ||
0141         skb_try_make_writable(skb, hl + ntkoff))
0142         return NULL;
0143     else
0144         return (void *)(skb_network_header(skb) + ihl);
0145 }
0146 
0147 static int tcf_csum_ipv4_icmp(struct sk_buff *skb, unsigned int ihl,
0148                   unsigned int ipl)
0149 {
0150     struct icmphdr *icmph;
0151 
0152     icmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmph));
0153     if (icmph == NULL)
0154         return 0;
0155 
0156     icmph->checksum = 0;
0157     skb->csum = csum_partial(icmph, ipl - ihl, 0);
0158     icmph->checksum = csum_fold(skb->csum);
0159 
0160     skb->ip_summed = CHECKSUM_NONE;
0161 
0162     return 1;
0163 }
0164 
0165 static int tcf_csum_ipv4_igmp(struct sk_buff *skb,
0166                   unsigned int ihl, unsigned int ipl)
0167 {
0168     struct igmphdr *igmph;
0169 
0170     igmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*igmph));
0171     if (igmph == NULL)
0172         return 0;
0173 
0174     igmph->csum = 0;
0175     skb->csum = csum_partial(igmph, ipl - ihl, 0);
0176     igmph->csum = csum_fold(skb->csum);
0177 
0178     skb->ip_summed = CHECKSUM_NONE;
0179 
0180     return 1;
0181 }
0182 
0183 static int tcf_csum_ipv6_icmp(struct sk_buff *skb, unsigned int ihl,
0184                   unsigned int ipl)
0185 {
0186     struct icmp6hdr *icmp6h;
0187     const struct ipv6hdr *ip6h;
0188 
0189     icmp6h = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmp6h));
0190     if (icmp6h == NULL)
0191         return 0;
0192 
0193     ip6h = ipv6_hdr(skb);
0194     icmp6h->icmp6_cksum = 0;
0195     skb->csum = csum_partial(icmp6h, ipl - ihl, 0);
0196     icmp6h->icmp6_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
0197                           ipl - ihl, IPPROTO_ICMPV6,
0198                           skb->csum);
0199 
0200     skb->ip_summed = CHECKSUM_NONE;
0201 
0202     return 1;
0203 }
0204 
0205 static int tcf_csum_ipv4_tcp(struct sk_buff *skb, unsigned int ihl,
0206                  unsigned int ipl)
0207 {
0208     struct tcphdr *tcph;
0209     const struct iphdr *iph;
0210 
0211     if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
0212         return 1;
0213 
0214     tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
0215     if (tcph == NULL)
0216         return 0;
0217 
0218     iph = ip_hdr(skb);
0219     tcph->check = 0;
0220     skb->csum = csum_partial(tcph, ipl - ihl, 0);
0221     tcph->check = tcp_v4_check(ipl - ihl,
0222                    iph->saddr, iph->daddr, skb->csum);
0223 
0224     skb->ip_summed = CHECKSUM_NONE;
0225 
0226     return 1;
0227 }
0228 
0229 static int tcf_csum_ipv6_tcp(struct sk_buff *skb, unsigned int ihl,
0230                  unsigned int ipl)
0231 {
0232     struct tcphdr *tcph;
0233     const struct ipv6hdr *ip6h;
0234 
0235     if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
0236         return 1;
0237 
0238     tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
0239     if (tcph == NULL)
0240         return 0;
0241 
0242     ip6h = ipv6_hdr(skb);
0243     tcph->check = 0;
0244     skb->csum = csum_partial(tcph, ipl - ihl, 0);
0245     tcph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
0246                       ipl - ihl, IPPROTO_TCP,
0247                       skb->csum);
0248 
0249     skb->ip_summed = CHECKSUM_NONE;
0250 
0251     return 1;
0252 }
0253 
/* Recompute the UDP (udplite == 0) or UDP-Lite (udplite == 1) checksum
 * over an IPv4 pseudo-header. A plain-UDP packet with a zero checksum is
 * left untouched (zero means "no checksum" for UDP over IPv4).
 * Returns 1 on success or when the packet is deliberately skipped,
 * 0 if the header is unusable.
 */
static int tcf_csum_ipv4_udp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct iphdr *iph;
	u16 ul;

	/* GSO UDP recomputes checksums at segmentation; leave it alone. */
	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
		return 1;

	/*
	 * Support both UDP and UDPLITE checksum algorithms, Don't use
	 * udph->len to get the real length without any protocol check,
	 * UDPLITE uses udph->len for another thing,
	 * Use iph->tot_len, or just ipl.
	 */

	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	iph = ip_hdr(skb);
	ul = ntohs(udph->len);

	if (udplite || udph->check) {

		udph->check = 0;

		if (udplite) {
			/* For UDP-Lite, udph->len is the checksum
			 * coverage: 0 means "cover everything".
			 */
			if (ul == 0)
				skb->csum = csum_partial(udph, ipl - ihl, 0);
			else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
				skb->csum = csum_partial(udph, ul, 0);
			else
				goto ignore_obscure_skb;
		} else {
			/* For UDP, udph->len must match the IP payload. */
			if (ul != ipl - ihl)
				goto ignore_obscure_skb;

			skb->csum = csum_partial(udph, ul, 0);
		}

		udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
						ul, iph->protocol,
						skb->csum);

		/* A computed 0 must be transmitted as all-ones. */
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}
0309 
/* Recompute the UDP (udplite == 0) or UDP-Lite (udplite == 1) checksum
 * over an IPv6 pseudo-header. Unlike IPv4, the checksum is mandatory for
 * UDP over IPv6, so it is always rewritten here.
 * Returns 1 on success or when the packet is deliberately skipped,
 * 0 if the header is unusable.
 */
static int tcf_csum_ipv6_udp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct ipv6hdr *ip6h;
	u16 ul;

	/* GSO UDP recomputes checksums at segmentation; leave it alone. */
	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
		return 1;

	/*
	 * Support both UDP and UDPLITE checksum algorithms, Don't use
	 * udph->len to get the real length without any protocol check,
	 * UDPLITE uses udph->len for another thing,
	 * Use ip6h->payload_len + sizeof(*ip6h) ... , or just ipl.
	 */

	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	ul = ntohs(udph->len);

	udph->check = 0;

	if (udplite) {
		/* For UDP-Lite, udph->len is the checksum coverage:
		 * 0 means "cover everything".
		 */
		if (ul == 0)
			skb->csum = csum_partial(udph, ipl - ihl, 0);

		else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
			skb->csum = csum_partial(udph, ul, 0);

		else
			goto ignore_obscure_skb;
	} else {
		/* For UDP, udph->len must match the IP payload. */
		if (ul != ipl - ihl)
			goto ignore_obscure_skb;

		skb->csum = csum_partial(udph, ul, 0);
	}

	udph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ul,
				      udplite ? IPPROTO_UDPLITE : IPPROTO_UDP,
				      skb->csum);

	/* A computed 0 must be transmitted as all-ones. */
	if (!udph->check)
		udph->check = CSUM_MANGLED_0;

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}
0364 
0365 static int tcf_csum_sctp(struct sk_buff *skb, unsigned int ihl,
0366              unsigned int ipl)
0367 {
0368     struct sctphdr *sctph;
0369 
0370     if (skb_is_gso(skb) && skb_is_gso_sctp(skb))
0371         return 1;
0372 
0373     sctph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*sctph));
0374     if (!sctph)
0375         return 0;
0376 
0377     sctph->checksum = sctp_compute_cksum(skb,
0378                          skb_network_offset(skb) + ihl);
0379     skb->ip_summed = CHECKSUM_NONE;
0380     skb->csum_not_inet = 0;
0381 
0382     return 1;
0383 }
0384 
/* Update the checksums requested in update_flags for an IPv4 packet.
 * Returns 1 on success, 0 when the packet should be dropped.
 */
static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
{
	const struct iphdr *iph;
	int ntkoff;

	ntkoff = skb_network_offset(skb);

	if (!pskb_may_pull(skb, sizeof(*iph) + ntkoff))
		goto fail;

	iph = ip_hdr(skb);

	/* Non-first fragments carry no L4 header: the ternary maps them to
	 * protocol 0 so no case matches and only the IP header is handled.
	 */
	switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
	case IPPROTO_ICMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
			if (!tcf_csum_ipv4_icmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_IGMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_IGMP)
			if (!tcf_csum_ipv4_igmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_TCP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
			if (!tcf_csum_ipv4_tcp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_UDP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 0))
				goto fail;
		break;
	case IPPROTO_UDPLITE:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 1))
				goto fail;
		break;
	case IPPROTO_SCTP:
		if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
		    !tcf_csum_sctp(skb, iph->ihl * 4, ntohs(iph->tot_len)))
			goto fail;
		break;
	}

	if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
		/* L4 helpers may have reallocated the header; make the IP
		 * header writable again before rewriting its checksum.
		 */
		if (skb_try_make_writable(skb, sizeof(*iph) + ntkoff))
			goto fail;

		ip_send_check(ip_hdr(skb));
	}

	return 1;

fail:
	return 0;
}
0447 
/* Walk the TLV options of a hop-by-hop extension header looking for a
 * jumbo payload option; if found, store the real payload length in *pl
 * (the fixed payload_len field is 0 for jumbograms).
 * @ip6xh: start of the extension header, @ixhl: its total length in bytes.
 * Returns 1 on success (option absent is fine), 0 on a malformed jumbo
 * option.
 */
static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh, unsigned int ixhl,
				 unsigned int *pl)
{
	int off, len, optlen;
	unsigned char *xh = (void *)ip6xh;

	/* Options start right after the 2-byte nexthdr/hdrlen prologue. */
	off = sizeof(*ip6xh);
	len = ixhl - off;

	while (len > 1) {
		switch (xh[off]) {
		case IPV6_TLV_PAD1:
			/* Pad1 has no length byte. */
			optlen = 1;
			break;
		case IPV6_TLV_JUMBO:
			optlen = xh[off + 1] + 2;
			/* Jumbo must be 6 bytes total, fit in the header,
			 * and sit at a 4n+2 offset (RFC 2675 alignment).
			 */
			if (optlen != 6 || len < 6 || (off & 3) != 2)
				/* wrong jumbo option length/alignment */
				return 0;
			*pl = ntohl(*(__be32 *)(xh + off + 2));
			goto done;
		default:
			optlen = xh[off + 1] + 2;
			if (optlen > len)
				/* ignore obscure options */
				goto done;
			break;
		}
		off += optlen;
		len -= optlen;
	}

done:
	return 1;
}
0483 
/* Update the checksums requested in update_flags for an IPv6 packet,
 * walking the extension-header chain to find the transport header.
 * Returns 1 on success or when the packet is ignored (fragments, unknown
 * next headers), 0 when the packet should be dropped.
 */
static int tcf_csum_ipv6(struct sk_buff *skb, u32 update_flags)
{
	struct ipv6hdr *ip6h;
	struct ipv6_opt_hdr *ip6xh;
	unsigned int hl, ixhl;
	unsigned int pl;
	int ntkoff;
	u8 nexthdr;

	ntkoff = skb_network_offset(skb);

	hl = sizeof(*ip6h);

	if (!pskb_may_pull(skb, hl + ntkoff))
		goto fail;

	ip6h = ipv6_hdr(skb);

	pl = ntohs(ip6h->payload_len);
	nexthdr = ip6h->nexthdr;

	do {
		switch (nexthdr) {
		case NEXTHDR_FRAGMENT:
			/* Fragments lack a full L4 header; leave as-is. */
			goto ignore_skb;
		case NEXTHDR_ROUTING:
		case NEXTHDR_HOP:
		case NEXTHDR_DEST:
			if (!pskb_may_pull(skb, hl + sizeof(*ip6xh) + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			ixhl = ipv6_optlen(ip6xh);
			if (!pskb_may_pull(skb, hl + ixhl + ntkoff))
				goto fail;
			/* Re-derive the pointer: pskb_may_pull may have
			 * reallocated the header.
			 */
			ip6xh = (void *)(skb_network_header(skb) + hl);
			/* Hop-by-hop may carry a jumbo option updating pl. */
			if ((nexthdr == NEXTHDR_HOP) &&
			    !(tcf_csum_ipv6_hopopts(ip6xh, ixhl, &pl)))
				goto fail;
			nexthdr = ip6xh->nexthdr;
			hl += ixhl;
			break;
		case IPPROTO_ICMPV6:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
				if (!tcf_csum_ipv6_icmp(skb,
							hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_TCP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
				if (!tcf_csum_ipv6_tcp(skb,
						       hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_UDP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 0))
					goto fail;
			goto done;
		case IPPROTO_UDPLITE:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 1))
					goto fail;
			goto done;
		case IPPROTO_SCTP:
			if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
			    !tcf_csum_sctp(skb, hl, pl + sizeof(*ip6h)))
				goto fail;
			goto done;
		default:
			goto ignore_skb;
		}
	} while (pskb_may_pull(skb, hl + 1 + ntkoff));

done:
ignore_skb:
	return 1;

fail:
	return 0;
}
0566 
/* Datapath entry point: fix up checksums on each matched packet.
 * Runs under RCU (softirq); parameters are read locklessly. Transparently
 * steps over VLAN tags (both the in-skb tag and stacked in-payload tags)
 * to reach the IP header, and restores any pulled headers before return.
 * Returns the configured control action, or TC_ACT_SHOT on failure.
 */
static int tcf_csum_act(struct sk_buff *skb, const struct tc_action *a,
			struct tcf_result *res)
{
	struct tcf_csum *p = to_tcf_csum(a);
	bool orig_vlan_tag_present = false;
	unsigned int vlan_hdr_count = 0;
	struct tcf_csum_params *params;
	u32 update_flags;
	__be16 protocol;
	int action;

	params = rcu_dereference_bh(p->params);

	tcf_lastuse_update(&p->tcf_tm);
	tcf_action_update_bstats(&p->common, skb);

	action = READ_ONCE(p->tcf_action);
	if (unlikely(action == TC_ACT_SHOT))
		goto drop;

	update_flags = params->update_flags;
	protocol = skb_protocol(skb, false);
again:
	switch (protocol) {
	case cpu_to_be16(ETH_P_IP):
		if (!tcf_csum_ipv4(skb, update_flags))
			goto drop;
		break;
	case cpu_to_be16(ETH_P_IPV6):
		if (!tcf_csum_ipv6(skb, update_flags))
			goto drop;
		break;
	case cpu_to_be16(ETH_P_8021AD):
		fallthrough;
	case cpu_to_be16(ETH_P_8021Q):
		if (skb_vlan_tag_present(skb) && !orig_vlan_tag_present) {
			/* Hardware-accelerated tag: the payload already
			 * starts at the inner protocol, just switch to it.
			 */
			protocol = skb->protocol;
			orig_vlan_tag_present = true;
		} else {
			/* In-payload VLAN header: pull it and remember to
			 * push it back before returning.
			 */
			struct vlan_hdr *vlan = (struct vlan_hdr *)skb->data;

			protocol = vlan->h_vlan_encapsulated_proto;
			skb_pull(skb, VLAN_HLEN);
			skb_reset_network_header(skb);
			vlan_hdr_count++;
		}
		goto again;
	}

out:
	/* Restore the skb for the pulled VLAN tags */
	while (vlan_hdr_count--) {
		skb_push(skb, VLAN_HLEN);
		skb_reset_network_header(skb);
	}

	return action;

drop:
	tcf_action_inc_drop_qstats(&p->common);
	action = TC_ACT_SHOT;
	goto out;
}
0630 
/* Serialize the action's configuration and timestamps into a netlink
 * message. Returns skb->len on success, -1 on overflow (message trimmed).
 */
static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			 int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_csum *p = to_tcf_csum(a);
	struct tcf_csum_params *params;
	struct tc_csum opt = {
		.index   = p->tcf_index,
		/* Report counts net of the caller's own references. */
		.refcnt  = refcount_read(&p->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&p->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	/* Lock so action/params are read as one consistent snapshot. */
	spin_lock_bh(&p->tcf_lock);
	params = rcu_dereference_protected(p->params,
					   lockdep_is_held(&p->tcf_lock));
	opt.action = p->tcf_action;
	opt.update_flags = params->update_flags;

	if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &p->tcf_tm);
	if (nla_put_64bit(skb, TCA_CSUM_TM, sizeof(t), &t, TCA_CSUM_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&p->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&p->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}
0665 
0666 static void tcf_csum_cleanup(struct tc_action *a)
0667 {
0668     struct tcf_csum *p = to_tcf_csum(a);
0669     struct tcf_csum_params *params;
0670 
0671     params = rcu_dereference_protected(p->params, 1);
0672     if (params)
0673         kfree_rcu(params, rcu);
0674 }
0675 
0676 static int tcf_csum_walker(struct net *net, struct sk_buff *skb,
0677                struct netlink_callback *cb, int type,
0678                const struct tc_action_ops *ops,
0679                struct netlink_ext_ack *extack)
0680 {
0681     struct tc_action_net *tn = net_generic(net, csum_net_id);
0682 
0683     return tcf_generic_walker(tn, skb, cb, type, ops, extack);
0684 }
0685 
0686 static int tcf_csum_search(struct net *net, struct tc_action **a, u32 index)
0687 {
0688     struct tc_action_net *tn = net_generic(net, csum_net_id);
0689 
0690     return tcf_idr_search(tn, a, index);
0691 }
0692 
/* Worst-case netlink payload for one dumped csum action (TCA_CSUM_PARMS). */
static size_t tcf_csum_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_csum));
}
0697 
0698 static int tcf_csum_offload_act_setup(struct tc_action *act, void *entry_data,
0699                       u32 *index_inc, bool bind,
0700                       struct netlink_ext_ack *extack)
0701 {
0702     if (bind) {
0703         struct flow_action_entry *entry = entry_data;
0704 
0705         entry->id = FLOW_ACTION_CSUM;
0706         entry->csum_flags = tcf_csum_update_flags(act);
0707         *index_inc = 1;
0708     } else {
0709         struct flow_offload_action *fl_action = entry_data;
0710 
0711         fl_action->id = FLOW_ACTION_CSUM;
0712     }
0713 
0714     return 0;
0715 }
0716 
/* Operations table registered with the TC action subsystem. */
static struct tc_action_ops act_csum_ops = {
	.kind		= "csum",
	.id		= TCA_ID_CSUM,
	.owner		= THIS_MODULE,
	.act		= tcf_csum_act,
	.dump		= tcf_csum_dump,
	.init		= tcf_csum_init,
	.cleanup	= tcf_csum_cleanup,
	.walk		= tcf_csum_walker,
	.lookup		= tcf_csum_search,
	.get_fill_size	= tcf_csum_get_fill_size,
	.offload_act_setup = tcf_csum_offload_act_setup,
	.size		= sizeof(struct tcf_csum),
};
0731 
0732 static __net_init int csum_init_net(struct net *net)
0733 {
0734     struct tc_action_net *tn = net_generic(net, csum_net_id);
0735 
0736     return tc_action_net_init(net, tn, &act_csum_ops);
0737 }
0738 
/* Batched per-netns teardown: release all csum actions in each netns. */
static void __net_exit csum_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, csum_net_id);
}
0743 
/* Pernet registration: allocates a tc_action_net per netns and fills
 * csum_net_id with the pernet slot id.
 */
static struct pernet_operations csum_net_ops = {
	.init = csum_init_net,
	.exit_batch = csum_exit_net,
	.id   = &csum_net_id,
	.size = sizeof(struct tc_action_net),
};
0750 
0751 MODULE_DESCRIPTION("Checksum updating actions");
0752 MODULE_LICENSE("GPL");
0753 
/* Module entry: register the action ops and pernet hooks together. */
static int __init csum_init_module(void)
{
	return tcf_register_action(&act_csum_ops, &csum_net_ops);
}
0758 
/* Module exit: unregister the action ops and pernet hooks. */
static void __exit csum_cleanup_module(void)
{
	tcf_unregister_action(&act_csum_ops, &csum_net_ops);
}
0763 
0764 module_init(csum_init_module);
0765 module_exit(csum_cleanup_module);