
0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
0004  * Copyright (c) 2016 Pablo Neira Ayuso <pablo@netfilter.org>
0005  *
0006  * Development of this code funded by Astaro AG (http://www.astaro.com/)
0007  */
0008 
0009 #include <linux/kernel.h>
0010 #include <linux/if_vlan.h>
0011 #include <linux/init.h>
0012 #include <linux/module.h>
0013 #include <linux/netlink.h>
0014 #include <linux/netfilter.h>
0015 #include <linux/netfilter/nf_tables.h>
0016 #include <net/netfilter/nf_tables_core.h>
0017 #include <net/netfilter/nf_tables.h>
0018 #include <net/netfilter/nf_tables_offload.h>
0019 /* For layer 4 checksum field offset. */
0020 #include <linux/tcp.h>
0021 #include <linux/udp.h>
0022 #include <linux/icmpv6.h>
0023 #include <linux/ip.h>
0024 #include <linux/ipv6.h>
0025 #include <net/sctp/checksum.h>
0026 
0027 static bool nft_payload_rebuild_vlan_hdr(const struct sk_buff *skb, int mac_off,
0028                      struct vlan_ethhdr *veth)
0029 {
0030     if (skb_copy_bits(skb, mac_off, veth, ETH_HLEN))
0031         return false;
0032 
0033     veth->h_vlan_proto = skb->vlan_proto;
0034     veth->h_vlan_TCI = htons(skb_vlan_tag_get(skb));
0035     veth->h_vlan_encapsulated_proto = skb->protocol;
0036 
0037     return true;
0038 }
0039 
0040 /* add vlan header into the user buffer for if tag was removed by offloads */
0041 static bool
0042 nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
0043 {
0044     int mac_off = skb_mac_header(skb) - skb->data;
0045     u8 *vlanh, *dst_u8 = (u8 *) d;
0046     struct vlan_ethhdr veth;
0047     u8 vlan_hlen = 0;
0048 
0049     if ((skb->protocol == htons(ETH_P_8021AD) ||
0050          skb->protocol == htons(ETH_P_8021Q)) &&
0051         offset >= VLAN_ETH_HLEN && offset < VLAN_ETH_HLEN + VLAN_HLEN)
0052         vlan_hlen += VLAN_HLEN;
0053 
0054     vlanh = (u8 *) &veth;
0055     if (offset < VLAN_ETH_HLEN + vlan_hlen) {
0056         u8 ethlen = len;
0057 
0058         if (vlan_hlen &&
0059             skb_copy_bits(skb, mac_off, &veth, VLAN_ETH_HLEN) < 0)
0060             return false;
0061         else if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth))
0062             return false;
0063 
0064         if (offset + len > VLAN_ETH_HLEN + vlan_hlen)
0065             ethlen -= offset + len - VLAN_ETH_HLEN + vlan_hlen;
0066 
0067         memcpy(dst_u8, vlanh + offset - vlan_hlen, ethlen);
0068 
0069         len -= ethlen;
0070         if (len == 0)
0071             return true;
0072 
0073         dst_u8 += ethlen;
0074         offset = ETH_HLEN + vlan_hlen;
0075     } else {
0076         offset -= VLAN_HLEN + vlan_hlen;
0077     }
0078 
0079     return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
0080 }
0081 
/* Compute and cache the offset of the inner (post-transport) header in
 * pkt->inneroff.  Only UDP and TCP carry an inner payload here; for TCP the
 * variable-length header (options) is taken into account.  Returns 0 on
 * success, -1 if the transport protocol is unknown/unset or the packet is a
 * non-first fragment.
 */
static int __nft_payload_inner_offset(struct nft_pktinfo *pkt)
{
    unsigned int thoff = nft_thoff(pkt);

    /* No usable L4 header: protocol not dissected or fragmented. */
    if (!(pkt->flags & NFT_PKTINFO_L4PROTO) || pkt->fragoff)
        return -1;

    switch (pkt->tprot) {
    case IPPROTO_UDP:
        pkt->inneroff = thoff + sizeof(struct udphdr);
        break;
    case IPPROTO_TCP: {
        struct tcphdr *th, _tcph;

        th = skb_header_pointer(pkt->skb, thoff, sizeof(_tcph), &_tcph);
        if (!th)
            return -1;

        /* __tcp_hdrlen() reads doff, covering TCP options. */
        pkt->inneroff = thoff + __tcp_hdrlen(th);
        }
        break;
    default:
        return -1;
    }

    /* Mark the offset as computed so it is only done once per packet. */
    pkt->flags |= NFT_PKTINFO_INNER;

    return 0;
}
0111 
/* Return the cached inner-header offset, computing it on first use.
 * The const cast is deliberate: pkt->inneroff/flags act as a per-packet
 * memoization cache updated behind the const interface.
 */
static int nft_payload_inner_offset(const struct nft_pktinfo *pkt)
{
    if (!(pkt->flags & NFT_PKTINFO_INNER) &&
        __nft_payload_inner_offset((struct nft_pktinfo *)pkt) < 0)
        return -1;

    return pkt->inneroff;
}
0120 
/* Expression evaluation: copy priv->len bytes at priv->offset from the
 * selected header base of the packet into destination register priv->dreg.
 * On any failure the rule evaluation breaks (NFT_BREAK).
 */
void nft_payload_eval(const struct nft_expr *expr,
              struct nft_regs *regs,
              const struct nft_pktinfo *pkt)
{
    const struct nft_payload *priv = nft_expr_priv(expr);
    const struct sk_buff *skb = pkt->skb;
    u32 *dest = &regs->data[priv->dreg];
    int offset;

    /* Zero the last 32-bit register word when len is not a multiple of
     * NFT_REG32_SIZE, so trailing padding bytes compare deterministically.
     */
    if (priv->len % NFT_REG32_SIZE)
        dest[priv->len / NFT_REG32_SIZE] = 0;

    switch (priv->base) {
    case NFT_PAYLOAD_LL_HEADER:
        if (!skb_mac_header_was_set(skb))
            goto err;

        /* A VLAN tag stripped by hw offload is not in skb data; take the
         * slow path that reinserts it into the copied bytes.
         */
        if (skb_vlan_tag_present(skb)) {
            if (!nft_payload_copy_vlan(dest, skb,
                           priv->offset, priv->len))
                goto err;
            return;
        }
        offset = skb_mac_header(skb) - skb->data;
        break;
    case NFT_PAYLOAD_NETWORK_HEADER:
        offset = skb_network_offset(skb);
        break;
    case NFT_PAYLOAD_TRANSPORT_HEADER:
        /* Transport header only valid for dissected, first fragments. */
        if (!(pkt->flags & NFT_PKTINFO_L4PROTO) || pkt->fragoff)
            goto err;
        offset = nft_thoff(pkt);
        break;
    case NFT_PAYLOAD_INNER_HEADER:
        offset = nft_payload_inner_offset(pkt);
        if (offset < 0)
            goto err;
        break;
    default:
        WARN_ON_ONCE(1);
        goto err;
    }
    offset += priv->offset;

    if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
        goto err;
    return;
err:
    regs->verdict.code = NFT_BREAK;
}
0171 
/* Netlink attribute policy for both the payload load (DREG) and payload set
 * (SREG + checksum attributes) flavours of this expression.
 */
static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
    [NFTA_PAYLOAD_SREG]     = { .type = NLA_U32 },
    [NFTA_PAYLOAD_DREG]     = { .type = NLA_U32 },
    [NFTA_PAYLOAD_BASE]     = { .type = NLA_U32 },
    [NFTA_PAYLOAD_OFFSET]       = { .type = NLA_U32 },
    [NFTA_PAYLOAD_LEN]      = { .type = NLA_U32 },
    [NFTA_PAYLOAD_CSUM_TYPE]    = { .type = NLA_U32 },
    [NFTA_PAYLOAD_CSUM_OFFSET]  = { .type = NLA_U32 },
    [NFTA_PAYLOAD_CSUM_FLAGS]   = { .type = NLA_U32 },
};
0182 
/* Init for the load flavour: parse base/offset/len from netlink attributes
 * (presence already enforced by nft_payload_select_ops) and register the
 * destination register as storing priv->len bytes of NFT_DATA_VALUE.
 */
static int nft_payload_init(const struct nft_ctx *ctx,
                const struct nft_expr *expr,
                const struct nlattr * const tb[])
{
    struct nft_payload *priv = nft_expr_priv(expr);

    priv->base   = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
    priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
    priv->len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

    return nft_parse_register_store(ctx, tb[NFTA_PAYLOAD_DREG],
                    &priv->dreg, NULL, NFT_DATA_VALUE,
                    priv->len);
}
0197 
/* Dump the load flavour back to netlink.  Returns 0 on success, -1 when the
 * message buffer runs out of room (netlink dump convention).
 */
static int nft_payload_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
    const struct nft_payload *priv = nft_expr_priv(expr);

    if (nft_dump_register(skb, NFTA_PAYLOAD_DREG, priv->dreg) ||
        nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
        nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
        nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)))
        goto nla_put_failure;
    return 0;

nla_put_failure:
    return -1;
}
0212 
/* Expression-elimination hook: returns true when this payload load is
 * redundant because the tracked register already holds the result of an
 * identical load (same base/offset/len), letting the core drop it.
 */
static bool nft_payload_reduce(struct nft_regs_track *track,
                   const struct nft_expr *expr)
{
    const struct nft_payload *priv = nft_expr_priv(expr);
    const struct nft_payload *payload;

    /* Register tracks a different expression type: record ourselves. */
    if (!nft_reg_track_cmp(track, expr, priv->dreg)) {
        nft_reg_track_update(track, expr, priv->dreg, priv->len);
        return false;
    }

    /* Same expression type; redundant only if it loads the same bytes. */
    payload = nft_expr_priv(track->regs[priv->dreg].selector);
    if (priv->base != payload->base ||
        priv->offset != payload->offset ||
        priv->len != payload->len) {
        nft_reg_track_update(track, expr, priv->dreg, priv->len);
        return false;
    }

    /* No bitwise op applied since the tracked load: safe to elide. */
    if (!track->regs[priv->dreg].bitwise)
        return true;

    return nft_expr_reduce_bitwise(track, expr);
}
0237 
/* Build the flow-offload match mask for a load of priv_len bytes from a
 * field that is field_len bytes wide.  A partial load (priv_len < field_len)
 * yields a prefix mask; a load wider than the field cannot be offloaded.
 * Returns true if a mask was written into reg->mask.
 */
static bool nft_payload_offload_mask(struct nft_offload_reg *reg,
                     u32 priv_len, u32 field_len)
{
    unsigned int remainder, delta, k;
    struct nft_data mask = {};
    __be32 remainder_mask;

    if (priv_len == field_len) {
        /* Exact-width load: match every byte. */
        memset(&reg->mask, 0xff, priv_len);
        return true;
    } else if (priv_len > field_len) {
        return false;
    }

    memset(&mask, 0xff, field_len);
    remainder = priv_len % sizeof(u32);
    if (remainder) {
        /* Mask off the low-order (network-order trailing) bytes of the
         * last partially-covered 32-bit word.
         */
        k = priv_len / sizeof(u32);
        delta = field_len - priv_len;
        remainder_mask = htonl(~((1 << (delta * BITS_PER_BYTE)) - 1));
        mask.data[k] = (__force u32)remainder_mask;
    }

    memcpy(&reg->mask, &mask, field_len);

    return true;
}
0265 
/* Translate a link-layer payload match into flow-dissector keys for hardware
 * offload.  Only whole well-known ethernet/VLAN fields (selected by exact
 * offset) are supported; anything else returns -EOPNOTSUPP.
 */
static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
                  struct nft_flow_rule *flow,
                  const struct nft_payload *priv)
{
    struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

    switch (priv->offset) {
    case offsetof(struct ethhdr, h_source):
        if (!nft_payload_offload_mask(reg, priv->len, ETH_ALEN))
            return -EOPNOTSUPP;

        NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
                  src, ETH_ALEN, reg);
        break;
    case offsetof(struct ethhdr, h_dest):
        if (!nft_payload_offload_mask(reg, priv->len, ETH_ALEN))
            return -EOPNOTSUPP;

        NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
                  dst, ETH_ALEN, reg);
        break;
    case offsetof(struct ethhdr, h_proto):
        if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
            return -EOPNOTSUPP;

        NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic,
                  n_proto, sizeof(__be16), reg);
        nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
        break;
    case offsetof(struct vlan_ethhdr, h_vlan_TCI):
        if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
            return -EOPNOTSUPP;

        /* TCI is stored host-order in the dissector key. */
        NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_VLAN, vlan,
                    vlan_tci, sizeof(__be16), reg,
                    NFT_OFFLOAD_F_NETWORK2HOST);
        break;
    case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto):
        if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
            return -EOPNOTSUPP;

        NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
                  vlan_tpid, sizeof(__be16), reg);
        nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
        break;
    /* Offsets shifted by one vlan_hdr select the inner (customer) tag. */
    case offsetof(struct vlan_ethhdr, h_vlan_TCI) + sizeof(struct vlan_hdr):
        if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
            return -EOPNOTSUPP;

        NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
                    vlan_tci, sizeof(__be16), reg,
                    NFT_OFFLOAD_F_NETWORK2HOST);
        break;
    case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto) +
                            sizeof(struct vlan_hdr):
        if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
            return -EOPNOTSUPP;

        NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
                  vlan_tpid, sizeof(__be16), reg);
        nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
        break;
    default:
        return -EOPNOTSUPP;
    }

    return 0;
}
0334 
/* Translate an IPv4-header payload match into flow-dissector keys.
 * Supported fields: saddr, daddr, protocol.
 */
static int nft_payload_offload_ip(struct nft_offload_ctx *ctx,
                  struct nft_flow_rule *flow,
                  const struct nft_payload *priv)
{
    struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

    switch (priv->offset) {
    case offsetof(struct iphdr, saddr):
        if (!nft_payload_offload_mask(reg, priv->len,
                          sizeof(struct in_addr)))
            return -EOPNOTSUPP;

        NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, src,
                  sizeof(struct in_addr), reg);
        nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
        break;
    case offsetof(struct iphdr, daddr):
        if (!nft_payload_offload_mask(reg, priv->len,
                          sizeof(struct in_addr)))
            return -EOPNOTSUPP;

        NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, dst,
                  sizeof(struct in_addr), reg);
        nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
        break;
    case offsetof(struct iphdr, protocol):
        if (!nft_payload_offload_mask(reg, priv->len, sizeof(__u8)))
            return -EOPNOTSUPP;

        NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
                  sizeof(__u8), reg);
        /* Matching ip_proto implies a transport-layer dependency. */
        nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
        break;
    default:
        return -EOPNOTSUPP;
    }

    return 0;
}
0374 
/* Translate an IPv6-header payload match into flow-dissector keys.
 * Supported fields: saddr, daddr, nexthdr.
 */
static int nft_payload_offload_ip6(struct nft_offload_ctx *ctx,
                  struct nft_flow_rule *flow,
                  const struct nft_payload *priv)
{
    struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

    switch (priv->offset) {
    case offsetof(struct ipv6hdr, saddr):
        if (!nft_payload_offload_mask(reg, priv->len,
                          sizeof(struct in6_addr)))
            return -EOPNOTSUPP;

        NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, src,
                  sizeof(struct in6_addr), reg);
        nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
        break;
    case offsetof(struct ipv6hdr, daddr):
        if (!nft_payload_offload_mask(reg, priv->len,
                          sizeof(struct in6_addr)))
            return -EOPNOTSUPP;

        NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, dst,
                  sizeof(struct in6_addr), reg);
        nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
        break;
    case offsetof(struct ipv6hdr, nexthdr):
        if (!nft_payload_offload_mask(reg, priv->len, sizeof(__u8)))
            return -EOPNOTSUPP;

        NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
                  sizeof(__u8), reg);
        /* Matching nexthdr implies a transport-layer dependency. */
        nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
        break;
    default:
        return -EOPNOTSUPP;
    }

    return 0;
}
0414 
0415 static int nft_payload_offload_nh(struct nft_offload_ctx *ctx,
0416                   struct nft_flow_rule *flow,
0417                   const struct nft_payload *priv)
0418 {
0419     int err;
0420 
0421     switch (ctx->dep.l3num) {
0422     case htons(ETH_P_IP):
0423         err = nft_payload_offload_ip(ctx, flow, priv);
0424         break;
0425     case htons(ETH_P_IPV6):
0426         err = nft_payload_offload_ip6(ctx, flow, priv);
0427         break;
0428     default:
0429         return -EOPNOTSUPP;
0430     }
0431 
0432     return err;
0433 }
0434 
/* Translate a TCP-header payload match into flow-dissector port keys.
 * Supported fields: source and destination port.
 */
static int nft_payload_offload_tcp(struct nft_offload_ctx *ctx,
                   struct nft_flow_rule *flow,
                   const struct nft_payload *priv)
{
    struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

    switch (priv->offset) {
    case offsetof(struct tcphdr, source):
        if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
            return -EOPNOTSUPP;

        NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
                  sizeof(__be16), reg);
        break;
    case offsetof(struct tcphdr, dest):
        if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
            return -EOPNOTSUPP;

        NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
                  sizeof(__be16), reg);
        break;
    default:
        return -EOPNOTSUPP;
    }

    return 0;
}
0462 
/* Translate a UDP-header payload match into flow-dissector port keys.
 * Supported fields: source and destination port.
 */
static int nft_payload_offload_udp(struct nft_offload_ctx *ctx,
                   struct nft_flow_rule *flow,
                   const struct nft_payload *priv)
{
    struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

    switch (priv->offset) {
    case offsetof(struct udphdr, source):
        if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
            return -EOPNOTSUPP;

        NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
                  sizeof(__be16), reg);
        break;
    case offsetof(struct udphdr, dest):
        if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
            return -EOPNOTSUPP;

        NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
                  sizeof(__be16), reg);
        break;
    default:
        return -EOPNOTSUPP;
    }

    return 0;
}
0490 
0491 static int nft_payload_offload_th(struct nft_offload_ctx *ctx,
0492                   struct nft_flow_rule *flow,
0493                   const struct nft_payload *priv)
0494 {
0495     int err;
0496 
0497     switch (ctx->dep.protonum) {
0498     case IPPROTO_TCP:
0499         err = nft_payload_offload_tcp(ctx, flow, priv);
0500         break;
0501     case IPPROTO_UDP:
0502         err = nft_payload_offload_udp(ctx, flow, priv);
0503         break;
0504     default:
0505         return -EOPNOTSUPP;
0506     }
0507 
0508     return err;
0509 }
0510 
0511 static int nft_payload_offload(struct nft_offload_ctx *ctx,
0512                    struct nft_flow_rule *flow,
0513                    const struct nft_expr *expr)
0514 {
0515     const struct nft_payload *priv = nft_expr_priv(expr);
0516     int err;
0517 
0518     switch (priv->base) {
0519     case NFT_PAYLOAD_LL_HEADER:
0520         err = nft_payload_offload_ll(ctx, flow, priv);
0521         break;
0522     case NFT_PAYLOAD_NETWORK_HEADER:
0523         err = nft_payload_offload_nh(ctx, flow, priv);
0524         break;
0525     case NFT_PAYLOAD_TRANSPORT_HEADER:
0526         err = nft_payload_offload_th(ctx, flow, priv);
0527         break;
0528     default:
0529         err = -EOPNOTSUPP;
0530         break;
0531     }
0532     return err;
0533 }
0534 
/* Generic payload-load ops: used when the fast path below does not apply. */
static const struct nft_expr_ops nft_payload_ops = {
    .type       = &nft_payload_type,
    .size       = NFT_EXPR_SIZE(sizeof(struct nft_payload)),
    .eval       = nft_payload_eval,
    .init       = nft_payload_init,
    .dump       = nft_payload_dump,
    .reduce     = nft_payload_reduce,
    .offload    = nft_payload_offload,
};
0544 
/* Fast-path ops, selected by nft_payload_select_ops for small aligned loads;
 * non-static so the core can substitute an optimized eval elsewhere.
 */
const struct nft_expr_ops nft_payload_fast_ops = {
    .type       = &nft_payload_type,
    .size       = NFT_EXPR_SIZE(sizeof(struct nft_payload)),
    .eval       = nft_payload_eval,
    .init       = nft_payload_init,
    .dump       = nft_payload_dump,
    .reduce     = nft_payload_reduce,
    .offload    = nft_payload_offload,
};
0554 
/* Incrementally update a folded internet checksum: remove fsum (old bytes)
 * and add tsum (new bytes).  A result of 0 is replaced by CSUM_MANGLED_0
 * so the field never becomes the "no checksum" marker.
 */
static inline void nft_csum_replace(__sum16 *sum, __wsum fsum, __wsum tsum)
{
    *sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), fsum), tsum));
    if (*sum == 0)
        *sum = CSUM_MANGLED_0;
}
0561 
/* Return true if the UDP header at thoff carries a checksum (check != 0);
 * a zero UDP checksum means "not computed" and must not be mangled.
 * Also returns false if the header cannot be read.
 */
static bool nft_payload_udp_checksum(struct sk_buff *skb, unsigned int thoff)
{
    struct udphdr *uh, _uh;

    uh = skb_header_pointer(skb, thoff, sizeof(_uh), &_uh);
    if (!uh)
        return false;

    return (__force bool)uh->check;
}
0572 
/* Compute the absolute skb offset of the L4 checksum field for the packet's
 * transport protocol.  Returns 0 and stores it in *l4csum_offset, or -1 when
 * no recalculable checksum exists (fragments, checksum-less UDP, unknown
 * protocols).
 */
static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt,
                     struct sk_buff *skb,
                     unsigned int *l4csum_offset)
{
    if (pkt->fragoff)
        return -1;

    switch (pkt->tprot) {
    case IPPROTO_TCP:
        *l4csum_offset = offsetof(struct tcphdr, check);
        break;
    case IPPROTO_UDP:
        /* A zero UDP checksum means none was computed: skip it. */
        if (!nft_payload_udp_checksum(skb, nft_thoff(pkt)))
            return -1;
        fallthrough;
    case IPPROTO_UDPLITE:
        *l4csum_offset = offsetof(struct udphdr, check);
        break;
    case IPPROTO_ICMPV6:
        *l4csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
        break;
    default:
        return -1;
    }

    /* Convert the in-header offset to an absolute skb offset. */
    *l4csum_offset += nft_thoff(pkt);
    return 0;
}
0601 
/* Recompute the SCTP CRC32c checksum in place after a payload write.
 * Returns 0 on success, -1 if the header cannot be made writable.
 */
static int nft_payload_csum_sctp(struct sk_buff *skb, int offset)
{
    struct sctphdr *sh;

    if (skb_ensure_writable(skb, offset + sizeof(*sh)))
        return -1;

    sh = (struct sctphdr *)(skb->data + offset);
    sh->checksum = sctp_compute_cksum(skb, offset);
    /* Checksum is now correct; no further verification needed. */
    skb->ip_summed = CHECKSUM_UNNECESSARY;
    return 0;
}
0614 
/* Adjust the L4 (pseudo-header covering) checksum after payload bytes were
 * rewritten: subtract the checksum of the old bytes (fsum) and add the new
 * ones (tsum).  Returns 0 on success or when no adjustment applies, -1 on
 * skb access failure.
 */
static int nft_payload_l4csum_update(const struct nft_pktinfo *pkt,
                     struct sk_buff *skb,
                     __wsum fsum, __wsum tsum)
{
    int l4csum_offset;
    __sum16 sum;

    /* If we cannot determine layer 4 checksum offset or this packet doesn't
     * require layer 4 checksum recalculation, skip this packet.
     */
    if (nft_payload_l4csum_offset(pkt, skb, &l4csum_offset) < 0)
        return 0;

    if (skb_copy_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
        return -1;

    /* Checksum mangling for an arbitrary amount of bytes, based on
     * inet_proto_csum_replace*() functions.
     */
    if (skb->ip_summed != CHECKSUM_PARTIAL) {
        nft_csum_replace(&sum, fsum, tsum);
        if (skb->ip_summed == CHECKSUM_COMPLETE) {
            /* Keep the software-computed full checksum in sync too. */
            skb->csum = ~csum_add(csum_sub(~(skb->csum), fsum),
                          tsum);
        }
    } else {
        /* Hardware will finish the checksum; only fix the stored
         * pseudo-header seed.
         */
        sum = ~csum_fold(csum_add(csum_sub(csum_unfold(sum), fsum),
                      tsum));
    }

    if (skb_ensure_writable(skb, l4csum_offset + sizeof(sum)) ||
        skb_store_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
        return -1;

    return 0;
}
0651 
/* Incrementally fix the internet checksum at csum_offset after a payload
 * rewrite (fsum = old bytes' sum, tsum = new bytes' sum).  Returns 0 on
 * success, -1 on skb access failure.
 * NOTE(review): the src parameter is unused here — presumably kept for
 * signature symmetry with related helpers; confirm before removing.
 */
static int nft_payload_csum_inet(struct sk_buff *skb, const u32 *src,
                 __wsum fsum, __wsum tsum, int csum_offset)
{
    __sum16 sum;

    if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
        return -1;

    nft_csum_replace(&sum, fsum, tsum);
    if (skb_ensure_writable(skb, csum_offset + sizeof(sum)) ||
        skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
        return -1;

    return 0;
}
0667 
/* Payload-set evaluation: write priv->len bytes from source register
 * priv->sreg into the packet at the selected base + offset, fixing up the
 * inet and/or L4 checksums as configured.  Checksum deltas are computed
 * BEFORE the bytes are stored; SCTP CRC32c is recomputed afterwards.
 * Breaks rule evaluation (NFT_BREAK) on any failure.
 */
static void nft_payload_set_eval(const struct nft_expr *expr,
                 struct nft_regs *regs,
                 const struct nft_pktinfo *pkt)
{
    const struct nft_payload_set *priv = nft_expr_priv(expr);
    struct sk_buff *skb = pkt->skb;
    const u32 *src = &regs->data[priv->sreg];
    int offset, csum_offset;
    __wsum fsum, tsum;

    switch (priv->base) {
    case NFT_PAYLOAD_LL_HEADER:
        if (!skb_mac_header_was_set(skb))
            goto err;
        offset = skb_mac_header(skb) - skb->data;
        break;
    case NFT_PAYLOAD_NETWORK_HEADER:
        offset = skb_network_offset(skb);
        break;
    case NFT_PAYLOAD_TRANSPORT_HEADER:
        if (!(pkt->flags & NFT_PKTINFO_L4PROTO) || pkt->fragoff)
            goto err;
        offset = nft_thoff(pkt);
        break;
    case NFT_PAYLOAD_INNER_HEADER:
        offset = nft_payload_inner_offset(pkt);
        if (offset < 0)
            goto err;
        break;
    default:
        WARN_ON_ONCE(1);
        goto err;
    }

    /* csum_offset is relative to the header base, not to priv->offset. */
    csum_offset = offset + priv->csum_offset;
    offset += priv->offset;

    /* Skip software checksum fixups for transport/inner bases when the
     * device will finish the checksum (CHECKSUM_PARTIAL).
     */
    if ((priv->csum_type == NFT_PAYLOAD_CSUM_INET || priv->csum_flags) &&
        ((priv->base != NFT_PAYLOAD_TRANSPORT_HEADER &&
          priv->base != NFT_PAYLOAD_INNER_HEADER) ||
         skb->ip_summed != CHECKSUM_PARTIAL)) {
        /* Old-bytes vs new-bytes sums, taken before the store below. */
        fsum = skb_checksum(skb, offset, priv->len, 0);
        tsum = csum_partial(src, priv->len, 0);

        if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
            nft_payload_csum_inet(skb, src, fsum, tsum, csum_offset))
            goto err;

        if (priv->csum_flags &&
            nft_payload_l4csum_update(pkt, skb, fsum, tsum) < 0)
            goto err;
    }

    if (skb_ensure_writable(skb, max(offset + priv->len, 0)) ||
        skb_store_bits(skb, offset, src, priv->len) < 0)
        goto err;

    /* SCTP uses CRC32c, which cannot be patched incrementally: recompute
     * over the whole packet after the write (first fragments only).
     */
    if (priv->csum_type == NFT_PAYLOAD_CSUM_SCTP &&
        pkt->tprot == IPPROTO_SCTP &&
        skb->ip_summed != CHECKSUM_PARTIAL) {
        if (pkt->fragoff == 0 &&
            nft_payload_csum_sctp(skb, nft_thoff(pkt)))
            goto err;
    }

    return;
err:
    regs->verdict.code = NFT_BREAK;
}
0737 
/* Init for the set flavour: parse base/offset/len plus the optional checksum
 * type, offset and flags, validating the combination (SCTP checksumming is
 * restricted to the transport header at the checksum field's exact offset).
 */
static int nft_payload_set_init(const struct nft_ctx *ctx,
                const struct nft_expr *expr,
                const struct nlattr * const tb[])
{
    struct nft_payload_set *priv = nft_expr_priv(expr);
    u32 csum_offset, csum_type = NFT_PAYLOAD_CSUM_NONE;
    int err;

    priv->base        = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
    priv->offset      = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
    priv->len         = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

    if (tb[NFTA_PAYLOAD_CSUM_TYPE])
        csum_type = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
    if (tb[NFTA_PAYLOAD_CSUM_OFFSET]) {
        /* csum_offset is stored in a u8-sized field: bound-check it. */
        err = nft_parse_u32_check(tb[NFTA_PAYLOAD_CSUM_OFFSET], U8_MAX,
                      &csum_offset);
        if (err < 0)
            return err;

        priv->csum_offset = csum_offset;
    }
    if (tb[NFTA_PAYLOAD_CSUM_FLAGS]) {
        u32 flags;

        flags = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_FLAGS]));
        if (flags & ~NFT_PAYLOAD_L4CSUM_PSEUDOHDR)
            return -EINVAL;

        priv->csum_flags = flags;
    }

    switch (csum_type) {
    case NFT_PAYLOAD_CSUM_NONE:
    case NFT_PAYLOAD_CSUM_INET:
        break;
    case NFT_PAYLOAD_CSUM_SCTP:
        if (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER)
            return -EINVAL;

        if (priv->csum_offset != offsetof(struct sctphdr, checksum))
            return -EINVAL;
        break;
    default:
        return -EOPNOTSUPP;
    }
    priv->csum_type = csum_type;

    return nft_parse_register_load(tb[NFTA_PAYLOAD_SREG], &priv->sreg,
                       priv->len);
}
0789 
/* Dump the set flavour (including checksum configuration) to netlink.
 * Returns 0 on success, -1 when the message buffer runs out of room.
 */
static int nft_payload_set_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
    const struct nft_payload_set *priv = nft_expr_priv(expr);

    if (nft_dump_register(skb, NFTA_PAYLOAD_SREG, priv->sreg) ||
        nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
        nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
        nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)) ||
        nla_put_be32(skb, NFTA_PAYLOAD_CSUM_TYPE, htonl(priv->csum_type)) ||
        nla_put_be32(skb, NFTA_PAYLOAD_CSUM_OFFSET,
             htonl(priv->csum_offset)) ||
        nla_put_be32(skb, NFTA_PAYLOAD_CSUM_FLAGS, htonl(priv->csum_flags)))
        goto nla_put_failure;
    return 0;

nla_put_failure:
    return -1;
}
0808 
0809 static bool nft_payload_set_reduce(struct nft_regs_track *track,
0810                    const struct nft_expr *expr)
0811 {
0812     int i;
0813 
0814     for (i = 0; i < NFT_REG32_NUM; i++) {
0815         if (!track->regs[i].selector)
0816             continue;
0817 
0818         if (track->regs[i].selector->ops != &nft_payload_ops &&
0819             track->regs[i].selector->ops != &nft_payload_fast_ops)
0820             continue;
0821 
0822         __nft_reg_track_cancel(track, i);
0823     }
0824 
0825     return false;
0826 }
0827 
/* Ops for the payload-set (write) flavour; no offload support. */
static const struct nft_expr_ops nft_payload_set_ops = {
    .type       = &nft_payload_type,
    .size       = NFT_EXPR_SIZE(sizeof(struct nft_payload_set)),
    .eval       = nft_payload_set_eval,
    .init       = nft_payload_set_init,
    .dump       = nft_payload_set_dump,
    .reduce     = nft_payload_set_reduce,
};
0836 
/* Select the ops flavour from the netlink attributes: SREG selects the set
 * (write) ops; DREG selects a load, using the fast ops for small, aligned,
 * power-of-two-length loads outside the link-layer/inner bases.
 */
static const struct nft_expr_ops *
nft_payload_select_ops(const struct nft_ctx *ctx,
               const struct nlattr * const tb[])
{
    enum nft_payload_bases base;
    unsigned int offset, len;
    int err;

    if (tb[NFTA_PAYLOAD_BASE] == NULL ||
        tb[NFTA_PAYLOAD_OFFSET] == NULL ||
        tb[NFTA_PAYLOAD_LEN] == NULL)
        return ERR_PTR(-EINVAL);

    base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
    switch (base) {
    case NFT_PAYLOAD_LL_HEADER:
    case NFT_PAYLOAD_NETWORK_HEADER:
    case NFT_PAYLOAD_TRANSPORT_HEADER:
    case NFT_PAYLOAD_INNER_HEADER:
        break;
    default:
        return ERR_PTR(-EOPNOTSUPP);
    }

    /* SREG and DREG are mutually exclusive: a write xor a load. */
    if (tb[NFTA_PAYLOAD_SREG] != NULL) {
        if (tb[NFTA_PAYLOAD_DREG] != NULL)
            return ERR_PTR(-EINVAL);
        return &nft_payload_set_ops;
    }

    if (tb[NFTA_PAYLOAD_DREG] == NULL)
        return ERR_PTR(-EINVAL);

    /* offset and len are stored in u8-sized fields: bound-check here. */
    err = nft_parse_u32_check(tb[NFTA_PAYLOAD_OFFSET], U8_MAX, &offset);
    if (err < 0)
        return ERR_PTR(err);

    err = nft_parse_u32_check(tb[NFTA_PAYLOAD_LEN], U8_MAX, &len);
    if (err < 0)
        return ERR_PTR(err);

    /* LL header may need VLAN reinsertion and inner header needs offset
     * discovery: both are incompatible with the fast load path.
     */
    if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
        base != NFT_PAYLOAD_LL_HEADER && base != NFT_PAYLOAD_INNER_HEADER)
        return &nft_payload_fast_ops;
    else
        return &nft_payload_ops;
}
0884 
/* Expression type registration for "payload" (both load and set flavours). */
struct nft_expr_type nft_payload_type __read_mostly = {
    .name       = "payload",
    .select_ops = nft_payload_select_ops,
    .policy     = nft_payload_policy,
    .maxattr    = NFTA_PAYLOAD_MAX,
    .owner      = THIS_MODULE,
};