Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
0004  *
0005  * Development of this code funded by Astaro AG (http://www.astaro.com/)
0006  */
0007 
0008 #include <linux/kernel.h>
0009 #include <linux/init.h>
0010 #include <linux/module.h>
0011 #include <linux/netlink.h>
0012 #include <linux/netfilter.h>
0013 #include <linux/if_arp.h>
0014 #include <linux/netfilter/nf_tables.h>
0015 #include <net/netfilter/nf_tables_core.h>
0016 #include <net/netfilter/nf_tables_offload.h>
0017 #include <net/netfilter/nf_tables.h>
0018 
/* Per-expression state for the generic cmp expression: the constant
 * right-hand operand, the register holding the left-hand value, the
 * operand length and the comparison operator.
 */
struct nft_cmp_expr {
    struct nft_data     data;   /* constant operand to compare against */
    u8          sreg;           /* source register index */
    u8          len;            /* compare length in bytes (memcmp length) */
    enum nft_cmp_ops    op:8;   /* NFT_CMP_EQ/NEQ/LT/LTE/GT/GTE */
};
0025 
0026 void nft_cmp_eval(const struct nft_expr *expr,
0027           struct nft_regs *regs,
0028           const struct nft_pktinfo *pkt)
0029 {
0030     const struct nft_cmp_expr *priv = nft_expr_priv(expr);
0031     int d;
0032 
0033     d = memcmp(&regs->data[priv->sreg], &priv->data, priv->len);
0034     switch (priv->op) {
0035     case NFT_CMP_EQ:
0036         if (d != 0)
0037             goto mismatch;
0038         break;
0039     case NFT_CMP_NEQ:
0040         if (d == 0)
0041             goto mismatch;
0042         break;
0043     case NFT_CMP_LT:
0044         if (d == 0)
0045             goto mismatch;
0046         fallthrough;
0047     case NFT_CMP_LTE:
0048         if (d > 0)
0049             goto mismatch;
0050         break;
0051     case NFT_CMP_GT:
0052         if (d == 0)
0053             goto mismatch;
0054         fallthrough;
0055     case NFT_CMP_GTE:
0056         if (d < 0)
0057             goto mismatch;
0058         break;
0059     }
0060     return;
0061 
0062 mismatch:
0063     regs->verdict.code = NFT_BREAK;
0064 }
0065 
/* Netlink attribute policy for all cmp expression variants. */
static const struct nla_policy nft_cmp_policy[NFTA_CMP_MAX + 1] = {
    [NFTA_CMP_SREG]     = { .type = NLA_U32 },
    [NFTA_CMP_OP]       = { .type = NLA_U32 },
    [NFTA_CMP_DATA]     = { .type = NLA_NESTED },
};
0071 
0072 static int nft_cmp_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
0073             const struct nlattr * const tb[])
0074 {
0075     struct nft_cmp_expr *priv = nft_expr_priv(expr);
0076     struct nft_data_desc desc = {
0077         .type   = NFT_DATA_VALUE,
0078         .size   = sizeof(priv->data),
0079     };
0080     int err;
0081 
0082     err = nft_data_init(NULL, &priv->data, &desc, tb[NFTA_CMP_DATA]);
0083     if (err < 0)
0084         return err;
0085 
0086     err = nft_parse_register_load(tb[NFTA_CMP_SREG], &priv->sreg, desc.len);
0087     if (err < 0)
0088         return err;
0089 
0090     priv->op  = ntohl(nla_get_be32(tb[NFTA_CMP_OP]));
0091     priv->len = desc.len;
0092     return 0;
0093 }
0094 
0095 static int nft_cmp_dump(struct sk_buff *skb, const struct nft_expr *expr)
0096 {
0097     const struct nft_cmp_expr *priv = nft_expr_priv(expr);
0098 
0099     if (nft_dump_register(skb, NFTA_CMP_SREG, priv->sreg))
0100         goto nla_put_failure;
0101     if (nla_put_be32(skb, NFTA_CMP_OP, htonl(priv->op)))
0102         goto nla_put_failure;
0103 
0104     if (nft_data_dump(skb, NFTA_CMP_DATA, &priv->data,
0105               NFT_DATA_VALUE, priv->len) < 0)
0106         goto nla_put_failure;
0107     return 0;
0108 
0109 nla_put_failure:
0110     return -1;
0111 }
0112 
/* Scratch buffer for converting match data to host byte order before
 * it is copied into the flow key; sized for the 2/4/8-byte cases that
 * nft_payload_n2h() handles.
 */
union nft_cmp_offload_data {
    u16 val16;
    u32 val32;
    u64 val64;
};
0118 
0119 static void nft_payload_n2h(union nft_cmp_offload_data *data,
0120                 const u8 *val, u32 len)
0121 {
0122     switch (len) {
0123     case 2:
0124         data->val16 = ntohs(*((__be16 *)val));
0125         break;
0126     case 4:
0127         data->val32 = ntohl(*((__be32 *)val));
0128         break;
0129     case 8:
0130         data->val64 = be64_to_cpu(*((__be64 *)val));
0131         break;
0132     default:
0133         WARN_ON_ONCE(1);
0134         break;
0135     }
0136 }
0137 
/* Translate a cmp expression into a flow rule match entry. Only
 * equality comparisons whose length fits in the tracked offload
 * register can be offloaded; everything else returns -EOPNOTSUPP so
 * the rule stays in software.
 */
static int __nft_cmp_offload(struct nft_offload_ctx *ctx,
                 struct nft_flow_rule *flow,
                 const struct nft_cmp_expr *priv)
{
    struct nft_offload_reg *reg = &ctx->regs[priv->sreg];
    union nft_cmp_offload_data _data, _datamask;
    u8 *mask = (u8 *)&flow->match.mask;
    u8 *key = (u8 *)&flow->match.key;
    u8 *data, *datamask;

    if (priv->op != NFT_CMP_EQ || priv->len > reg->len)
        return -EOPNOTSUPP;

    /* Fields flagged NFT_OFFLOAD_F_NETWORK2HOST are kept in host byte
     * order in the flow key, so convert both value and mask first.
     */
    if (reg->flags & NFT_OFFLOAD_F_NETWORK2HOST) {
        nft_payload_n2h(&_data, (u8 *)&priv->data, reg->len);
        nft_payload_n2h(&_datamask, (u8 *)&reg->mask, reg->len);
        data = (u8 *)&_data;
        datamask = (u8 *)&_datamask;
    } else {
        data = (u8 *)&priv->data;
        datamask = (u8 *)&reg->mask;
    }

    /* Copy value and mask into the flow match at the register's slot. */
    memcpy(key + reg->offset, data, reg->len);
    memcpy(mask + reg->offset, datamask, reg->len);

    flow->match.dissector.used_keys |= BIT(reg->key);
    flow->match.dissector.offset[reg->key] = reg->base_offset;

    /* A match on the ingress interface type is only offloadable for
     * Ethernet; note this check runs after the key/mask were written.
     */
    if (reg->key == FLOW_DISSECTOR_KEY_META &&
        reg->offset == offsetof(struct nft_flow_key, meta.ingress_iftype) &&
        nft_reg_load16(priv->data.data) != ARPHRD_ETHER)
        return -EOPNOTSUPP;

    nft_offload_update_dependency(ctx, &priv->data, reg->len);

    return 0;
}
0176 
/* Offload entry point for the generic cmp expression; hands the
 * expression private data to the common offload helper.
 */
static int nft_cmp_offload(struct nft_offload_ctx *ctx,
               struct nft_flow_rule *flow,
               const struct nft_expr *expr)
{
    return __nft_cmp_offload(ctx, flow, nft_expr_priv(expr));
}
0185 
/* Generic cmp implementation: supports all six comparison operators
 * and arbitrary operand lengths up to sizeof(struct nft_data).
 */
static const struct nft_expr_ops nft_cmp_ops = {
    .type       = &nft_cmp_type,
    .size       = NFT_EXPR_SIZE(sizeof(struct nft_cmp_expr)),
    .eval       = nft_cmp_eval,
    .init       = nft_cmp_init,
    .dump       = nft_cmp_dump,
    .reduce     = NFT_REDUCE_READONLY,
    .offload    = nft_cmp_offload,
};
0195 
0196 /* Calculate the mask for the nft_cmp_fast expression. On big endian the
0197  * mask needs to include the *upper* bytes when interpreting that data as
0198  * something smaller than the full u32, therefore a cpu_to_le32 is done.
0199  */
0200 static u32 nft_cmp_fast_mask(unsigned int len)
0201 {
0202     __le32 mask = cpu_to_le32(~0U >> (sizeof_field(struct nft_cmp_fast_expr,
0203                       data) * BITS_PER_BYTE - len));
0204 
0205     return (__force u32)mask;
0206 }
0207 
0208 static int nft_cmp_fast_init(const struct nft_ctx *ctx,
0209                  const struct nft_expr *expr,
0210                  const struct nlattr * const tb[])
0211 {
0212     struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
0213     struct nft_data data;
0214     struct nft_data_desc desc = {
0215         .type   = NFT_DATA_VALUE,
0216         .size   = sizeof(data),
0217     };
0218     int err;
0219 
0220     err = nft_data_init(NULL, &data, &desc, tb[NFTA_CMP_DATA]);
0221     if (err < 0)
0222         return err;
0223 
0224     err = nft_parse_register_load(tb[NFTA_CMP_SREG], &priv->sreg, desc.len);
0225     if (err < 0)
0226         return err;
0227 
0228     desc.len *= BITS_PER_BYTE;
0229 
0230     priv->mask = nft_cmp_fast_mask(desc.len);
0231     priv->data = data.data[0] & priv->mask;
0232     priv->len  = desc.len;
0233     priv->inv  = ntohl(nla_get_be32(tb[NFTA_CMP_OP])) != NFT_CMP_EQ;
0234     return 0;
0235 }
0236 
0237 static int nft_cmp_fast_offload(struct nft_offload_ctx *ctx,
0238                 struct nft_flow_rule *flow,
0239                 const struct nft_expr *expr)
0240 {
0241     const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
0242     struct nft_cmp_expr cmp = {
0243         .data   = {
0244             .data   = {
0245                 [0] = priv->data,
0246             },
0247         },
0248         .sreg   = priv->sreg,
0249         .len    = priv->len / BITS_PER_BYTE,
0250         .op = priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ,
0251     };
0252 
0253     return __nft_cmp_offload(ctx, flow, &cmp);
0254 }
0255 
0256 static int nft_cmp_fast_dump(struct sk_buff *skb, const struct nft_expr *expr)
0257 {
0258     const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
0259     enum nft_cmp_ops op = priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ;
0260     struct nft_data data;
0261 
0262     if (nft_dump_register(skb, NFTA_CMP_SREG, priv->sreg))
0263         goto nla_put_failure;
0264     if (nla_put_be32(skb, NFTA_CMP_OP, htonl(op)))
0265         goto nla_put_failure;
0266 
0267     data.data[0] = priv->data;
0268     if (nft_data_dump(skb, NFTA_CMP_DATA, &data,
0269               NFT_DATA_VALUE, priv->len / BITS_PER_BYTE) < 0)
0270         goto nla_put_failure;
0271     return 0;
0272 
0273 nla_put_failure:
0274     return -1;
0275 }
0276 
/* Fast path for (not-)equal comparisons of at most 32 bits; eval is
 * inlined by the core, hence the NULL .eval.
 */
const struct nft_expr_ops nft_cmp_fast_ops = {
    .type       = &nft_cmp_type,
    .size       = NFT_EXPR_SIZE(sizeof(struct nft_cmp_fast_expr)),
    .eval       = NULL, /* inlined */
    .init       = nft_cmp_fast_init,
    .dump       = nft_cmp_fast_dump,
    .reduce     = NFT_REDUCE_READONLY,
    .offload    = nft_cmp_fast_offload,
};
0286 
0287 static u32 nft_cmp_mask(u32 bitlen)
0288 {
0289     return (__force u32)cpu_to_le32(~0U >> (sizeof(u32) * BITS_PER_BYTE - bitlen));
0290 }
0291 
0292 static void nft_cmp16_fast_mask(struct nft_data *data, unsigned int bitlen)
0293 {
0294     int len = bitlen / BITS_PER_BYTE;
0295     int i, words = len / sizeof(u32);
0296 
0297     for (i = 0; i < words; i++) {
0298         data->data[i] = 0xffffffff;
0299         bitlen -= sizeof(u32) * BITS_PER_BYTE;
0300     }
0301 
0302     if (len % sizeof(u32))
0303         data->data[i++] = nft_cmp_mask(bitlen);
0304 
0305     for (; i < 4; i++)
0306         data->data[i] = 0;
0307 }
0308 
0309 static int nft_cmp16_fast_init(const struct nft_ctx *ctx,
0310                    const struct nft_expr *expr,
0311                    const struct nlattr * const tb[])
0312 {
0313     struct nft_cmp16_fast_expr *priv = nft_expr_priv(expr);
0314     struct nft_data_desc desc = {
0315         .type   = NFT_DATA_VALUE,
0316         .size   = sizeof(priv->data),
0317     };
0318     int err;
0319 
0320     err = nft_data_init(NULL, &priv->data, &desc, tb[NFTA_CMP_DATA]);
0321     if (err < 0)
0322         return err;
0323 
0324     err = nft_parse_register_load(tb[NFTA_CMP_SREG], &priv->sreg, desc.len);
0325     if (err < 0)
0326         return err;
0327 
0328     nft_cmp16_fast_mask(&priv->mask, desc.len * BITS_PER_BYTE);
0329     priv->inv = ntohl(nla_get_be32(tb[NFTA_CMP_OP])) != NFT_CMP_EQ;
0330     priv->len = desc.len;
0331 
0332     return 0;
0333 }
0334 
0335 static int nft_cmp16_fast_offload(struct nft_offload_ctx *ctx,
0336                   struct nft_flow_rule *flow,
0337                   const struct nft_expr *expr)
0338 {
0339     const struct nft_cmp16_fast_expr *priv = nft_expr_priv(expr);
0340     struct nft_cmp_expr cmp = {
0341         .data   = priv->data,
0342         .sreg   = priv->sreg,
0343         .len    = priv->len,
0344         .op = priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ,
0345     };
0346 
0347     return __nft_cmp_offload(ctx, flow, &cmp);
0348 }
0349 
0350 static int nft_cmp16_fast_dump(struct sk_buff *skb, const struct nft_expr *expr)
0351 {
0352     const struct nft_cmp16_fast_expr *priv = nft_expr_priv(expr);
0353     enum nft_cmp_ops op = priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ;
0354 
0355     if (nft_dump_register(skb, NFTA_CMP_SREG, priv->sreg))
0356         goto nla_put_failure;
0357     if (nla_put_be32(skb, NFTA_CMP_OP, htonl(op)))
0358         goto nla_put_failure;
0359 
0360     if (nft_data_dump(skb, NFTA_CMP_DATA, &priv->data,
0361               NFT_DATA_VALUE, priv->len) < 0)
0362         goto nla_put_failure;
0363     return 0;
0364 
0365 nla_put_failure:
0366     return -1;
0367 }
0368 
0369 
/* Fast path for (not-)equal comparisons of up to 16 bytes on suitably
 * aligned registers; eval is inlined by the core (NULL .eval).
 */
const struct nft_expr_ops nft_cmp16_fast_ops = {
    .type       = &nft_cmp_type,
    .size       = NFT_EXPR_SIZE(sizeof(struct nft_cmp16_fast_expr)),
    .eval       = NULL, /* inlined */
    .init       = nft_cmp16_fast_init,
    .dump       = nft_cmp16_fast_dump,
    .reduce     = NFT_REDUCE_READONLY,
    .offload    = nft_cmp16_fast_offload,
};
0379 
/* Choose the most specific cmp implementation for the given netlink
 * attributes: the 32-bit fast ops, the 16-byte fast ops, or the
 * generic version as fallback.
 */
static const struct nft_expr_ops *
nft_cmp_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
{
    struct nft_data data;
    struct nft_data_desc desc = {
        .type   = NFT_DATA_VALUE,
        .size   = sizeof(data),
    };
    enum nft_cmp_ops op;
    u8 sreg;
    int err;

    /* All three attributes are mandatory. */
    if (tb[NFTA_CMP_SREG] == NULL ||
        tb[NFTA_CMP_OP] == NULL ||
        tb[NFTA_CMP_DATA] == NULL)
        return ERR_PTR(-EINVAL);

    op = ntohl(nla_get_be32(tb[NFTA_CMP_OP]));
    switch (op) {
    case NFT_CMP_EQ:
    case NFT_CMP_NEQ:
    case NFT_CMP_LT:
    case NFT_CMP_LTE:
    case NFT_CMP_GT:
    case NFT_CMP_GTE:
        break;
    default:
        return ERR_PTR(-EINVAL);
    }

    /* Decode the constant once here only to learn its length (desc.len);
     * the selected ops' init decodes it again into the expression.
     */
    err = nft_data_init(NULL, &data, &desc, tb[NFTA_CMP_DATA]);
    if (err < 0)
        return ERR_PTR(err);

    sreg = ntohl(nla_get_be32(tb[NFTA_CMP_SREG]));

    /* Only (not-)equal comparisons have fast-path implementations.
     * The 16-byte path additionally requires the source register to
     * sit on a 128-bit boundary — NFT_REG_1..4, or an even 32-bit
     * register index (presumably aliasing those; confirm against
     * the register layout in nf_tables.h).
     */
    if (op == NFT_CMP_EQ || op == NFT_CMP_NEQ) {
        if (desc.len <= sizeof(u32))
            return &nft_cmp_fast_ops;
        else if (desc.len <= sizeof(data) &&
             ((sreg >= NFT_REG_1 && sreg <= NFT_REG_4) ||
              (sreg >= NFT_REG32_00 && sreg <= NFT_REG32_12 && sreg % 2 == 0)))
            return &nft_cmp16_fast_ops;
    }
    return &nft_cmp_ops;
}
0426 
/* Expression type registration for "cmp"; the concrete ops are picked
 * per instance by nft_cmp_select_ops().
 */
struct nft_expr_type nft_cmp_type __read_mostly = {
    .name       = "cmp",
    .select_ops = nft_cmp_select_ops,
    .policy     = nft_cmp_policy,
    .maxattr    = NFTA_CMP_MAX,
    .owner      = THIS_MODULE,
};