#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/seqlock.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/dst_metadata.h>
#include <net/ip_tunnels.h>
#include <net/vxlan.h>
#include <net/erspan.h>
#include <net/geneve.h>

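/* Private data for the "tunnel" expression: which tunnel key to fetch,
 * the destination register, the rx/tx match mode and the stored length.
 */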
struct nft_tunnel {
        enum nft_tunnel_keys    key:8;
        u8                      dreg;
        enum nft_tunnel_mode    mode:8;
        u8                      len;
};

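/* Evaluate the tunnel expression: look up the tunnel metadata attached to
 * the packet and store either a boolean (tunnel path present) or the tunnel
 * ID in the destination register, honouring the rx/tx mode filter.
 */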
static void nft_tunnel_get_eval(const struct nft_expr *expr,
                                struct nft_regs *regs,
                                const struct nft_pktinfo *pkt)
{
        const struct nft_tunnel *priv = nft_expr_priv(expr);
        u32 *dest = &regs->data[priv->dreg];
        struct ip_tunnel_info *tun_info;

        tun_info = skb_tunnel_info(pkt->skb);

        switch (priv->key) {
        case NFT_TUNNEL_PATH:
                if (!tun_info) {
                        nft_reg_store8(dest, false);
                        return;
                }
                if (priv->mode == NFT_TUNNEL_MODE_NONE ||
                    (priv->mode == NFT_TUNNEL_MODE_RX &&
                     !(tun_info->mode & IP_TUNNEL_INFO_TX)) ||
                    (priv->mode == NFT_TUNNEL_MODE_TX &&
                     (tun_info->mode & IP_TUNNEL_INFO_TX)))
                        nft_reg_store8(dest, true);
                else
                        nft_reg_store8(dest, false);
                break;
        case NFT_TUNNEL_ID:
                if (!tun_info) {
                        regs->verdict.code = NFT_BREAK;
                        return;
                }
                if (priv->mode == NFT_TUNNEL_MODE_NONE ||
                    (priv->mode == NFT_TUNNEL_MODE_RX &&
                     !(tun_info->mode & IP_TUNNEL_INFO_TX)) ||
                    (priv->mode == NFT_TUNNEL_MODE_TX &&
                     (tun_info->mode & IP_TUNNEL_INFO_TX)))
                        *dest = ntohl(tunnel_id_to_key32(tun_info->key.tun_id));
                else
                        regs->verdict.code = NFT_BREAK;
                break;
        default:
                WARN_ON(1);
                regs->verdict.code = NFT_BREAK;
        }
}

static const struct nla_policy nft_tunnel_policy[NFTA_TUNNEL_MAX + 1] = {
        [NFTA_TUNNEL_KEY]       = { .type = NLA_U32 },
        [NFTA_TUNNEL_DREG]      = { .type = NLA_U32 },
        [NFTA_TUNNEL_MODE]      = { .type = NLA_U32 },
};

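/* Parse the expression attributes: tunnel key selector, destination register
 * and optional rx/tx mode. The register is validated for the length of the
 * value that will be stored in it.
 */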
static int nft_tunnel_get_init(const struct nft_ctx *ctx,
                               const struct nft_expr *expr,
                               const struct nlattr * const tb[])
{
        struct nft_tunnel *priv = nft_expr_priv(expr);
        u32 len;

        if (!tb[NFTA_TUNNEL_KEY] ||
            !tb[NFTA_TUNNEL_DREG])
                return -EINVAL;

        priv->key = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY]));
        switch (priv->key) {
        case NFT_TUNNEL_PATH:
                len = sizeof(u8);
                break;
        case NFT_TUNNEL_ID:
                len = sizeof(u32);
                break;
        default:
                return -EOPNOTSUPP;
        }

        if (tb[NFTA_TUNNEL_MODE]) {
                priv->mode = ntohl(nla_get_be32(tb[NFTA_TUNNEL_MODE]));
                if (priv->mode > NFT_TUNNEL_MODE_MAX)
                        return -EOPNOTSUPP;
        } else {
                priv->mode = NFT_TUNNEL_MODE_NONE;
        }

        priv->len = len;
        return nft_parse_register_store(ctx, tb[NFTA_TUNNEL_DREG], &priv->dreg,
                                        NULL, NFT_DATA_VALUE, len);
}

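/* Dump the expression configuration back to userspace via netlink. */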
static int nft_tunnel_get_dump(struct sk_buff *skb,
                               const struct nft_expr *expr)
{
        const struct nft_tunnel *priv = nft_expr_priv(expr);

        if (nla_put_be32(skb, NFTA_TUNNEL_KEY, htonl(priv->key)))
                goto nla_put_failure;
        if (nft_dump_register(skb, NFTA_TUNNEL_DREG, priv->dreg))
                goto nla_put_failure;
        if (nla_put_be32(skb, NFTA_TUNNEL_MODE, htonl(priv->mode)))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -1;
}

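/* Register tracking: if an identical tunnel expression already loaded this
 * register and no bitwise operation clobbered it since, this expression can
 * be elided.
 */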
static bool nft_tunnel_get_reduce(struct nft_regs_track *track,
                                  const struct nft_expr *expr)
{
        const struct nft_tunnel *priv = nft_expr_priv(expr);
        const struct nft_tunnel *tunnel;

        if (!nft_reg_track_cmp(track, expr, priv->dreg)) {
                nft_reg_track_update(track, expr, priv->dreg, priv->len);
                return false;
        }

        tunnel = nft_expr_priv(track->regs[priv->dreg].selector);
        if (priv->key != tunnel->key ||
            priv->dreg != tunnel->dreg ||
            priv->mode != tunnel->mode) {
                nft_reg_track_update(track, expr, priv->dreg, priv->len);
                return false;
        }

        if (!track->regs[priv->dreg].bitwise)
                return true;

        return false;
}

static struct nft_expr_type nft_tunnel_type;
static const struct nft_expr_ops nft_tunnel_get_ops = {
        .type           = &nft_tunnel_type,
        .size           = NFT_EXPR_SIZE(sizeof(struct nft_tunnel)),
        .eval           = nft_tunnel_get_eval,
        .init           = nft_tunnel_get_init,
        .dump           = nft_tunnel_get_dump,
        .reduce         = nft_tunnel_get_reduce,
};

static struct nft_expr_type nft_tunnel_type __read_mostly = {
        .name           = "tunnel",
        .family         = NFPROTO_NETDEV,
        .ops            = &nft_tunnel_get_ops,
        .policy         = nft_tunnel_policy,
        .maxattr        = NFTA_TUNNEL_MAX,
        .owner          = THIS_MODULE,
};

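/* State for the tunnel object: the pre-built metadata dst attached to
 * matching packets, plus the raw encapsulation options (VXLAN, ERSPAN or
 * GENEVE) used to populate it.
 */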
struct nft_tunnel_opts {
        union {
                struct vxlan_metadata   vxlan;
                struct erspan_metadata  erspan;
                u8      data[IP_TUNNEL_OPTS_MAX];
        } u;
        u32     len;
        __be16  flags;
};

struct nft_tunnel_obj {
        struct metadata_dst     *md;
        struct nft_tunnel_opts  opts;
};

static const struct nla_policy nft_tunnel_ip_policy[NFTA_TUNNEL_KEY_IP_MAX + 1] = {
        [NFTA_TUNNEL_KEY_IP_SRC]        = { .type = NLA_U32 },
        [NFTA_TUNNEL_KEY_IP_DST]        = { .type = NLA_U32 },
};

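/* Parse the IPv4 source/destination addresses of the tunnel endpoint. */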
static int nft_tunnel_obj_ip_init(const struct nft_ctx *ctx,
                                  const struct nlattr *attr,
                                  struct ip_tunnel_info *info)
{
        struct nlattr *tb[NFTA_TUNNEL_KEY_IP_MAX + 1];
        int err;

        err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_IP_MAX, attr,
                                          nft_tunnel_ip_policy, NULL);
        if (err < 0)
                return err;

        if (!tb[NFTA_TUNNEL_KEY_IP_DST])
                return -EINVAL;

        if (tb[NFTA_TUNNEL_KEY_IP_SRC])
                info->key.u.ipv4.src = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP_SRC]);
        if (tb[NFTA_TUNNEL_KEY_IP_DST])
                info->key.u.ipv4.dst = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP_DST]);

        return 0;
}

static const struct nla_policy nft_tunnel_ip6_policy[NFTA_TUNNEL_KEY_IP6_MAX + 1] = {
        [NFTA_TUNNEL_KEY_IP6_SRC]       = { .len = sizeof(struct in6_addr), },
        [NFTA_TUNNEL_KEY_IP6_DST]       = { .len = sizeof(struct in6_addr), },
        [NFTA_TUNNEL_KEY_IP6_FLOWLABEL] = { .type = NLA_U32, }
};

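/* Parse the IPv6 endpoint addresses and optional flow label, and flag the
 * tunnel metadata as IPv6.
 */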
static int nft_tunnel_obj_ip6_init(const struct nft_ctx *ctx,
                                   const struct nlattr *attr,
                                   struct ip_tunnel_info *info)
{
        struct nlattr *tb[NFTA_TUNNEL_KEY_IP6_MAX + 1];
        int err;

        err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_IP6_MAX, attr,
                                          nft_tunnel_ip6_policy, NULL);
        if (err < 0)
                return err;

        if (!tb[NFTA_TUNNEL_KEY_IP6_DST])
                return -EINVAL;

        if (tb[NFTA_TUNNEL_KEY_IP6_SRC]) {
                memcpy(&info->key.u.ipv6.src,
                       nla_data(tb[NFTA_TUNNEL_KEY_IP6_SRC]),
                       sizeof(struct in6_addr));
        }
        if (tb[NFTA_TUNNEL_KEY_IP6_DST]) {
                memcpy(&info->key.u.ipv6.dst,
                       nla_data(tb[NFTA_TUNNEL_KEY_IP6_DST]),
                       sizeof(struct in6_addr));
        }
        if (tb[NFTA_TUNNEL_KEY_IP6_FLOWLABEL])
                info->key.label = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP6_FLOWLABEL]);

        info->mode |= IP_TUNNEL_INFO_IPV6;

        return 0;
}

static const struct nla_policy nft_tunnel_opts_vxlan_policy[NFTA_TUNNEL_KEY_VXLAN_MAX + 1] = {
        [NFTA_TUNNEL_KEY_VXLAN_GBP]     = { .type = NLA_U32 },
};

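/* Parse the VXLAN Group Based Policy (GBP) option. */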
static int nft_tunnel_obj_vxlan_init(const struct nlattr *attr,
                                     struct nft_tunnel_opts *opts)
{
        struct nlattr *tb[NFTA_TUNNEL_KEY_VXLAN_MAX + 1];
        int err;

        err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_VXLAN_MAX, attr,
                                          nft_tunnel_opts_vxlan_policy, NULL);
        if (err < 0)
                return err;

        if (!tb[NFTA_TUNNEL_KEY_VXLAN_GBP])
                return -EINVAL;

        opts->u.vxlan.gbp = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_VXLAN_GBP]));

        opts->len       = sizeof(struct vxlan_metadata);
        opts->flags     = TUNNEL_VXLAN_OPT;

        return 0;
}

static const struct nla_policy nft_tunnel_opts_erspan_policy[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1] = {
        [NFTA_TUNNEL_KEY_ERSPAN_VERSION]        = { .type = NLA_U32 },
        [NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX]       = { .type = NLA_U32 },
        [NFTA_TUNNEL_KEY_ERSPAN_V2_DIR]         = { .type = NLA_U8 },
        [NFTA_TUNNEL_KEY_ERSPAN_V2_HWID]        = { .type = NLA_U8 },
};

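/* Parse the ERSPAN option: version 1 carries an index, version 2 carries a
 * direction and hardware ID.
 */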
static int nft_tunnel_obj_erspan_init(const struct nlattr *attr,
                                      struct nft_tunnel_opts *opts)
{
        struct nlattr *tb[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1];
        uint8_t hwid, dir;
        int err, version;

        err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_ERSPAN_MAX,
                                          attr, nft_tunnel_opts_erspan_policy,
                                          NULL);
        if (err < 0)
                return err;

        if (!tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION])
                return -EINVAL;

        version = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION]));
        switch (version) {
        case ERSPAN_VERSION:
                if (!tb[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX])
                        return -EINVAL;

                opts->u.erspan.u.index =
                        nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX]);
                break;
        case ERSPAN_VERSION2:
                if (!tb[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR] ||
                    !tb[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID])
                        return -EINVAL;

                hwid = nla_get_u8(tb[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID]);
                dir = nla_get_u8(tb[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR]);

                set_hwid(&opts->u.erspan.u.md2, hwid);
                opts->u.erspan.u.md2.dir = dir;
                break;
        default:
                return -EOPNOTSUPP;
        }
        opts->u.erspan.version = version;

        opts->len       = sizeof(struct erspan_metadata);
        opts->flags     = TUNNEL_ERSPAN_OPT;

        return 0;
}

static const struct nla_policy nft_tunnel_opts_geneve_policy[NFTA_TUNNEL_KEY_GENEVE_MAX + 1] = {
        [NFTA_TUNNEL_KEY_GENEVE_CLASS]  = { .type = NLA_U16 },
        [NFTA_TUNNEL_KEY_GENEVE_TYPE]   = { .type = NLA_U8 },
        [NFTA_TUNNEL_KEY_GENEVE_DATA]   = { .type = NLA_BINARY, .len = 128 },
};

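/* Append one GENEVE option (class, type, data) at the current byte offset in
 * the raw option buffer. GENEVE options may be supplied more than once; each
 * call advances opts->len.
 */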
static int nft_tunnel_obj_geneve_init(const struct nlattr *attr,
                                      struct nft_tunnel_opts *opts)
{
        struct geneve_opt *opt = (struct geneve_opt *)(opts->u.data + opts->len);
        struct nlattr *tb[NFTA_TUNNEL_KEY_GENEVE_MAX + 1];
        int err, data_len;

        err = nla_parse_nested(tb, NFTA_TUNNEL_KEY_GENEVE_MAX, attr,
                               nft_tunnel_opts_geneve_policy, NULL);
        if (err < 0)
                return err;

        if (!tb[NFTA_TUNNEL_KEY_GENEVE_CLASS] ||
            !tb[NFTA_TUNNEL_KEY_GENEVE_TYPE] ||
            !tb[NFTA_TUNNEL_KEY_GENEVE_DATA])
                return -EINVAL;

        attr = tb[NFTA_TUNNEL_KEY_GENEVE_DATA];
        data_len = nla_len(attr);
        if (data_len % 4)
                return -EINVAL;

        opts->len += sizeof(*opt) + data_len;
        if (opts->len > IP_TUNNEL_OPTS_MAX)
                return -EINVAL;

        memcpy(opt->opt_data, nla_data(attr), data_len);
        opt->length = data_len / 4;
        opt->opt_class = nla_get_be16(tb[NFTA_TUNNEL_KEY_GENEVE_CLASS]);
        opt->type = nla_get_u8(tb[NFTA_TUNNEL_KEY_GENEVE_TYPE]);
        opts->flags = TUNNEL_GENEVE_OPT;

        return 0;
}

static const struct nla_policy nft_tunnel_opts_policy[NFTA_TUNNEL_KEY_OPTS_MAX + 1] = {
        [NFTA_TUNNEL_KEY_OPTS_UNSPEC]   = {
                .strict_start_type = NFTA_TUNNEL_KEY_OPTS_GENEVE },
        [NFTA_TUNNEL_KEY_OPTS_VXLAN]    = { .type = NLA_NESTED, },
        [NFTA_TUNNEL_KEY_OPTS_ERSPAN]   = { .type = NLA_NESTED, },
        [NFTA_TUNNEL_KEY_OPTS_GENEVE]   = { .type = NLA_NESTED, },
};

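/* Walk the NFTA_TUNNEL_KEY_OPTS nest and dispatch to the per-type option
 * parser. Only one option type is accepted, except GENEVE, which may be
 * repeated.
 */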
static int nft_tunnel_obj_opts_init(const struct nft_ctx *ctx,
                                    const struct nlattr *attr,
                                    struct ip_tunnel_info *info,
                                    struct nft_tunnel_opts *opts)
{
        struct nlattr *nla;
        __be16 type = 0;
        int err, rem;

        err = nla_validate_nested_deprecated(attr, NFTA_TUNNEL_KEY_OPTS_MAX,
                                             nft_tunnel_opts_policy, NULL);
        if (err < 0)
                return err;

        nla_for_each_attr(nla, nla_data(attr), nla_len(attr), rem) {
                switch (nla_type(nla)) {
                case NFTA_TUNNEL_KEY_OPTS_VXLAN:
                        if (type)
                                return -EINVAL;
                        err = nft_tunnel_obj_vxlan_init(nla, opts);
                        if (err)
                                return err;
                        type = TUNNEL_VXLAN_OPT;
                        break;
                case NFTA_TUNNEL_KEY_OPTS_ERSPAN:
                        if (type)
                                return -EINVAL;
                        err = nft_tunnel_obj_erspan_init(nla, opts);
                        if (err)
                                return err;
                        type = TUNNEL_ERSPAN_OPT;
                        break;
                case NFTA_TUNNEL_KEY_OPTS_GENEVE:
                        if (type && type != TUNNEL_GENEVE_OPT)
                                return -EINVAL;
                        err = nft_tunnel_obj_geneve_init(nla, opts);
                        if (err)
                                return err;
                        type = TUNNEL_GENEVE_OPT;
                        break;
                default:
                        return -EOPNOTSUPP;
                }
        }

        return err;
}

static const struct nla_policy nft_tunnel_key_policy[NFTA_TUNNEL_KEY_MAX + 1] = {
        [NFTA_TUNNEL_KEY_IP]    = { .type = NLA_NESTED, },
        [NFTA_TUNNEL_KEY_IP6]   = { .type = NLA_NESTED, },
        [NFTA_TUNNEL_KEY_ID]    = { .type = NLA_U32, },
        [NFTA_TUNNEL_KEY_FLAGS] = { .type = NLA_U32, },
        [NFTA_TUNNEL_KEY_TOS]   = { .type = NLA_U8, },
        [NFTA_TUNNEL_KEY_TTL]   = { .type = NLA_U8, },
        [NFTA_TUNNEL_KEY_SPORT] = { .type = NLA_U16, },
        [NFTA_TUNNEL_KEY_DPORT] = { .type = NLA_U16, },
        [NFTA_TUNNEL_KEY_OPTS]  = { .type = NLA_NESTED, },
};

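/* Instantiate a tunnel object: build an ip_tunnel_info from the netlink
 * attributes and wrap it in a preallocated metadata dst that the eval path
 * can attach to packets.
 */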
static int nft_tunnel_obj_init(const struct nft_ctx *ctx,
                               const struct nlattr * const tb[],
                               struct nft_object *obj)
{
        struct nft_tunnel_obj *priv = nft_obj_data(obj);
        struct ip_tunnel_info info;
        struct metadata_dst *md;
        int err;

        if (!tb[NFTA_TUNNEL_KEY_ID])
                return -EINVAL;

        memset(&info, 0, sizeof(info));
        info.mode = IP_TUNNEL_INFO_TX;
        info.key.tun_id = key32_to_tunnel_id(nla_get_be32(tb[NFTA_TUNNEL_KEY_ID]));
        info.key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;

        if (tb[NFTA_TUNNEL_KEY_IP]) {
                err = nft_tunnel_obj_ip_init(ctx, tb[NFTA_TUNNEL_KEY_IP], &info);
                if (err < 0)
                        return err;
        } else if (tb[NFTA_TUNNEL_KEY_IP6]) {
                err = nft_tunnel_obj_ip6_init(ctx, tb[NFTA_TUNNEL_KEY_IP6], &info);
                if (err < 0)
                        return err;
        } else {
                return -EINVAL;
        }

        if (tb[NFTA_TUNNEL_KEY_SPORT]) {
                info.key.tp_src = nla_get_be16(tb[NFTA_TUNNEL_KEY_SPORT]);
        }
        if (tb[NFTA_TUNNEL_KEY_DPORT]) {
                info.key.tp_dst = nla_get_be16(tb[NFTA_TUNNEL_KEY_DPORT]);
        }

        if (tb[NFTA_TUNNEL_KEY_FLAGS]) {
                u32 tun_flags;

                tun_flags = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_FLAGS]));
                if (tun_flags & ~NFT_TUNNEL_F_MASK)
                        return -EOPNOTSUPP;

                if (tun_flags & NFT_TUNNEL_F_ZERO_CSUM_TX)
                        info.key.tun_flags &= ~TUNNEL_CSUM;
                if (tun_flags & NFT_TUNNEL_F_DONT_FRAGMENT)
                        info.key.tun_flags |= TUNNEL_DONT_FRAGMENT;
                if (tun_flags & NFT_TUNNEL_F_SEQ_NUMBER)
                        info.key.tun_flags |= TUNNEL_SEQ;
        }
        if (tb[NFTA_TUNNEL_KEY_TOS])
                info.key.tos = nla_get_u8(tb[NFTA_TUNNEL_KEY_TOS]);
        if (tb[NFTA_TUNNEL_KEY_TTL])
                info.key.ttl = nla_get_u8(tb[NFTA_TUNNEL_KEY_TTL]);
        else
                info.key.ttl = U8_MAX;

        if (tb[NFTA_TUNNEL_KEY_OPTS]) {
                err = nft_tunnel_obj_opts_init(ctx, tb[NFTA_TUNNEL_KEY_OPTS],
                                               &info, &priv->opts);
                if (err < 0)
                        return err;
        }

        md = metadata_dst_alloc(priv->opts.len, METADATA_IP_TUNNEL, GFP_KERNEL);
        if (!md)
                return -ENOMEM;

        memcpy(&md->u.tun_info, &info, sizeof(info));
#ifdef CONFIG_DST_CACHE
        err = dst_cache_init(&md->u.tun_info.dst_cache, GFP_KERNEL);
        if (err < 0) {
                metadata_dst_free(md);
                return err;
        }
#endif
        ip_tunnel_info_opts_set(&md->u.tun_info, &priv->opts.u, priv->opts.len,
                                priv->opts.flags);
        priv->md = md;

        return 0;
}

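/* Attach the tunnel metadata dst to the packet so that a metadata-mode
 * tunnel device can encapsulate it on transmit.
 */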
static inline void nft_tunnel_obj_eval(struct nft_object *obj,
                                       struct nft_regs *regs,
                                       const struct nft_pktinfo *pkt)
{
        struct nft_tunnel_obj *priv = nft_obj_data(obj);
        struct sk_buff *skb = pkt->skb;

        skb_dst_drop(skb);
        dst_hold((struct dst_entry *) priv->md);
        skb_dst_set(skb, (struct dst_entry *) priv->md);
}

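/* Dump the tunnel endpoint addresses as an IPv4 or IPv6 nest. */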
static int nft_tunnel_ip_dump(struct sk_buff *skb, struct ip_tunnel_info *info)
{
        struct nlattr *nest;

        if (info->mode & IP_TUNNEL_INFO_IPV6) {
                nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_IP6);
                if (!nest)
                        return -1;

                if (nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_SRC,
                                     &info->key.u.ipv6.src) < 0 ||
                    nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_DST,
                                     &info->key.u.ipv6.dst) < 0 ||
                    nla_put_be32(skb, NFTA_TUNNEL_KEY_IP6_FLOWLABEL,
                                 info->key.label)) {
                        nla_nest_cancel(skb, nest);
                        return -1;
                }

                nla_nest_end(skb, nest);
        } else {
                nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_IP);
                if (!nest)
                        return -1;

                if (nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_SRC,
                                    info->key.u.ipv4.src) < 0 ||
                    nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_DST,
                                    info->key.u.ipv4.dst) < 0) {
                        nla_nest_cancel(skb, nest);
                        return -1;
                }

                nla_nest_end(skb, nest);
        }

        return 0;
}

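/* Dump the encapsulation options; for GENEVE, walk the raw buffer and emit
 * every stored option.
 */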
static int nft_tunnel_opts_dump(struct sk_buff *skb,
                                struct nft_tunnel_obj *priv)
{
        struct nft_tunnel_opts *opts = &priv->opts;
        struct nlattr *nest, *inner;

        nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS);
        if (!nest)
                return -1;

        if (opts->flags & TUNNEL_VXLAN_OPT) {
                inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_VXLAN);
                if (!inner)
                        goto failure;
                if (nla_put_be32(skb, NFTA_TUNNEL_KEY_VXLAN_GBP,
                                 htonl(opts->u.vxlan.gbp)))
                        goto inner_failure;
                nla_nest_end(skb, inner);
        } else if (opts->flags & TUNNEL_ERSPAN_OPT) {
                inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_ERSPAN);
                if (!inner)
                        goto failure;
                if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ERSPAN_VERSION,
                                 htonl(opts->u.erspan.version)))
                        goto inner_failure;
                switch (opts->u.erspan.version) {
                case ERSPAN_VERSION:
                        if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX,
                                         opts->u.erspan.u.index))
                                goto inner_failure;
                        break;
                case ERSPAN_VERSION2:
                        if (nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_HWID,
                                       get_hwid(&opts->u.erspan.u.md2)) ||
                            nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_DIR,
                                       opts->u.erspan.u.md2.dir))
                                goto inner_failure;
                        break;
                }
                nla_nest_end(skb, inner);
        } else if (opts->flags & TUNNEL_GENEVE_OPT) {
                struct geneve_opt *opt;
                int offset = 0;

                inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_GENEVE);
                if (!inner)
                        goto failure;
                while (opts->len > offset) {
                        opt = (struct geneve_opt *)(opts->u.data + offset);
                        if (nla_put_be16(skb, NFTA_TUNNEL_KEY_GENEVE_CLASS,
                                         opt->opt_class) ||
                            nla_put_u8(skb, NFTA_TUNNEL_KEY_GENEVE_TYPE,
                                       opt->type) ||
                            nla_put(skb, NFTA_TUNNEL_KEY_GENEVE_DATA,
                                    opt->length * 4, opt->opt_data))
                                goto inner_failure;
                        offset += sizeof(*opt) + opt->length * 4;
                }
                nla_nest_end(skb, inner);
        }
        nla_nest_end(skb, nest);
        return 0;

inner_failure:
        nla_nest_cancel(skb, inner);
failure:
        nla_nest_cancel(skb, nest);
        return -1;
}

static int nft_tunnel_ports_dump(struct sk_buff *skb,
                                 struct ip_tunnel_info *info)
{
        if (nla_put_be16(skb, NFTA_TUNNEL_KEY_SPORT, info->key.tp_src) < 0 ||
            nla_put_be16(skb, NFTA_TUNNEL_KEY_DPORT, info->key.tp_dst) < 0)
                return -1;

        return 0;
}

static int nft_tunnel_flags_dump(struct sk_buff *skb,
                                 struct ip_tunnel_info *info)
{
        u32 flags = 0;

        if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
                flags |= NFT_TUNNEL_F_DONT_FRAGMENT;
        if (!(info->key.tun_flags & TUNNEL_CSUM))
                flags |= NFT_TUNNEL_F_ZERO_CSUM_TX;
        if (info->key.tun_flags & TUNNEL_SEQ)
                flags |= NFT_TUNNEL_F_SEQ_NUMBER;

        if (nla_put_be32(skb, NFTA_TUNNEL_KEY_FLAGS, htonl(flags)) < 0)
                return -1;

        return 0;
}

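/* Dump the full tunnel object configuration back to userspace. */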
static int nft_tunnel_obj_dump(struct sk_buff *skb,
                               struct nft_object *obj, bool reset)
{
        struct nft_tunnel_obj *priv = nft_obj_data(obj);
        struct ip_tunnel_info *info = &priv->md->u.tun_info;

        if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ID,
                         tunnel_id_to_key32(info->key.tun_id)) ||
            nft_tunnel_ip_dump(skb, info) < 0 ||
            nft_tunnel_ports_dump(skb, info) < 0 ||
            nft_tunnel_flags_dump(skb, info) < 0 ||
            nla_put_u8(skb, NFTA_TUNNEL_KEY_TOS, info->key.tos) ||
            nla_put_u8(skb, NFTA_TUNNEL_KEY_TTL, info->key.ttl) ||
            nft_tunnel_opts_dump(skb, priv) < 0)
                goto nla_put_failure;

        return 0;

nla_put_failure:
        return -1;
}

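/* Release the metadata dst allocated at object creation time. */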
static void nft_tunnel_obj_destroy(const struct nft_ctx *ctx,
                                   struct nft_object *obj)
{
        struct nft_tunnel_obj *priv = nft_obj_data(obj);

        metadata_dst_free(priv->md);
}

static struct nft_object_type nft_tunnel_obj_type;
static const struct nft_object_ops nft_tunnel_obj_ops = {
        .type           = &nft_tunnel_obj_type,
        .size           = sizeof(struct nft_tunnel_obj),
        .eval           = nft_tunnel_obj_eval,
        .init           = nft_tunnel_obj_init,
        .destroy        = nft_tunnel_obj_destroy,
        .dump           = nft_tunnel_obj_dump,
};

static struct nft_object_type nft_tunnel_obj_type __read_mostly = {
        .type           = NFT_OBJECT_TUNNEL,
        .ops            = &nft_tunnel_obj_ops,
        .maxattr        = NFTA_TUNNEL_KEY_MAX,
        .policy         = nft_tunnel_key_policy,
        .owner          = THIS_MODULE,
};

static int __init nft_tunnel_module_init(void)
{
        int err;

        err = nft_register_expr(&nft_tunnel_type);
        if (err < 0)
                return err;

        err = nft_register_obj(&nft_tunnel_obj_type);
        if (err < 0)
                nft_unregister_expr(&nft_tunnel_type);

        return err;
}

static void __exit nft_tunnel_module_exit(void)
{
        nft_unregister_obj(&nft_tunnel_obj_type);
        nft_unregister_expr(&nft_tunnel_type);
}

module_init(nft_tunnel_module_init);
module_exit(nft_tunnel_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_ALIAS_NFT_EXPR("tunnel");
MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_TUNNEL);
MODULE_DESCRIPTION("nftables tunnel expression support");