0001
0002
0003
0004
0005
0006
0007
0008
0009
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/if_vlan.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <linux/tc_act/tc_ipt.h>
#include <net/tc_act/tc_ipt.h>

#include <linux/netfilter_ipv4/ip_tables.h>
0025
0026
0027 static unsigned int ipt_net_id;
0028 static struct tc_action_ops act_ipt_ops;
0029
0030 static unsigned int xt_net_id;
0031 static struct tc_action_ops act_xt_ops;
0032
0033 static int ipt_init_target(struct net *net, struct xt_entry_target *t,
0034 char *table, unsigned int hook)
0035 {
0036 struct xt_tgchk_param par;
0037 struct xt_target *target;
0038 struct ipt_entry e = {};
0039 int ret = 0;
0040
0041 target = xt_request_find_target(AF_INET, t->u.user.name,
0042 t->u.user.revision);
0043 if (IS_ERR(target))
0044 return PTR_ERR(target);
0045
0046 t->u.kernel.target = target;
0047 memset(&par, 0, sizeof(par));
0048 par.net = net;
0049 par.table = table;
0050 par.entryinfo = &e;
0051 par.target = target;
0052 par.targinfo = t->data;
0053 par.hook_mask = hook;
0054 par.family = NFPROTO_IPV4;
0055
0056 ret = xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false);
0057 if (ret < 0) {
0058 module_put(t->u.kernel.target->me);
0059 return ret;
0060 }
0061 return 0;
0062 }
0063
0064 static void ipt_destroy_target(struct xt_entry_target *t, struct net *net)
0065 {
0066 struct xt_tgdtor_param par = {
0067 .target = t->u.kernel.target,
0068 .targinfo = t->data,
0069 .family = NFPROTO_IPV4,
0070 .net = net,
0071 };
0072 if (par.target->destroy != NULL)
0073 par.target->destroy(&par);
0074 module_put(par.target->me);
0075 }
0076
0077 static void tcf_ipt_release(struct tc_action *a)
0078 {
0079 struct tcf_ipt *ipt = to_ipt(a);
0080
0081 if (ipt->tcfi_t) {
0082 ipt_destroy_target(ipt->tcfi_t, a->idrinfo->net);
0083 kfree(ipt->tcfi_t);
0084 }
0085 kfree(ipt->tcfi_tname);
0086 }
0087
/* Netlink attribute policy for TCA_IPT_* attributes parsed in
 * __tcf_ipt_init().
 */
static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = {
	[TCA_IPT_TABLE]	= { .type = NLA_STRING, .len = IFNAMSIZ }, /* table name */
	[TCA_IPT_HOOK]	= { .type = NLA_U32 },	/* netfilter hook number */
	[TCA_IPT_INDEX]	= { .type = NLA_U32 },	/* action index */
	[TCA_IPT_TARG]	= { .len = sizeof(struct xt_entry_target) }, /* min len; full size re-checked in init */
};
0094
0095 static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
0096 struct nlattr *est, struct tc_action **a,
0097 const struct tc_action_ops *ops,
0098 struct tcf_proto *tp, u32 flags)
0099 {
0100 struct tc_action_net *tn = net_generic(net, id);
0101 bool bind = flags & TCA_ACT_FLAGS_BIND;
0102 struct nlattr *tb[TCA_IPT_MAX + 1];
0103 struct tcf_ipt *ipt;
0104 struct xt_entry_target *td, *t;
0105 char *tname;
0106 bool exists = false;
0107 int ret = 0, err;
0108 u32 hook = 0;
0109 u32 index = 0;
0110
0111 if (nla == NULL)
0112 return -EINVAL;
0113
0114 err = nla_parse_nested_deprecated(tb, TCA_IPT_MAX, nla, ipt_policy,
0115 NULL);
0116 if (err < 0)
0117 return err;
0118
0119 if (tb[TCA_IPT_INDEX] != NULL)
0120 index = nla_get_u32(tb[TCA_IPT_INDEX]);
0121
0122 err = tcf_idr_check_alloc(tn, &index, a, bind);
0123 if (err < 0)
0124 return err;
0125 exists = err;
0126 if (exists && bind)
0127 return 0;
0128
0129 if (tb[TCA_IPT_HOOK] == NULL || tb[TCA_IPT_TARG] == NULL) {
0130 if (exists)
0131 tcf_idr_release(*a, bind);
0132 else
0133 tcf_idr_cleanup(tn, index);
0134 return -EINVAL;
0135 }
0136
0137 td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]);
0138 if (nla_len(tb[TCA_IPT_TARG]) != td->u.target_size) {
0139 if (exists)
0140 tcf_idr_release(*a, bind);
0141 else
0142 tcf_idr_cleanup(tn, index);
0143 return -EINVAL;
0144 }
0145
0146 if (!exists) {
0147 ret = tcf_idr_create(tn, index, est, a, ops, bind,
0148 false, flags);
0149 if (ret) {
0150 tcf_idr_cleanup(tn, index);
0151 return ret;
0152 }
0153 ret = ACT_P_CREATED;
0154 } else {
0155 if (bind)
0156 return 0;
0157
0158 if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
0159 tcf_idr_release(*a, bind);
0160 return -EEXIST;
0161 }
0162 }
0163 hook = nla_get_u32(tb[TCA_IPT_HOOK]);
0164
0165 err = -ENOMEM;
0166 tname = kmalloc(IFNAMSIZ, GFP_KERNEL);
0167 if (unlikely(!tname))
0168 goto err1;
0169 if (tb[TCA_IPT_TABLE] == NULL ||
0170 nla_strscpy(tname, tb[TCA_IPT_TABLE], IFNAMSIZ) >= IFNAMSIZ)
0171 strcpy(tname, "mangle");
0172
0173 t = kmemdup(td, td->u.target_size, GFP_KERNEL);
0174 if (unlikely(!t))
0175 goto err2;
0176
0177 err = ipt_init_target(net, t, tname, hook);
0178 if (err < 0)
0179 goto err3;
0180
0181 ipt = to_ipt(*a);
0182
0183 spin_lock_bh(&ipt->tcf_lock);
0184 if (ret != ACT_P_CREATED) {
0185 ipt_destroy_target(ipt->tcfi_t, net);
0186 kfree(ipt->tcfi_tname);
0187 kfree(ipt->tcfi_t);
0188 }
0189 ipt->tcfi_tname = tname;
0190 ipt->tcfi_t = t;
0191 ipt->tcfi_hook = hook;
0192 spin_unlock_bh(&ipt->tcf_lock);
0193 return ret;
0194
0195 err3:
0196 kfree(t);
0197 err2:
0198 kfree(tname);
0199 err1:
0200 tcf_idr_release(*a, bind);
0201 return err;
0202 }
0203
0204 static int tcf_ipt_init(struct net *net, struct nlattr *nla,
0205 struct nlattr *est, struct tc_action **a,
0206 struct tcf_proto *tp,
0207 u32 flags, struct netlink_ext_ack *extack)
0208 {
0209 return __tcf_ipt_init(net, ipt_net_id, nla, est, a, &act_ipt_ops,
0210 tp, flags);
0211 }
0212
0213 static int tcf_xt_init(struct net *net, struct nlattr *nla,
0214 struct nlattr *est, struct tc_action **a,
0215 struct tcf_proto *tp,
0216 u32 flags, struct netlink_ext_ack *extack)
0217 {
0218 return __tcf_ipt_init(net, xt_net_id, nla, est, a, &act_xt_ops,
0219 tp, flags);
0220 }
0221
0222 static int tcf_ipt_act(struct sk_buff *skb, const struct tc_action *a,
0223 struct tcf_result *res)
0224 {
0225 int ret = 0, result = 0;
0226 struct tcf_ipt *ipt = to_ipt(a);
0227 struct xt_action_param par;
0228 struct nf_hook_state state = {
0229 .net = dev_net(skb->dev),
0230 .in = skb->dev,
0231 .hook = ipt->tcfi_hook,
0232 .pf = NFPROTO_IPV4,
0233 };
0234
0235 if (skb_unclone(skb, GFP_ATOMIC))
0236 return TC_ACT_UNSPEC;
0237
0238 spin_lock(&ipt->tcf_lock);
0239
0240 tcf_lastuse_update(&ipt->tcf_tm);
0241 bstats_update(&ipt->tcf_bstats, skb);
0242
0243
0244
0245
0246
0247 par.state = &state;
0248 par.target = ipt->tcfi_t->u.kernel.target;
0249 par.targinfo = ipt->tcfi_t->data;
0250 ret = par.target->target(skb, &par);
0251
0252 switch (ret) {
0253 case NF_ACCEPT:
0254 result = TC_ACT_OK;
0255 break;
0256 case NF_DROP:
0257 result = TC_ACT_SHOT;
0258 ipt->tcf_qstats.drops++;
0259 break;
0260 case XT_CONTINUE:
0261 result = TC_ACT_PIPE;
0262 break;
0263 default:
0264 net_notice_ratelimited("tc filter: Bogus netfilter code %d assume ACCEPT\n",
0265 ret);
0266 result = TC_ACT_OK;
0267 break;
0268 }
0269 spin_unlock(&ipt->tcf_lock);
0270 return result;
0271
0272 }
0273
/* ->dump hook: serialize the action's configuration (target blob, index,
 * hook, counters, table name, timestamps) into @skb.
 *
 * Returns skb->len on success, -1 on failure (with skb trimmed back to
 * its state on entry).
 */
static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_ipt *ipt = to_ipt(a);
	struct xt_entry_target *t;
	struct tcf_t tm;
	struct tc_cnt c;

	/* Dump a copy of the target blob rather than the live one: the
	 * user-visible name field is rewritten below, and this assumes
	 * kernel size == user size for simple targets.
	 */
	spin_lock_bh(&ipt->tcf_lock);
	t = kmemdup(ipt->tcfi_t, ipt->tcfi_t->u.user.target_size, GFP_ATOMIC);
	if (unlikely(!t))
		goto nla_put_failure;	/* t == NULL; kfree(NULL) below is fine */

	c.bindcnt = atomic_read(&ipt->tcf_bindcnt) - bind;
	c.refcnt = refcount_read(&ipt->tcf_refcnt) - ref;
	/* report the resolved kernel target name back to userspace */
	strcpy(t->u.user.name, ipt->tcfi_t->u.kernel.target->name);

	if (nla_put(skb, TCA_IPT_TARG, ipt->tcfi_t->u.user.target_size, t) ||
	    nla_put_u32(skb, TCA_IPT_INDEX, ipt->tcf_index) ||
	    nla_put_u32(skb, TCA_IPT_HOOK, ipt->tcfi_hook) ||
	    nla_put(skb, TCA_IPT_CNT, sizeof(struct tc_cnt), &c) ||
	    nla_put_string(skb, TCA_IPT_TABLE, ipt->tcfi_tname))
		goto nla_put_failure;

	tcf_tm_dump(&tm, &ipt->tcf_tm);
	if (nla_put_64bit(skb, TCA_IPT_TM, sizeof(tm), &tm, TCA_IPT_PAD))
		goto nla_put_failure;

	spin_unlock_bh(&ipt->tcf_lock);
	kfree(t);
	return skb->len;

nla_put_failure:
	spin_unlock_bh(&ipt->tcf_lock);
	nlmsg_trim(skb, b);	/* undo any partially written attributes */
	kfree(t);
	return -1;
}
0318
0319 static int tcf_ipt_walker(struct net *net, struct sk_buff *skb,
0320 struct netlink_callback *cb, int type,
0321 const struct tc_action_ops *ops,
0322 struct netlink_ext_ack *extack)
0323 {
0324 struct tc_action_net *tn = net_generic(net, ipt_net_id);
0325
0326 return tcf_generic_walker(tn, skb, cb, type, ops, extack);
0327 }
0328
0329 static int tcf_ipt_search(struct net *net, struct tc_action **a, u32 index)
0330 {
0331 struct tc_action_net *tn = net_generic(net, ipt_net_id);
0332
0333 return tcf_idr_search(tn, a, index);
0334 }
0335
/* tc action ops for the "ipt" flavour; shares act/dump/cleanup with "xt". */
static struct tc_action_ops act_ipt_ops = {
	.kind		=	"ipt",
	.id		=	TCA_ID_IPT,
	.owner		=	THIS_MODULE,
	.act		=	tcf_ipt_act,
	.dump		=	tcf_ipt_dump,
	.cleanup	=	tcf_ipt_release,
	.init		=	tcf_ipt_init,
	.walk		=	tcf_ipt_walker,
	.lookup		=	tcf_ipt_search,
	.size		=	sizeof(struct tcf_ipt),
};
0348
0349 static __net_init int ipt_init_net(struct net *net)
0350 {
0351 struct tc_action_net *tn = net_generic(net, ipt_net_id);
0352
0353 return tc_action_net_init(net, tn, &act_ipt_ops);
0354 }
0355
/* Per-netns batched teardown for "ipt". */
static void __net_exit ipt_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, ipt_net_id);
}
0360
/* pernet registration for the "ipt" flavour's per-namespace state. */
static struct pernet_operations ipt_net_ops = {
	.init = ipt_init_net,
	.exit_batch = ipt_exit_net,
	.id   = &ipt_net_id,
	.size = sizeof(struct tc_action_net),
};
0367
0368 static int tcf_xt_walker(struct net *net, struct sk_buff *skb,
0369 struct netlink_callback *cb, int type,
0370 const struct tc_action_ops *ops,
0371 struct netlink_ext_ack *extack)
0372 {
0373 struct tc_action_net *tn = net_generic(net, xt_net_id);
0374
0375 return tcf_generic_walker(tn, skb, cb, type, ops, extack);
0376 }
0377
0378 static int tcf_xt_search(struct net *net, struct tc_action **a, u32 index)
0379 {
0380 struct tc_action_net *tn = net_generic(net, xt_net_id);
0381
0382 return tcf_idr_search(tn, a, index);
0383 }
0384
/* tc action ops for the "xt" flavour; identical to "ipt" except for
 * kind/id/init and its own per-netns id.
 */
static struct tc_action_ops act_xt_ops = {
	.kind		=	"xt",
	.id		=	TCA_ID_XT,
	.owner		=	THIS_MODULE,
	.act		=	tcf_ipt_act,
	.dump		=	tcf_ipt_dump,
	.cleanup	=	tcf_ipt_release,
	.init		=	tcf_xt_init,
	.walk		=	tcf_xt_walker,
	.lookup		=	tcf_xt_search,
	.size		=	sizeof(struct tcf_ipt),
};
0397
0398 static __net_init int xt_init_net(struct net *net)
0399 {
0400 struct tc_action_net *tn = net_generic(net, xt_net_id);
0401
0402 return tc_action_net_init(net, tn, &act_xt_ops);
0403 }
0404
/* Per-netns batched teardown for "xt". */
static void __net_exit xt_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, xt_net_id);
}
0409
/* pernet registration for the "xt" flavour's per-namespace state. */
static struct pernet_operations xt_net_ops = {
	.init = xt_init_net,
	.exit_batch = xt_exit_net,
	.id   = &xt_net_id,
	.size = sizeof(struct tc_action_net),
};
0416
0417 MODULE_AUTHOR("Jamal Hadi Salim(2002-13)");
0418 MODULE_DESCRIPTION("Iptables target actions");
0419 MODULE_LICENSE("GPL");
0420 MODULE_ALIAS("act_xt");
0421
/* Register both the "xt" and "ipt" flavours of the action.  Loading is
 * best effort: the module init succeeds if at least one flavour
 * registered, and fails only when both did.
 */
static int __init ipt_init_module(void)
{
	int ret1, ret2;

	ret1 = tcf_register_action(&act_xt_ops, &xt_net_ops);
	if (ret1 < 0)
		pr_err("Failed to load xt action\n");

	ret2 = tcf_register_action(&act_ipt_ops, &ipt_net_ops);
	if (ret2 < 0)
		pr_err("Failed to load ipt action\n");

	/* NOTE(review): on partial failure the exit path still tries to
	 * unregister both flavours — confirm tcf_unregister_action()
	 * tolerates an ops that never registered.
	 */
	if (ret1 < 0 && ret2 < 0) {
		return ret1;
	} else
		return 0;
}
0439
/* Unregister both action flavours, in reverse order of registration. */
static void __exit ipt_cleanup_module(void)
{
	tcf_unregister_action(&act_ipt_ops, &ipt_net_ops);
	tcf_unregister_action(&act_xt_ops, &xt_net_ops);
}
0445
0446 module_init(ipt_init_module);
0447 module_exit(ipt_cleanup_module);