0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0013 #include <linux/kernel.h>
0014 #include <linux/module.h>
0015 #include <linux/socket.h>
0016 #include <linux/net.h>
0017 #include <linux/proc_fs.h>
0018 #include <linux/seq_file.h>
0019 #include <linux/string.h>
0020 #include <linux/vmalloc.h>
0021 #include <linux/mutex.h>
0022 #include <linux/mm.h>
0023 #include <linux/slab.h>
0024 #include <linux/audit.h>
0025 #include <linux/user_namespace.h>
0026 #include <net/net_namespace.h>
0027 #include <net/netns/generic.h>
0028
0029 #include <linux/netfilter/x_tables.h>
0030 #include <linux/netfilter_arp.h>
0031 #include <linux/netfilter_ipv4/ip_tables.h>
0032 #include <linux/netfilter_ipv6/ip6_tables.h>
0033 #include <linux/netfilter_arp/arp_tables.h>
0034
0035 MODULE_LICENSE("GPL");
0036 MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
0037 MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");
0038
0039 #define XT_PCPU_BLOCK_SIZE 4096
0040 #define XT_MAX_TABLE_SIZE (512 * 1024 * 1024)
0041
/*
 * Template describing a table that its owning module can instantiate
 * on demand in a network namespace (see xt_find_table_lock()).
 */
struct xt_template {
	struct list_head list;

	/* called when table is needed in the given netns */
	int (*table_init)(struct net *net);

	struct module *me;

	/* A unique name, e.g. "filter"; used for on-demand instantiation */
	char name[XT_TABLE_MAXNAMELEN];
};
0053
/* Registered table templates, one list per protocol family. */
static struct list_head xt_templates[NFPROTO_NUMPROTO];

/* Per-netns state: the tables actually instantiated in that namespace. */
struct xt_pernet {
	struct list_head tables[NFPROTO_NUMPROTO];
};
0059
/* One (offset, cumulative size delta) pair for compat blob translation. */
struct compat_delta {
	unsigned int offset; /* rule offset in the native blob */
	int delta;           /* running compat->native size delta */
};
0064
/* Per-family bookkeeping for registered extensions. */
struct xt_af {
	struct mutex mutex;      /* protects the match and target lists */
	struct list_head match;
	struct list_head target;
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
	struct mutex compat_mutex;        /* serializes compat translation */
	struct compat_delta *compat_tab;  /* sorted offset/delta table */
	unsigned int number;              /* allocated slots in compat_tab */
	unsigned int cur;                 /* used slots in compat_tab */
#endif
};
0076
static unsigned int xt_pernet_id __read_mostly;
static struct xt_af *xt __read_mostly;

/* Module-name prefixes used when auto-loading extensions ("ipt_", ...). */
static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
	[NFPROTO_UNSPEC] = "x",
	[NFPROTO_IPV4]   = "ip",
	[NFPROTO_ARP]    = "arp",
	[NFPROTO_BRIDGE] = "eb",
	[NFPROTO_IPV6]   = "ip6",
};
0087
0088
0089 int xt_register_target(struct xt_target *target)
0090 {
0091 u_int8_t af = target->family;
0092
0093 mutex_lock(&xt[af].mutex);
0094 list_add(&target->list, &xt[af].target);
0095 mutex_unlock(&xt[af].mutex);
0096 return 0;
0097 }
0098 EXPORT_SYMBOL(xt_register_target);
0099
0100 void
0101 xt_unregister_target(struct xt_target *target)
0102 {
0103 u_int8_t af = target->family;
0104
0105 mutex_lock(&xt[af].mutex);
0106 list_del(&target->list);
0107 mutex_unlock(&xt[af].mutex);
0108 }
0109 EXPORT_SYMBOL(xt_unregister_target);
0110
0111 int
0112 xt_register_targets(struct xt_target *target, unsigned int n)
0113 {
0114 unsigned int i;
0115 int err = 0;
0116
0117 for (i = 0; i < n; i++) {
0118 err = xt_register_target(&target[i]);
0119 if (err)
0120 goto err;
0121 }
0122 return err;
0123
0124 err:
0125 if (i > 0)
0126 xt_unregister_targets(target, i);
0127 return err;
0128 }
0129 EXPORT_SYMBOL(xt_register_targets);
0130
0131 void
0132 xt_unregister_targets(struct xt_target *target, unsigned int n)
0133 {
0134 while (n-- > 0)
0135 xt_unregister_target(&target[n]);
0136 }
0137 EXPORT_SYMBOL(xt_unregister_targets);
0138
0139 int xt_register_match(struct xt_match *match)
0140 {
0141 u_int8_t af = match->family;
0142
0143 mutex_lock(&xt[af].mutex);
0144 list_add(&match->list, &xt[af].match);
0145 mutex_unlock(&xt[af].mutex);
0146 return 0;
0147 }
0148 EXPORT_SYMBOL(xt_register_match);
0149
0150 void
0151 xt_unregister_match(struct xt_match *match)
0152 {
0153 u_int8_t af = match->family;
0154
0155 mutex_lock(&xt[af].mutex);
0156 list_del(&match->list);
0157 mutex_unlock(&xt[af].mutex);
0158 }
0159 EXPORT_SYMBOL(xt_unregister_match);
0160
0161 int
0162 xt_register_matches(struct xt_match *match, unsigned int n)
0163 {
0164 unsigned int i;
0165 int err = 0;
0166
0167 for (i = 0; i < n; i++) {
0168 err = xt_register_match(&match[i]);
0169 if (err)
0170 goto err;
0171 }
0172 return err;
0173
0174 err:
0175 if (i > 0)
0176 xt_unregister_matches(match, i);
0177 return err;
0178 }
0179 EXPORT_SYMBOL(xt_register_matches);
0180
0181 void
0182 xt_unregister_matches(struct xt_match *match, unsigned int n)
0183 {
0184 while (n-- > 0)
0185 xt_unregister_match(&match[n]);
0186 }
0187 EXPORT_SYMBOL(xt_unregister_matches);
0188
0189
0190
0191
0192
0193
0194
0195
0196
/**
 * xt_find_match - look up a match extension by family, name and revision.
 *
 * On success a reference on the extension's module is held; the caller
 * must module_put() it.  Falls back to the family-independent
 * (NFPROTO_UNSPEC) list.  Errors: -EINVAL (unterminated name),
 * -ENOENT (no such extension), -EPROTOTYPE (name known, revision not).
 */
struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
{
	struct xt_match *m;
	int err = -ENOENT;

	/* reject names that are not NUL-terminated within the limit */
	if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
		return ERR_PTR(-EINVAL);

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(m, &xt[af].match, list) {
		if (strcmp(m->name, name) == 0) {
			if (m->revision == revision) {
				if (try_module_get(m->me)) {
					mutex_unlock(&xt[af].mutex);
					return m;
				}
			} else
				err = -EPROTOTYPE; /* found name, wrong rev */
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
		return xt_find_match(NFPROTO_UNSPEC, name, revision);

	return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_match);
0226
0227 struct xt_match *
0228 xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
0229 {
0230 struct xt_match *match;
0231
0232 if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
0233 return ERR_PTR(-EINVAL);
0234
0235 match = xt_find_match(nfproto, name, revision);
0236 if (IS_ERR(match)) {
0237 request_module("%st_%s", xt_prefix[nfproto], name);
0238 match = xt_find_match(nfproto, name, revision);
0239 }
0240
0241 return match;
0242 }
0243 EXPORT_SYMBOL_GPL(xt_request_find_match);
0244
0245
/*
 * Look up a target extension by family, name and revision, taking a
 * reference on its owning module.  Falls back to the family-independent
 * (NFPROTO_UNSPEC) list.  Errors: -EINVAL (unterminated name),
 * -ENOENT (unknown), -EPROTOTYPE (name known, revision not).
 */
static struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
{
	struct xt_target *t;
	int err = -ENOENT;

	/* reject names that are not NUL-terminated within the limit */
	if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
		return ERR_PTR(-EINVAL);

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(t, &xt[af].target, list) {
		if (strcmp(t->name, name) == 0) {
			if (t->revision == revision) {
				if (try_module_get(t->me)) {
					mutex_unlock(&xt[af].mutex);
					return t;
				}
			} else
				err = -EPROTOTYPE; /* found name, wrong rev */
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
		return xt_find_target(NFPROTO_UNSPEC, name, revision);

	return ERR_PTR(err);
}
0274
0275 struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
0276 {
0277 struct xt_target *target;
0278
0279 if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
0280 return ERR_PTR(-EINVAL);
0281
0282 target = xt_find_target(af, name, revision);
0283 if (IS_ERR(target)) {
0284 request_module("%st_%s", xt_prefix[af], name);
0285 target = xt_find_target(af, name, revision);
0286 }
0287
0288 return target;
0289 }
0290 EXPORT_SYMBOL_GPL(xt_request_find_target);
0291
0292
0293 static int xt_obj_to_user(u16 __user *psize, u16 size,
0294 void __user *pname, const char *name,
0295 u8 __user *prev, u8 rev)
0296 {
0297 if (put_user(size, psize))
0298 return -EFAULT;
0299 if (copy_to_user(pname, name, strlen(name) + 1))
0300 return -EFAULT;
0301 if (put_user(rev, prev))
0302 return -EFAULT;
0303
0304 return 0;
0305 }
0306
/*
 * Copy a match/target header to userspace.  C_SIZE of 0 means "use the
 * kernel's stored size" (native, non-compat path).
 */
#define XT_OBJ_TO_USER(U, K, TYPE, C_SIZE)				\
	xt_obj_to_user(&U->u.TYPE##_size, C_SIZE ? : K->u.TYPE##_size,	\
		       U->u.user.name, K->u.kernel.TYPE->name,		\
		       &U->u.user.revision, K->u.kernel.TYPE->revision)
0311
/**
 * xt_data_to_user - copy extension payload to userspace.
 *
 * Copies @usersize bytes (or @size when the extension declares no
 * usersize) and zeroes the trailing alignment padding up to
 * @aligned_size, so uninitialized kernel memory is never exposed.
 */
int xt_data_to_user(void __user *dst, const void *src,
		    int usersize, int size, int aligned_size)
{
	usersize = usersize ? : size;
	if (copy_to_user(dst, src, usersize))
		return -EFAULT;
	if (usersize != aligned_size &&
	    clear_user(dst + usersize, aligned_size - usersize))
		return -EFAULT;

	return 0;
}
EXPORT_SYMBOL_GPL(xt_data_to_user);

/* Copy a match/target payload (native layout) to userspace. */
#define XT_DATA_TO_USER(U, K, TYPE)					\
	xt_data_to_user(U->data, K->data,				\
			K->u.kernel.TYPE->usersize,			\
			K->u.kernel.TYPE->TYPE##size,			\
			XT_ALIGN(K->u.kernel.TYPE->TYPE##size))
0331
/* Copy one match entry (header + payload) to userspace; 0 or -EFAULT. */
int xt_match_to_user(const struct xt_entry_match *m,
		     struct xt_entry_match __user *u)
{
	return XT_OBJ_TO_USER(u, m, match, 0) ||
	       XT_DATA_TO_USER(u, m, match);
}
EXPORT_SYMBOL_GPL(xt_match_to_user);
0339
/* Copy one target entry (header + payload) to userspace; 0 or -EFAULT. */
int xt_target_to_user(const struct xt_entry_target *t,
		      struct xt_entry_target __user *u)
{
	return XT_OBJ_TO_USER(u, t, target, 0) ||
	       XT_DATA_TO_USER(u, t, target);
}
EXPORT_SYMBOL_GPL(xt_target_to_user);
0347
0348 static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
0349 {
0350 const struct xt_match *m;
0351 int have_rev = 0;
0352
0353 mutex_lock(&xt[af].mutex);
0354 list_for_each_entry(m, &xt[af].match, list) {
0355 if (strcmp(m->name, name) == 0) {
0356 if (m->revision > *bestp)
0357 *bestp = m->revision;
0358 if (m->revision == revision)
0359 have_rev = 1;
0360 }
0361 }
0362 mutex_unlock(&xt[af].mutex);
0363
0364 if (af != NFPROTO_UNSPEC && !have_rev)
0365 return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);
0366
0367 return have_rev;
0368 }
0369
0370 static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
0371 {
0372 const struct xt_target *t;
0373 int have_rev = 0;
0374
0375 mutex_lock(&xt[af].mutex);
0376 list_for_each_entry(t, &xt[af].target, list) {
0377 if (strcmp(t->name, name) == 0) {
0378 if (t->revision > *bestp)
0379 *bestp = t->revision;
0380 if (t->revision == revision)
0381 have_rev = 1;
0382 }
0383 }
0384 mutex_unlock(&xt[af].mutex);
0385
0386 if (af != NFPROTO_UNSPEC && !have_rev)
0387 return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);
0388
0389 return have_rev;
0390 }
0391
0392
0393 int xt_find_revision(u8 af, const char *name, u8 revision, int target,
0394 int *err)
0395 {
0396 int have_rev, best = -1;
0397
0398 if (target == 1)
0399 have_rev = target_revfn(af, name, revision, &best);
0400 else
0401 have_rev = match_revfn(af, name, revision, &best);
0402
0403
0404 if (best == -1) {
0405 *err = -ENOENT;
0406 return 0;
0407 }
0408
0409 *err = best;
0410 if (!have_rev)
0411 *err = -EPROTONOSUPPORT;
0412 return 1;
0413 }
0414 EXPORT_SYMBOL_GPL(xt_find_revision);
0415
0416 static char *
0417 textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto)
0418 {
0419 static const char *const inetbr_names[] = {
0420 "PREROUTING", "INPUT", "FORWARD",
0421 "OUTPUT", "POSTROUTING", "BROUTING",
0422 };
0423 static const char *const arp_names[] = {
0424 "INPUT", "FORWARD", "OUTPUT",
0425 };
0426 const char *const *names;
0427 unsigned int i, max;
0428 char *p = buf;
0429 bool np = false;
0430 int res;
0431
0432 names = (nfproto == NFPROTO_ARP) ? arp_names : inetbr_names;
0433 max = (nfproto == NFPROTO_ARP) ? ARRAY_SIZE(arp_names) :
0434 ARRAY_SIZE(inetbr_names);
0435 *p = '\0';
0436 for (i = 0; i < max; ++i) {
0437 if (!(mask & (1 << i)))
0438 continue;
0439 res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]);
0440 if (res > 0) {
0441 size -= res;
0442 p += res;
0443 }
0444 np = true;
0445 }
0446
0447 return buf;
0448 }
0449
0450
0451
0452
0453
0454
0455
0456
0457
0458
0459
0460
0461
0462
/*
 * Validate a userspace-supplied name that will become a /proc entry.
 * Rejects the empty string, names without a NUL terminator inside
 * @size bytes, and path components ("."/".." or anything containing
 * a slash) that could escape the proc directory.
 */
int xt_check_proc_name(const char *name, unsigned int size)
{
	if (name[0] == '\0')
		return -EINVAL;

	/* must be NUL-terminated within the buffer */
	if (!memchr(name, '\0', size))
		return -ENAMETOOLONG;

	if (strchr(name, '/') ||
	    strcmp(name, ".") == 0 ||
	    strcmp(name, "..") == 0)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL(xt_check_proc_name);
0479
/**
 * xt_check_match - validate a rule's match before it is used.
 * @par:       check parameters (family, table name, hook mask, ...)
 * @size:      payload size supplied by userspace
 * @proto:     protocol the rule restricts itself to (0 = any)
 * @inv_proto: true if the protocol restriction is inverted
 *
 * Checks size, table, hook and protocol constraints against what the
 * extension declared, then runs the extension's own ->checkentry().
 * Returns 0 on success or a negative errno.
 */
int xt_check_match(struct xt_mtchk_param *par,
		   unsigned int size, u16 proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->match->matchsize) != size &&
	    par->match->matchsize != -1) {
		/*
		 * matchsize == -1 means the extension performs its own
		 * size validation; anything else must match exactly
		 * after XT_ALIGN.
		 */
		pr_err_ratelimited("%s_tables: %s.%u match: invalid size %u (kernel) != (user) %u\n",
				   xt_prefix[par->family], par->match->name,
				   par->match->revision,
				   XT_ALIGN(par->match->matchsize), size);
		return -EINVAL;
	}
	if (par->match->table != NULL &&
	    strcmp(par->match->table, par->table) != 0) {
		pr_info_ratelimited("%s_tables: %s match: only valid in %s table, not %s\n",
				    xt_prefix[par->family], par->match->name,
				    par->match->table, par->table);
		return -EINVAL;
	}
	if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
		char used[64], allow[64];

		pr_info_ratelimited("%s_tables: %s match: used from hooks %s, but only valid from %s\n",
				    xt_prefix[par->family], par->match->name,
				    textify_hooks(used, sizeof(used),
						  par->hook_mask, par->family),
				    textify_hooks(allow, sizeof(allow),
						  par->match->hooks,
						  par->family));
		return -EINVAL;
	}
	if (par->match->proto && (par->match->proto != proto || inv_proto)) {
		pr_info_ratelimited("%s_tables: %s match: only valid for protocol %u\n",
				    xt_prefix[par->family], par->match->name,
				    par->match->proto);
		return -EINVAL;
	}
	if (par->match->checkentry != NULL) {
		ret = par->match->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_match);
0533
0534
0535
0536
0537
0538
0539
0540
0541
0542
0543
0544
/*
 * Walk the match blob between @match and @target and verify every
 * xt_entry_match header is aligned, minimally sized and fits within the
 * remaining space.  This validates only the structural layout of the
 * userspace-supplied blob; the match payloads themselves are checked
 * later.  Returns 0 if the chain is well-formed, else -EINVAL.
 */
static int xt_check_entry_match(const char *match, const char *target,
				const size_t alignment)
{
	const struct xt_entry_match *pos;
	int length = target - match;

	if (length == 0) /* no matches present */
		return 0;

	pos = (struct xt_entry_match *)match;
	do {
		if ((unsigned long)pos % alignment)
			return -EINVAL;

		if (length < (int)sizeof(struct xt_entry_match))
			return -EINVAL;

		if (pos->u.match_size < sizeof(struct xt_entry_match))
			return -EINVAL;

		if (pos->u.match_size > length)
			return -EINVAL;

		length -= pos->u.match_size;
		pos = ((void *)((char *)(pos) + (pos)->u.match_size));
	} while (length > 0);

	return 0;
}
0574
0575
0576
0577
0578
0579
0580
0581
0582
0583
/**
 * xt_check_table_hooks - sanity-check hook entry/underflow offsets.
 * @info:        table blob metadata with hook_entry[]/underflow[] filled in
 * @valid_hooks: bitmask of hooks this table type supports
 *
 * Every valid hook must have both offsets set (0xFFFFFFFF marks unset),
 * and across valid hooks both offset arrays must be strictly increasing
 * (sorted and without duplicates).  Returns 0 or -EINVAL, logging the
 * failing condition.
 */
int xt_check_table_hooks(const struct xt_table_info *info, unsigned int valid_hooks)
{
	const char *err = "unsorted underflow";
	unsigned int i, max_uflow, max_entry;
	bool check_hooks = false;

	BUILD_BUG_ON(ARRAY_SIZE(info->hook_entry) != ARRAY_SIZE(info->underflow));

	max_entry = 0;
	max_uflow = 0;

	for (i = 0; i < ARRAY_SIZE(info->hook_entry); i++) {
		if (!(valid_hooks & (1 << i)))
			continue;

		/* 0xFFFFFFFF == offset never initialized by userspace */
		if (info->hook_entry[i] == 0xFFFFFFFF)
			return -EINVAL;
		if (info->underflow[i] == 0xFFFFFFFF)
			return -EINVAL;

		if (check_hooks) { /* only after the first valid hook */
			if (max_uflow > info->underflow[i])
				goto error;

			if (max_uflow == info->underflow[i]) {
				err = "duplicate underflow";
				goto error;
			}
			if (max_entry > info->hook_entry[i]) {
				err = "unsorted entry";
				goto error;
			}
			if (max_entry == info->hook_entry[i]) {
				err = "duplicate entry";
				goto error;
			}
		}
		max_entry = info->hook_entry[i];
		max_uflow = info->underflow[i];
		check_hooks = true;
	}

	return 0;
error:
	pr_err_ratelimited("%s at hook %d\n", err, i);
	return -EINVAL;
}
EXPORT_SYMBOL(xt_check_table_hooks);
0632
0633 static bool verdict_ok(int verdict)
0634 {
0635 if (verdict > 0)
0636 return true;
0637
0638 if (verdict < 0) {
0639 int v = -verdict - 1;
0640
0641 if (verdict == XT_RETURN)
0642 return true;
0643
0644 switch (v) {
0645 case NF_ACCEPT: return true;
0646 case NF_DROP: return true;
0647 case NF_QUEUE: return true;
0648 default:
0649 break;
0650 }
0651
0652 return false;
0653 }
0654
0655 return false;
0656 }
0657
/*
 * Validate an ERROR target: the user-supplied size must equal the
 * kernel's, and the error name must be NUL-terminated inside its buffer.
 */
static bool error_tg_ok(unsigned int usersize, unsigned int kernsize,
			const char *msg, unsigned int msglen)
{
	if (usersize != kernsize)
		return false;

	/* equivalent to strnlen(msg, msglen) < msglen */
	return memchr(msg, '\0', msglen) != NULL;
}
0663
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
/*
 * Record the compat->native size delta for the rule at @offset.  Deltas
 * are stored as running totals so xt_compat_calc_jump() can binary-search
 * the table.  Caller must hold the per-family compat_mutex and have
 * sized the table via xt_compat_init_offsets().
 */
int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
{
	struct xt_af *xp = &xt[af];

	WARN_ON(!mutex_is_locked(&xt[af].compat_mutex));

	if (WARN_ON(!xp->compat_tab))
		return -ENOMEM;

	if (xp->cur >= xp->number)
		return -EINVAL;

	/* accumulate: each slot holds the total delta up to its offset */
	if (xp->cur)
		delta += xp->compat_tab[xp->cur - 1].delta;
	xp->compat_tab[xp->cur].offset = offset;
	xp->compat_tab[xp->cur].delta = delta;
	xp->cur++;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_add_offset);
0685
0686 void xt_compat_flush_offsets(u_int8_t af)
0687 {
0688 WARN_ON(!mutex_is_locked(&xt[af].compat_mutex));
0689
0690 if (xt[af].compat_tab) {
0691 vfree(xt[af].compat_tab);
0692 xt[af].compat_tab = NULL;
0693 xt[af].number = 0;
0694 xt[af].cur = 0;
0695 }
0696 }
0697 EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);
0698
/*
 * Return the cumulative size delta that applies to a jump target at
 * @offset, via binary search of the sorted compat_tab.  An exact hit
 * returns the total delta of all entries *before* it; a miss returns
 * the delta of the last entry below @offset, or 0 if none exists.
 */
int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
{
	struct compat_delta *tmp = xt[af].compat_tab;
	int mid, left = 0, right = xt[af].cur - 1;

	while (left <= right) {
		mid = (left + right) >> 1;
		if (offset > tmp[mid].offset)
			left = mid + 1;
		else if (offset < tmp[mid].offset)
			right = mid - 1;
		else
			return mid ? tmp[mid - 1].delta : 0;
	}
	/* not found: 'left' is the insertion point */
	return left ? tmp[left - 1].delta : 0;
}
EXPORT_SYMBOL_GPL(xt_compat_calc_jump);
0716
/*
 * Allocate the compat offset table for @number rules.  Caller must hold
 * the per-family compat_mutex; the table must not already exist.
 * Returns 0, -EINVAL on a bad count, or -ENOMEM.
 */
int xt_compat_init_offsets(u8 af, unsigned int number)
{
	size_t mem;

	WARN_ON(!mutex_is_locked(&xt[af].compat_mutex));

	/* guard the multiplication below against overflow */
	if (!number || number > (INT_MAX / sizeof(struct compat_delta)))
		return -EINVAL;

	if (WARN_ON(xt[af].compat_tab))
		return -EINVAL;

	mem = sizeof(struct compat_delta) * number;
	if (mem > XT_MAX_TABLE_SIZE)
		return -ENOMEM;

	xt[af].compat_tab = vmalloc(mem);
	if (!xt[af].compat_tab)
		return -ENOMEM;

	xt[af].number = number;
	xt[af].cur = 0;

	return 0;
}
EXPORT_SYMBOL(xt_compat_init_offsets);
0743
0744 int xt_compat_match_offset(const struct xt_match *match)
0745 {
0746 u_int16_t csize = match->compatsize ? : match->matchsize;
0747 return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize);
0748 }
0749 EXPORT_SYMBOL_GPL(xt_compat_match_offset);
0750
/*
 * Expand a 32-bit compat match at *@dstptr into its native layout and
 * advance *@dstptr/@size accordingly.  @m initially aliases the compat
 * source; the destination is taken from *@dstptr.
 */
void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
			       unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
	int off = xt_compat_match_offset(match);
	u_int16_t msize = cm->u.user.match_size;
	char name[sizeof(m->u.user.name)];

	m = *dstptr;
	memcpy(m, cm, sizeof(*cm));
	if (match->compat_from_user)
		match->compat_from_user(m->data, cm->data);
	else
		memcpy(m->data, cm->data, msize - sizeof(*cm));

	msize += off;
	m->u.user.match_size = msize;
	/* snapshot the name before dropping our module reference */
	strlcpy(name, match->name, sizeof(name));
	module_put(match->me);
	strncpy(m->u.user.name, name, sizeof(m->u.user.name));

	*size += off;
	*dstptr += msize;
}
EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
0777
/* Copy a match/target payload to userspace using the compat size/align. */
#define COMPAT_XT_DATA_TO_USER(U, K, TYPE, C_SIZE)			\
	xt_data_to_user(U->data, K->data,				\
			K->u.kernel.TYPE->usersize,			\
			C_SIZE,						\
			COMPAT_XT_ALIGN(C_SIZE))
0783
/*
 * Shrink a native match back to its 32-bit compat layout and copy it to
 * userspace at *@dstptr, advancing *@dstptr/*@size.  Returns 0 or
 * -EFAULT.
 */
int xt_compat_match_to_user(const struct xt_entry_match *m,
			    void __user **dstptr, unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match __user *cm = *dstptr;
	int off = xt_compat_match_offset(match);
	u_int16_t msize = m->u.user.match_size - off;

	if (XT_OBJ_TO_USER(cm, m, match, msize))
		return -EFAULT;

	if (match->compat_to_user) {
		if (match->compat_to_user((void __user *)cm->data, m->data))
			return -EFAULT;
	} else {
		if (COMPAT_XT_DATA_TO_USER(cm, m, match, msize - sizeof(*cm)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += msize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
0808
0809
/* Built-in standard/error target layouts as they appear in compat blobs. */
struct compat_xt_standard_target {
	struct compat_xt_entry_target t;
	compat_uint_t verdict;
};

struct compat_xt_error_target {
	struct compat_xt_entry_target t;
	char errorname[XT_FUNCTION_MAXNAMELEN];
};
0819
/*
 * Structural validation of a single compat rule entry from userspace:
 * the target must lie after the family-specific header, fit inside the
 * entry (@next_offset), declare a sane size, and — for the built-in
 * STANDARD/ERROR targets — have the exact expected layout and contents.
 * Finally the match chain between the header and the target is walked.
 * Returns 0 or -EINVAL.
 */
int xt_compat_check_entry_offsets(const void *base, const char *elems,
				  unsigned int target_offset,
				  unsigned int next_offset)
{
	long size_of_base_struct = elems - (const char *)base;
	const struct compat_xt_entry_target *t;
	const char *e = base;

	/* target must come after the family-specific entry header */
	if (target_offset < size_of_base_struct)
		return -EINVAL;

	if (target_offset + sizeof(*t) > next_offset)
		return -EINVAL;

	t = (void *)(e + target_offset);
	if (t->u.target_size < sizeof(*t))
		return -EINVAL;

	if (target_offset + t->u.target_size > next_offset)
		return -EINVAL;

	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0) {
		const struct compat_xt_standard_target *st = (const void *)t;

		if (COMPAT_XT_ALIGN(target_offset + sizeof(*st)) != next_offset)
			return -EINVAL;

		if (!verdict_ok(st->verdict))
			return -EINVAL;
	} else if (strcmp(t->u.user.name, XT_ERROR_TARGET) == 0) {
		const struct compat_xt_error_target *et = (const void *)t;

		if (!error_tg_ok(t->u.target_size, sizeof(*et),
				 et->errorname, sizeof(et->errorname)))
			return -EINVAL;
	}

	/*
	 * compat and native entry_match have identical size/layout, so
	 * the shared match-chain walker can be reused here.
	 */
	BUILD_BUG_ON(sizeof(struct compat_xt_entry_match) != sizeof(struct xt_entry_match));

	return xt_check_entry_match(elems, base + target_offset,
				    __alignof__(struct compat_xt_entry_match));
}
EXPORT_SYMBOL(xt_compat_check_entry_offsets);
#endif
0868
0869
0870
0871
0872
0873
0874
0875
0876
0877
0878
0879
0880
0881
0882
0883
0884
0885
0886
0887
0888
0889
0890
0891
0892
0893
0894
0895
0896
0897
0898
0899
0900
0901
0902
0903
0904
0905
0906
0907
0908
0909
0910
0911
/*
 * Structural validation of a single native rule entry from userspace:
 * mirror of xt_compat_check_entry_offsets() for the native layout.
 * The target must lie after the family-specific header, fit inside the
 * entry (@next_offset), declare a sane size, and — for the built-in
 * STANDARD/ERROR targets — have the exact expected layout and contents.
 * Finally the match chain is walked.  Returns 0 or -EINVAL.
 */
int xt_check_entry_offsets(const void *base,
			   const char *elems,
			   unsigned int target_offset,
			   unsigned int next_offset)
{
	long size_of_base_struct = elems - (const char *)base;
	const struct xt_entry_target *t;
	const char *e = base;

	/* target must come after the family-specific entry header */
	if (target_offset < size_of_base_struct)
		return -EINVAL;

	if (target_offset + sizeof(*t) > next_offset)
		return -EINVAL;

	t = (void *)(e + target_offset);
	if (t->u.target_size < sizeof(*t))
		return -EINVAL;

	if (target_offset + t->u.target_size > next_offset)
		return -EINVAL;

	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0) {
		const struct xt_standard_target *st = (const void *)t;

		if (XT_ALIGN(target_offset + sizeof(*st)) != next_offset)
			return -EINVAL;

		if (!verdict_ok(st->verdict))
			return -EINVAL;
	} else if (strcmp(t->u.user.name, XT_ERROR_TARGET) == 0) {
		const struct xt_error_target *et = (const void *)t;

		if (!error_tg_ok(t->u.target_size, sizeof(*et),
				 et->errorname, sizeof(et->errorname)))
			return -EINVAL;
	}

	return xt_check_entry_match(elems, base + target_offset,
				    __alignof__(struct xt_entry_match));
}
EXPORT_SYMBOL(xt_check_entry_offsets);
0955
0956
0957
0958
0959
0960
0961
0962
0963 unsigned int *xt_alloc_entry_offsets(unsigned int size)
0964 {
0965 if (size > XT_MAX_TABLE_SIZE / sizeof(unsigned int))
0966 return NULL;
0967
0968 return kvcalloc(size, sizeof(unsigned int), GFP_KERNEL);
0969
0970 }
0971 EXPORT_SYMBOL(xt_alloc_entry_offsets);
0972
0973
0974
0975
0976
0977
0978
0979
/*
 * Return true if @target appears in the sorted array @offsets of length
 * @size; classic half-open binary search.
 */
bool xt_find_jump_offset(const unsigned int *offsets,
			 unsigned int target, unsigned int size)
{
	unsigned int lo = 0, hi = size;

	while (lo < hi) {
		unsigned int mid = lo + (hi - lo) / 2;

		if (offsets[mid] == target)
			return true;
		if (offsets[mid] < target)
			lo = mid + 1;
		else
			hi = mid;
	}

	return false;
}
EXPORT_SYMBOL(xt_find_jump_offset);
0999
/**
 * xt_check_target - validate a rule's target before it is used.
 * @par:       check parameters (family, table name, hook mask, ...)
 * @size:      payload size supplied by userspace
 * @proto:     protocol the rule restricts itself to (0 = any)
 * @inv_proto: true if the protocol restriction is inverted
 *
 * Mirror of xt_check_match() for targets: checks size, table, hook and
 * protocol constraints, then runs the extension's ->checkentry().
 * Returns 0 on success or a negative errno.
 */
int xt_check_target(struct xt_tgchk_param *par,
		    unsigned int size, u16 proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->target->targetsize) != size) {
		pr_err_ratelimited("%s_tables: %s.%u target: invalid size %u (kernel) != (user) %u\n",
				   xt_prefix[par->family], par->target->name,
				   par->target->revision,
				   XT_ALIGN(par->target->targetsize), size);
		return -EINVAL;
	}
	if (par->target->table != NULL &&
	    strcmp(par->target->table, par->table) != 0) {
		pr_info_ratelimited("%s_tables: %s target: only valid in %s table, not %s\n",
				    xt_prefix[par->family], par->target->name,
				    par->target->table, par->table);
		return -EINVAL;
	}
	if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
		char used[64], allow[64];

		pr_info_ratelimited("%s_tables: %s target: used from hooks %s, but only usable from %s\n",
				    xt_prefix[par->family], par->target->name,
				    textify_hooks(used, sizeof(used),
						  par->hook_mask, par->family),
				    textify_hooks(allow, sizeof(allow),
						  par->target->hooks,
						  par->family));
		return -EINVAL;
	}
	if (par->target->proto && (par->target->proto != proto || inv_proto)) {
		pr_info_ratelimited("%s_tables: %s target: only valid for protocol %u\n",
				    xt_prefix[par->family], par->target->name,
				    par->target->proto);
		return -EINVAL;
	}
	if (par->target->checkentry != NULL) {
		ret = par->target->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_target);
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
/**
 * xt_copy_counters - copy the counters array in from a setsockopt call.
 * @arg:  source sockptr (native or compat layout)
 * @len:  total length of the userspace buffer
 * @info: filled in with the (native-layout) header
 *
 * Reads the xt_counters_info header (compat-aware), validates that @len
 * matches exactly num_counters * sizeof(struct xt_counters), and returns
 * a vmalloc'd copy of the counters (caller vfree's it) or an ERR_PTR.
 */
void *xt_copy_counters(sockptr_t arg, unsigned int len,
		       struct xt_counters_info *info)
{
	size_t offset;
	void *mem;
	u64 size;

#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
	if (in_compat_syscall()) {
		/* structures only differ in size due to alignment */
		struct compat_xt_counters_info compat_tmp;

		if (len <= sizeof(compat_tmp))
			return ERR_PTR(-EINVAL);

		len -= sizeof(compat_tmp);
		if (copy_from_sockptr(&compat_tmp, arg, sizeof(compat_tmp)) != 0)
			return ERR_PTR(-EFAULT);

		memcpy(info->name, compat_tmp.name, sizeof(info->name) - 1);
		info->num_counters = compat_tmp.num_counters;
		offset = sizeof(compat_tmp);
	} else
#endif
	{
		if (len <= sizeof(*info))
			return ERR_PTR(-EINVAL);

		len -= sizeof(*info);
		if (copy_from_sockptr(info, arg, sizeof(*info)) != 0)
			return ERR_PTR(-EFAULT);

		offset = sizeof(*info);
	}
	/* name came from userspace: force NUL termination */
	info->name[sizeof(info->name) - 1] = '\0';

	/* u64 math so a huge num_counters cannot overflow the check */
	size = sizeof(struct xt_counters);
	size *= info->num_counters;

	if (size != (u64)len)
		return ERR_PTR(-EINVAL);

	mem = vmalloc(len);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (copy_from_sockptr_offset(mem, arg, offset, len) == 0)
		return mem;

	vfree(mem);
	return ERR_PTR(-EFAULT);
}
EXPORT_SYMBOL_GPL(xt_copy_counters);
1122
1123 #ifdef CONFIG_NETFILTER_XTABLES_COMPAT
1124 int xt_compat_target_offset(const struct xt_target *target)
1125 {
1126 u_int16_t csize = target->compatsize ? : target->targetsize;
1127 return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize);
1128 }
1129 EXPORT_SYMBOL_GPL(xt_compat_target_offset);
1130
/*
 * Expand a 32-bit compat target at *@dstptr into its native layout and
 * advance *@dstptr/@size accordingly.  @t initially aliases the compat
 * source; the destination is taken from *@dstptr.
 */
void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
				unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
	int off = xt_compat_target_offset(target);
	u_int16_t tsize = ct->u.user.target_size;
	char name[sizeof(t->u.user.name)];

	t = *dstptr;
	memcpy(t, ct, sizeof(*ct));
	if (target->compat_from_user)
		target->compat_from_user(t->data, ct->data);
	else
		memcpy(t->data, ct->data, tsize - sizeof(*ct));

	tsize += off;
	t->u.user.target_size = tsize;
	/* snapshot the name before dropping our module reference */
	strlcpy(name, target->name, sizeof(name));
	module_put(target->me);
	strncpy(t->u.user.name, name, sizeof(t->u.user.name));

	*size += off;
	*dstptr += tsize;
}
EXPORT_SYMBOL_GPL(xt_compat_target_from_user);
1157
/*
 * Shrink a native target back to its 32-bit compat layout and copy it
 * to userspace at *@dstptr, advancing *@dstptr/*@size.  Returns 0 or
 * -EFAULT.
 */
int xt_compat_target_to_user(const struct xt_entry_target *t,
			     void __user **dstptr, unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target __user *ct = *dstptr;
	int off = xt_compat_target_offset(target);
	u_int16_t tsize = t->u.user.target_size - off;

	if (XT_OBJ_TO_USER(ct, t, target, tsize))
		return -EFAULT;

	if (target->compat_to_user) {
		if (target->compat_to_user((void __user *)ct->data, t->data))
			return -EFAULT;
	} else {
		if (COMPAT_XT_DATA_TO_USER(ct, t, target, tsize - sizeof(*ct)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += tsize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
#endif
1183
/*
 * Allocate a table-info blob with @size bytes of rule space.  Only the
 * header struct is zeroed — the rule area is left uninitialized for the
 * caller to fill.  Returns NULL on overflow, size cap, or OOM.
 */
struct xt_table_info *xt_alloc_table_info(unsigned int size)
{
	struct xt_table_info *info = NULL;
	size_t sz = sizeof(*info) + size;

	/* first check catches arithmetic overflow of the addition */
	if (sz < sizeof(*info) || sz >= XT_MAX_TABLE_SIZE)
		return NULL;

	info = kvmalloc(sz, GFP_KERNEL_ACCOUNT);
	if (!info)
		return NULL;

	memset(info, 0, sizeof(*info));
	info->size = size;
	return info;
}
EXPORT_SYMBOL(xt_alloc_table_info);
1201
1202 void xt_free_table_info(struct xt_table_info *info)
1203 {
1204 int cpu;
1205
1206 if (info->jumpstack != NULL) {
1207 for_each_possible_cpu(cpu)
1208 kvfree(info->jumpstack[cpu]);
1209 kvfree(info->jumpstack);
1210 }
1211
1212 kvfree(info);
1213 }
1214 EXPORT_SYMBOL(xt_free_table_info);
1215
1216 struct xt_table *xt_find_table(struct net *net, u8 af, const char *name)
1217 {
1218 struct xt_pernet *xt_net = net_generic(net, xt_pernet_id);
1219 struct xt_table *t;
1220
1221 mutex_lock(&xt[af].mutex);
1222 list_for_each_entry(t, &xt_net->tables[af], list) {
1223 if (strcmp(t->name, name) == 0) {
1224 mutex_unlock(&xt[af].mutex);
1225 return t;
1226 }
1227 }
1228 mutex_unlock(&xt[af].mutex);
1229 return NULL;
1230 }
1231 EXPORT_SYMBOL(xt_find_table);
1232
1233
/*
 * Find a table by name, instantiating it from a registered template if
 * needed.  On success the per-family mutex is held (caller releases it
 * via xt_table_unlock()) and a reference on the table's module is taken
 * in the fast path.  Returns the table or an ERR_PTR.
 */
struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
				    const char *name)
{
	struct xt_pernet *xt_net = net_generic(net, xt_pernet_id);
	struct module *owner = NULL;
	struct xt_template *tmpl;
	struct xt_table *t;

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(t, &xt_net->tables[af], list)
		if (strcmp(t->name, name) == 0 && try_module_get(t->me))
			return t;	/* mutex intentionally kept held */

	/* Table doesn't exist in this netns, check if a template exists */
	list_for_each_entry(tmpl, &xt_templates[af], list) {
		int err;

		if (strcmp(tmpl->name, name))
			continue;
		if (!try_module_get(tmpl->me))
			goto out;

		owner = tmpl->me;

		/* table_init() takes the mutex itself, so drop it here */
		mutex_unlock(&xt[af].mutex);
		err = tmpl->table_init(net);
		if (err < 0) {
			module_put(owner);
			return ERR_PTR(err);
		}

		mutex_lock(&xt[af].mutex);
		break;
	}

	/* The template initialized the table; find it again */
	list_for_each_entry(t, &xt_net->tables[af], list)
		if (strcmp(t->name, name) == 0)
			return t;	/* mutex intentionally kept held */

	module_put(owner);
 out:
	mutex_unlock(&xt[af].mutex);
	return ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL_GPL(xt_find_table_lock);
1280
/*
 * Like xt_find_table_lock(), but tries to auto-load the table module
 * (e.g. "iptable_filter") when the table is not found.
 */
struct xt_table *xt_request_find_table_lock(struct net *net, u_int8_t af,
					    const char *name)
{
	struct xt_table *t = xt_find_table_lock(net, af, name);

#ifdef CONFIG_MODULES
	if (IS_ERR(t)) {
		int err = request_module("%stable_%s", xt_prefix[af], name);
		if (err < 0)
			return ERR_PTR(err);
		t = xt_find_table_lock(net, af, name);
	}
#endif

	return t;
}
EXPORT_SYMBOL_GPL(xt_request_find_table_lock);
1298
/* Release the per-family mutex taken by xt_find_table_lock(). */
void xt_table_unlock(struct xt_table *table)
{
	mutex_unlock(&xt[table->af].mutex);
}
EXPORT_SYMBOL_GPL(xt_table_unlock);
1304
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
/* Serialize access to the per-family compat state (xt[af].compat_tab). */
void xt_compat_lock(u_int8_t af)
{
	mutex_lock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_lock);

/* Release the mutex taken by xt_compat_lock(). */
void xt_compat_unlock(u_int8_t af)
{
	mutex_unlock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_unlock);
#endif
1318
/* Per-cpu seqcount bumped around rule traversal; xt_replace_table()
 * spins on it to wait out readers still using the old table.
 */
DEFINE_PER_CPU(seqcount_t, xt_recseq);
EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);

/* NOTE(review): presumably flipped on while TEE targets are in use,
 * making the jumpstack doubling in xt_jumpstack_alloc() necessary --
 * confirm against the TEE target implementation.
 */
struct static_key xt_tee_enabled __read_mostly;
EXPORT_SYMBOL_GPL(xt_tee_enabled);
1324
/* Allocate the per-cpu jump stacks used during rule traversal.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int xt_jumpstack_alloc(struct xt_table_info *i)
{
	unsigned int size;
	int cpu;

	size = sizeof(void **) * nr_cpu_ids;
	if (size > PAGE_SIZE)
		i->jumpstack = kvzalloc(size, GFP_KERNEL);
	else
		i->jumpstack = kzalloc(size, GFP_KERNEL);
	if (i->jumpstack == NULL)
		return -ENOMEM;

	/* ruleset without jumps -- no per-cpu stacks needed */
	if (i->stacksize == 0)
		return 0;

	/* Each stack holds 2 * stacksize entries; the doubling appears to
	 * leave room for one re-entrant traversal (cf. xt_tee_enabled
	 * above) -- TODO(review): confirm the TEE connection.
	 */
	size = sizeof(void *) * i->stacksize * 2u;
	for_each_possible_cpu(cpu) {
		i->jumpstack[cpu] = kvmalloc_node(size, GFP_KERNEL,
			cpu_to_node(cpu));
		if (i->jumpstack[cpu] == NULL)
			/* Nothing is freed here; the partially filled
			 * jumpstack array is left for the caller that owns
			 * the xt_table_info to release.
			 * NOTE(review): verify all callers free newinfo on
			 * this error path.
			 */
			return -ENOMEM;
	}

	return 0;
}
1367
1368 struct xt_counters *xt_counters_alloc(unsigned int counters)
1369 {
1370 struct xt_counters *mem;
1371
1372 if (counters == 0 || counters > INT_MAX / sizeof(*mem))
1373 return NULL;
1374
1375 counters *= sizeof(*mem);
1376 if (counters > XT_MAX_TABLE_SIZE)
1377 return NULL;
1378
1379 return vzalloc(counters);
1380 }
1381 EXPORT_SYMBOL(xt_counters_alloc);
1382
/* Swap in @newinfo as the table's rule blob and return the old
 * xt_table_info (which the caller owns and must free), or NULL with
 * *error set on failure.  Waits until no CPU can still be traversing
 * the old ruleset before returning.
 */
struct xt_table_info *
xt_replace_table(struct xt_table *table,
		 unsigned int num_counters,
		 struct xt_table_info *newinfo,
		 int *error)
{
	struct xt_table_info *private;
	unsigned int cpu;
	int ret;

	ret = xt_jumpstack_alloc(newinfo);
	if (ret < 0) {
		*error = ret;
		return NULL;
	}

	/* Do the substitution. */
	local_bh_disable();
	private = table->private;

	/* Check inside lock: is the old number correct? */
	if (num_counters != private->number) {
		pr_debug("num_counters != table->private->number (%u/%u)\n",
			 num_counters, private->number);
		local_bh_enable();
		*error = -EAGAIN;
		return NULL;
	}

	newinfo->initial_entries = private->initial_entries;
	/* Ensure contents of newinfo are visible before assigning to
	 * table->private.
	 */
	smp_wmb();
	table->private = newinfo;

	/* make sure all CPUs see the new ->private value */
	smp_mb();

	/* Even though table entries have now been swapped, other CPUs
	 * may still be in the middle of traversing the old blob --
	 * xt_recseq is odd while a traversal is in flight.
	 */
	local_bh_enable();

	/* ... so wait for an even xt_recseq on every cpu */
	for_each_possible_cpu(cpu) {
		seqcount_t *s = &per_cpu(xt_recseq, cpu);
		u32 seq = raw_read_seqcount(s);

		if (seq & 1) {
			do {
				cond_resched();
				cpu_relax();
			} while (seq == raw_read_seqcount(s));
		}
	}

	audit_log_nfcfg(table->name, table->af, private->number,
			!private->number ? AUDIT_XT_OP_REGISTER :
					   AUDIT_XT_OP_REPLACE,
			GFP_KERNEL);
	return private;
}
EXPORT_SYMBOL_GPL(xt_replace_table);
1449
/* Register a table in @net, installing @newinfo as its initial ruleset.
 * @input_table is duplicated (it is typically a module-static template,
 * and one object must not sit on multiple per-netns lists).
 * Returns the new table, or ERR_PTR() on error.
 */
struct xt_table *xt_register_table(struct net *net,
				   const struct xt_table *input_table,
				   struct xt_table_info *bootstrap,
				   struct xt_table_info *newinfo)
{
	struct xt_pernet *xt_net = net_generic(net, xt_pernet_id);
	struct xt_table_info *private;
	struct xt_table *t, *table;
	int ret;

	/* Don't add one object to multiple lists. */
	table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&xt[table->af].mutex);
	/* reject duplicate names within this netns/family */
	list_for_each_entry(t, &xt_net->tables[table->af], list) {
		if (strcmp(t->name, table->name) == 0) {
			ret = -EEXIST;
			goto unlock;
		}
	}

	/* Simplifies xt_replace_table() code: gives it an old "private"
	 * to swap out and validate counters against.
	 */
	table->private = bootstrap;

	if (!xt_replace_table(table, 0, newinfo, &ret))
		goto unlock;

	private = table->private;
	pr_debug("table->private->number = %u\n", private->number);

	/* save number of initial entries */
	private->initial_entries = private->number;

	list_add(&table->list, &xt_net->tables[table->af]);
	mutex_unlock(&xt[table->af].mutex);
	return table;

unlock:
	mutex_unlock(&xt[table->af].mutex);
	kfree(table);
out:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(xt_register_table);
1499
1500 void *xt_unregister_table(struct xt_table *table)
1501 {
1502 struct xt_table_info *private;
1503
1504 mutex_lock(&xt[table->af].mutex);
1505 private = table->private;
1506 list_del(&table->list);
1507 mutex_unlock(&xt[table->af].mutex);
1508 audit_log_nfcfg(table->name, table->af, private->number,
1509 AUDIT_XT_OP_UNREGISTER, GFP_KERNEL);
1510 kfree(table->ops);
1511 kfree(table);
1512
1513 return private;
1514 }
1515 EXPORT_SYMBOL_GPL(xt_unregister_table);
1516
1517 #ifdef CONFIG_PROC_FS
1518 static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
1519 {
1520 u8 af = (unsigned long)pde_data(file_inode(seq->file));
1521 struct net *net = seq_file_net(seq);
1522 struct xt_pernet *xt_net;
1523
1524 xt_net = net_generic(net, xt_pernet_id);
1525
1526 mutex_lock(&xt[af].mutex);
1527 return seq_list_start(&xt_net->tables[af], *pos);
1528 }
1529
1530 static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1531 {
1532 u8 af = (unsigned long)pde_data(file_inode(seq->file));
1533 struct net *net = seq_file_net(seq);
1534 struct xt_pernet *xt_net;
1535
1536 xt_net = net_generic(net, xt_pernet_id);
1537
1538 return seq_list_next(v, &xt_net->tables[af], pos);
1539 }
1540
/* seq_file ->stop: pairs with the mutex_lock() in xt_table_seq_start(). */
static void xt_table_seq_stop(struct seq_file *seq, void *v)
{
	u_int8_t af = (unsigned long)pde_data(file_inode(seq->file));

	mutex_unlock(&xt[af].mutex);
}
1547
1548 static int xt_table_seq_show(struct seq_file *seq, void *v)
1549 {
1550 struct xt_table *table = list_entry(v, struct xt_table, list);
1551
1552 if (*table->name)
1553 seq_printf(seq, "%s\n", table->name);
1554 return 0;
1555 }
1556
/* seq_file ops backing the "<prefix>_tables_names" /proc file */
static const struct seq_operations xt_table_seq_ops = {
	.start = xt_table_seq_start,
	.next = xt_table_seq_next,
	.stop = xt_table_seq_stop,
	.show = xt_table_seq_show,
};
1563
1564
1565
1566
1567
/* Traversal state for the match/target /proc listings.  These walk two
 * lists -- NFPROTO_UNSPEC plus the family-specific one -- under two
 * different mutexes, so the current list head, cursor and phase must be
 * carried across seq_file callbacks.
 */
struct nf_mttg_trav {
	struct list_head *head, *curr;
	uint8_t class;	/* one of the MTTG_TRAV_* phases below */
};

/* traversal phases, advanced by xt_mttg_seq_next() */
enum {
	MTTG_TRAV_INIT,
	MTTG_TRAV_NFP_UNSPEC,	/* walking the NFPROTO_UNSPEC list */
	MTTG_TRAV_NFP_SPEC,	/* walking the family-specific list */
	MTTG_TRAV_DONE,
};
1579
/* Advance the match/target traversal by one step.
 *
 * The two lists live under two different per-family mutexes; when the
 * NFPROTO_UNSPEC list is exhausted the lock is handed over to the
 * specific family's mutex.  trav->class records which mutex is held so
 * xt_mttg_seq_stop() can release the right one.
 */
static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
	bool is_target)
{
	static const uint8_t next_class[] = {
		[MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
		[MTTG_TRAV_NFP_SPEC] = MTTG_TRAV_DONE,
	};
	uint8_t nfproto = (unsigned long)pde_data(file_inode(seq->file));
	struct nf_mttg_trav *trav = seq->private;

	/* ppos is NULL when replaying from xt_mttg_seq_start() */
	if (ppos != NULL)
		++(*ppos);

	switch (trav->class) {
	case MTTG_TRAV_INIT:
		/* first step: take the UNSPEC mutex, point at its list */
		trav->class = MTTG_TRAV_NFP_UNSPEC;
		mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
		trav->head = trav->curr = is_target ?
			&xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
		break;
	case MTTG_TRAV_NFP_UNSPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		/* UNSPEC list done: hand the lock over to the family list */
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		mutex_lock(&xt[nfproto].mutex);
		trav->head = trav->curr = is_target ?
			&xt[nfproto].target : &xt[nfproto].match;
		trav->class = next_class[trav->class];
		break;
	case MTTG_TRAV_NFP_SPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		fallthrough;
	default:
		return NULL;
	}
	return trav;
}
1620
1621 static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
1622 bool is_target)
1623 {
1624 struct nf_mttg_trav *trav = seq->private;
1625 unsigned int j;
1626
1627 trav->class = MTTG_TRAV_INIT;
1628 for (j = 0; j < *pos; ++j)
1629 if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
1630 return NULL;
1631 return trav;
1632 }
1633
/* Release whichever per-family mutex the traversal currently holds
 * (taken in xt_mttg_seq_next(); INIT/DONE phases hold none).
 */
static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
{
	uint8_t nfproto = (unsigned long)pde_data(file_inode(seq->file));
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		break;
	case MTTG_TRAV_NFP_SPEC:
		mutex_unlock(&xt[nfproto].mutex);
		break;
	}
}
1648
/* seq_file ->start for the "<prefix>_tables_matches" /proc file */
static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, false);
}

/* seq_file ->next for the "<prefix>_tables_matches" /proc file */
static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, false);
}
1658
1659 static int xt_match_seq_show(struct seq_file *seq, void *v)
1660 {
1661 const struct nf_mttg_trav *trav = seq->private;
1662 const struct xt_match *match;
1663
1664 switch (trav->class) {
1665 case MTTG_TRAV_NFP_UNSPEC:
1666 case MTTG_TRAV_NFP_SPEC:
1667 if (trav->curr == trav->head)
1668 return 0;
1669 match = list_entry(trav->curr, struct xt_match, list);
1670 if (*match->name)
1671 seq_printf(seq, "%s\n", match->name);
1672 }
1673 return 0;
1674 }
1675
/* seq_file ops backing the "<prefix>_tables_matches" /proc file */
static const struct seq_operations xt_match_seq_ops = {
	.start = xt_match_seq_start,
	.next = xt_match_seq_next,
	.stop = xt_mttg_seq_stop,
	.show = xt_match_seq_show,
};
1682
/* seq_file ->start for the "<prefix>_tables_targets" /proc file */
static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, true);
}

/* seq_file ->next for the "<prefix>_tables_targets" /proc file */
static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, true);
}
1692
1693 static int xt_target_seq_show(struct seq_file *seq, void *v)
1694 {
1695 const struct nf_mttg_trav *trav = seq->private;
1696 const struct xt_target *target;
1697
1698 switch (trav->class) {
1699 case MTTG_TRAV_NFP_UNSPEC:
1700 case MTTG_TRAV_NFP_SPEC:
1701 if (trav->curr == trav->head)
1702 return 0;
1703 target = list_entry(trav->curr, struct xt_target, list);
1704 if (*target->name)
1705 seq_printf(seq, "%s\n", target->name);
1706 }
1707 return 0;
1708 }
1709
/* seq_file ops backing the "<prefix>_tables_targets" /proc file */
static const struct seq_operations xt_target_seq_ops = {
	.start = xt_target_seq_start,
	.next = xt_target_seq_next,
	.stop = xt_mttg_seq_stop,
	.show = xt_target_seq_show,
};
1716
/* suffixes of the per-family /proc/net files created in xt_proto_init() */
#define FORMAT_TABLES "_tables_names"
#define FORMAT_MATCHES "_tables_matches"
#define FORMAT_TARGETS "_tables_targets"

#endif /* CONFIG_PROC_FS */
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731 struct nf_hook_ops *
1732 xt_hook_ops_alloc(const struct xt_table *table, nf_hookfn *fn)
1733 {
1734 unsigned int hook_mask = table->valid_hooks;
1735 uint8_t i, num_hooks = hweight32(hook_mask);
1736 uint8_t hooknum;
1737 struct nf_hook_ops *ops;
1738
1739 if (!num_hooks)
1740 return ERR_PTR(-EINVAL);
1741
1742 ops = kcalloc(num_hooks, sizeof(*ops), GFP_KERNEL);
1743 if (ops == NULL)
1744 return ERR_PTR(-ENOMEM);
1745
1746 for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0;
1747 hook_mask >>= 1, ++hooknum) {
1748 if (!(hook_mask & 1))
1749 continue;
1750 ops[i].hook = fn;
1751 ops[i].pf = table->af;
1752 ops[i].hooknum = hooknum;
1753 ops[i].priority = table->priority;
1754 ++i;
1755 }
1756
1757 return ops;
1758 }
1759 EXPORT_SYMBOL_GPL(xt_hook_ops_alloc);
1760
/**
 * xt_register_template - register a "larval" table template
 * @table: table whose name/family identify the template
 * @table_init: callback that instantiates the table in a netns
 *
 * Templates let xt_find_table_lock() create a table on demand in a
 * namespace where it does not exist yet.  Registering the same
 * family/name twice is a bug (WARN, -EEXIST).
 */
int xt_register_template(const struct xt_table *table,
			 int (*table_init)(struct net *net))
{
	int ret = -EEXIST, af = table->af;
	struct xt_template *t;

	mutex_lock(&xt[af].mutex);

	list_for_each_entry(t, &xt_templates[af], list) {
		if (WARN_ON_ONCE(strcmp(table->name, t->name) == 0))
			goto out_unlock;
	}

	ret = -ENOMEM;
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		goto out_unlock;

	BUILD_BUG_ON(sizeof(t->name) != sizeof(table->name));

	strscpy(t->name, table->name, sizeof(t->name));
	t->table_init = table_init;
	t->me = table->me;
	list_add(&t->list, &xt_templates[af]);
	ret = 0;
out_unlock:
	mutex_unlock(&xt[af].mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(xt_register_template);
1791
/* Remove the template added by xt_register_template(); warns when no
 * matching entry exists (unbalanced unregister).
 */
void xt_unregister_template(const struct xt_table *table)
{
	struct xt_template *t;
	int af = table->af;

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(t, &xt_templates[af], list) {
		if (strcmp(table->name, t->name))
			continue;

		list_del(&t->list);
		mutex_unlock(&xt[af].mutex);
		kfree(t);
		return;
	}

	mutex_unlock(&xt[af].mutex);
	WARN_ON_ONCE(1);
}
EXPORT_SYMBOL_GPL(xt_unregister_template);
1812
1813 int xt_proto_init(struct net *net, u_int8_t af)
1814 {
1815 #ifdef CONFIG_PROC_FS
1816 char buf[XT_FUNCTION_MAXNAMELEN];
1817 struct proc_dir_entry *proc;
1818 kuid_t root_uid;
1819 kgid_t root_gid;
1820 #endif
1821
1822 if (af >= ARRAY_SIZE(xt_prefix))
1823 return -EINVAL;
1824
1825
1826 #ifdef CONFIG_PROC_FS
1827 root_uid = make_kuid(net->user_ns, 0);
1828 root_gid = make_kgid(net->user_ns, 0);
1829
1830 strlcpy(buf, xt_prefix[af], sizeof(buf));
1831 strlcat(buf, FORMAT_TABLES, sizeof(buf));
1832 proc = proc_create_net_data(buf, 0440, net->proc_net, &xt_table_seq_ops,
1833 sizeof(struct seq_net_private),
1834 (void *)(unsigned long)af);
1835 if (!proc)
1836 goto out;
1837 if (uid_valid(root_uid) && gid_valid(root_gid))
1838 proc_set_user(proc, root_uid, root_gid);
1839
1840 strlcpy(buf, xt_prefix[af], sizeof(buf));
1841 strlcat(buf, FORMAT_MATCHES, sizeof(buf));
1842 proc = proc_create_seq_private(buf, 0440, net->proc_net,
1843 &xt_match_seq_ops, sizeof(struct nf_mttg_trav),
1844 (void *)(unsigned long)af);
1845 if (!proc)
1846 goto out_remove_tables;
1847 if (uid_valid(root_uid) && gid_valid(root_gid))
1848 proc_set_user(proc, root_uid, root_gid);
1849
1850 strlcpy(buf, xt_prefix[af], sizeof(buf));
1851 strlcat(buf, FORMAT_TARGETS, sizeof(buf));
1852 proc = proc_create_seq_private(buf, 0440, net->proc_net,
1853 &xt_target_seq_ops, sizeof(struct nf_mttg_trav),
1854 (void *)(unsigned long)af);
1855 if (!proc)
1856 goto out_remove_matches;
1857 if (uid_valid(root_uid) && gid_valid(root_gid))
1858 proc_set_user(proc, root_uid, root_gid);
1859 #endif
1860
1861 return 0;
1862
1863 #ifdef CONFIG_PROC_FS
1864 out_remove_matches:
1865 strlcpy(buf, xt_prefix[af], sizeof(buf));
1866 strlcat(buf, FORMAT_MATCHES, sizeof(buf));
1867 remove_proc_entry(buf, net->proc_net);
1868
1869 out_remove_tables:
1870 strlcpy(buf, xt_prefix[af], sizeof(buf));
1871 strlcat(buf, FORMAT_TABLES, sizeof(buf));
1872 remove_proc_entry(buf, net->proc_net);
1873 out:
1874 return -1;
1875 #endif
1876 }
1877 EXPORT_SYMBOL_GPL(xt_proto_init);
1878
1879 void xt_proto_fini(struct net *net, u_int8_t af)
1880 {
1881 #ifdef CONFIG_PROC_FS
1882 char buf[XT_FUNCTION_MAXNAMELEN];
1883
1884 strlcpy(buf, xt_prefix[af], sizeof(buf));
1885 strlcat(buf, FORMAT_TABLES, sizeof(buf));
1886 remove_proc_entry(buf, net->proc_net);
1887
1888 strlcpy(buf, xt_prefix[af], sizeof(buf));
1889 strlcat(buf, FORMAT_TARGETS, sizeof(buf));
1890 remove_proc_entry(buf, net->proc_net);
1891
1892 strlcpy(buf, xt_prefix[af], sizeof(buf));
1893 strlcat(buf, FORMAT_MATCHES, sizeof(buf));
1894 remove_proc_entry(buf, net->proc_net);
1895 #endif
1896 }
1897 EXPORT_SYMBOL_GPL(xt_proto_fini);
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
/**
 * xt_percpu_counter_alloc - allocate one percpu counter slot
 * @state: carries the current percpu block and offset between calls
 * @counter: counter struct whose pcnt receives the percpu address
 *
 * On SMP (nr_cpu_ids > 1), slots are carved sequentially out of
 * XT_PCPU_BLOCK_SIZE percpu blocks; a fresh block is allocated when the
 * previous one is exhausted.  On UP nothing is allocated and the
 * counter is used in place.  Returns false only when a new block could
 * not be allocated.
 */
bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state,
			     struct xt_counters *counter)
{
	BUILD_BUG_ON(XT_PCPU_BLOCK_SIZE < (sizeof(*counter) * 2));

	if (nr_cpu_ids <= 1)
		return true;

	if (!state->mem) {
		/* block-size alignment lets xt_percpu_counter_free()
		 * distinguish block starts from interior slots
		 */
		state->mem = __alloc_percpu(XT_PCPU_BLOCK_SIZE,
					    XT_PCPU_BLOCK_SIZE);
		if (!state->mem)
			return false;
	}
	counter->pcnt = (__force unsigned long)(state->mem + state->off);
	state->off += sizeof(*counter);
	if (state->off > (XT_PCPU_BLOCK_SIZE - sizeof(*counter))) {
		/* block exhausted: force a fresh allocation next call */
		state->mem = NULL;
		state->off = 0;
	}
	return true;
}
EXPORT_SYMBOL_GPL(xt_percpu_counter_alloc);
1944
/* Free the percpu block backing @counters.
 *
 * Blocks are XT_PCPU_BLOCK_SIZE-aligned, so only the slot at offset 0
 * of a block carries the block's base address and triggers the actual
 * free; interior slots (and the UP case) are no-ops.
 */
void xt_percpu_counter_free(struct xt_counters *counters)
{
	unsigned long pcnt = counters->pcnt;

	if (nr_cpu_ids > 1 && (pcnt & (XT_PCPU_BLOCK_SIZE - 1)) == 0)
		free_percpu((void __percpu *)pcnt);
}
EXPORT_SYMBOL_GPL(xt_percpu_counter_free);
1953
1954 static int __net_init xt_net_init(struct net *net)
1955 {
1956 struct xt_pernet *xt_net = net_generic(net, xt_pernet_id);
1957 int i;
1958
1959 for (i = 0; i < NFPROTO_NUMPROTO; i++)
1960 INIT_LIST_HEAD(&xt_net->tables[i]);
1961 return 0;
1962 }
1963
1964 static void __net_exit xt_net_exit(struct net *net)
1965 {
1966 struct xt_pernet *xt_net = net_generic(net, xt_pernet_id);
1967 int i;
1968
1969 for (i = 0; i < NFPROTO_NUMPROTO; i++)
1970 WARN_ON_ONCE(!list_empty(&xt_net->tables[i]));
1971 }
1972
/* Per-netns hooks; .id/.size make struct xt_pernet reachable through
 * net_generic(net, xt_pernet_id).
 */
static struct pernet_operations xt_net_ops = {
	.init = xt_net_init,
	.exit = xt_net_exit,
	.id = &xt_pernet_id,
	.size = sizeof(struct xt_pernet),
};
1979
/* Module init: set up per-cpu seqcounts, the per-family xt_af array
 * (mutexes + match/target/template lists) and the pernet subsystem.
 */
static int __init xt_init(void)
{
	unsigned int i;
	int rv;

	/* seqcounts consumed by xt_replace_table()'s reader-drain loop */
	for_each_possible_cpu(i) {
		seqcount_init(&per_cpu(xt_recseq, i));
	}

	xt = kcalloc(NFPROTO_NUMPROTO, sizeof(struct xt_af), GFP_KERNEL);
	if (!xt)
		return -ENOMEM;

	for (i = 0; i < NFPROTO_NUMPROTO; i++) {
		mutex_init(&xt[i].mutex);
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
		mutex_init(&xt[i].compat_mutex);
		xt[i].compat_tab = NULL;
#endif
		INIT_LIST_HEAD(&xt[i].target);
		INIT_LIST_HEAD(&xt[i].match);
		INIT_LIST_HEAD(&xt_templates[i]);
	}
	rv = register_pernet_subsys(&xt_net_ops);
	if (rv < 0)
		kfree(xt);
	return rv;
}
2008
/* Module exit: tear down the pernet subsystem, then the xt_af array. */
static void __exit xt_fini(void)
{
	unregister_pernet_subsys(&xt_net_ops);
	kfree(xt);
}
2014
2015 module_init(xt_init);
2016 module_exit(xt_fini);
2017