0001
0002 #include "audit.h"
0003 #include <linux/fsnotify_backend.h>
0004 #include <linux/namei.h>
0005 #include <linux/mount.h>
0006 #include <linux/kthread.h>
0007 #include <linux/refcount.h>
0008 #include <linux/slab.h>
0009
0010 struct audit_tree;
0011 struct audit_chunk;
0012
/*
 * One watched directory tree, created per audit rule pathname.
 * Lifetime is reference-counted; freed via kfree_rcu() in put_tree().
 */
struct audit_tree {
	refcount_t count;		/* reference count */
	int goner;			/* set once the tree is being torn down */
	struct audit_chunk *root;	/* chunk covering the tree root, if tagged */
	struct list_head chunks;	/* audit_node.list entries of all chunks owned */
	struct list_head rules;		/* audit_krule.rlist of rules using this tree */
	struct list_head list;		/* anchored in tree_list / prune_list / killed list */
	struct list_head same_root;	/* anchored in root chunk's ->trees */
	struct rcu_head head;		/* for kfree_rcu() */
	char pathname[];		/* rule path, NUL-terminated flexible array */
};
0024
/*
 * Per-inode tag shared by all trees watching that inode.  Linked into the
 * chunk hash by ->key (derived from the inode) and paired 1:1 with an
 * fsnotify mark through ->mark / audit_tree_mark.chunk.
 */
struct audit_chunk {
	struct list_head hash;		/* bucket linkage in chunk_hash_heads[] */
	unsigned long key;		/* inode_to_key() value; hash lookup key */
	struct fsnotify_mark *mark;	/* mark attached to the inode, or NULL */
	struct list_head trees;		/* trees whose root is covered by this chunk */
	int count;			/* number of slots in owners[] */
	atomic_long_t refs;		/* reference count (see audit_put_chunk()) */
	struct rcu_head head;		/* for call_rcu() in audit_mark_put_chunk() */
	struct audit_node {
		struct list_head list;	/* linked into owner->chunks */
		struct audit_tree *owner; /* owning tree, NULL for a free slot */
		unsigned index;		/* slot number; bit 31 is the "tagged" flag */
	} owners[];			/* one slot per owning tree */
};
0039
/* fsnotify mark wrapper carrying the back-pointer to its audit chunk. */
struct audit_tree_mark {
	struct fsnotify_mark mark;	/* must be first for audit_mark() container_of */
	struct audit_chunk *chunk;	/* chunk this mark belongs to; hash_lock protected */
};
0044
/* All trees with live rules; walked/modified under audit_filter_mutex. */
static LIST_HEAD(tree_list);
/* Trees queued for destruction by the pruning kthread. */
static LIST_HEAD(prune_list);
/* Background thread draining prune_list; started lazily (audit_launch_prune). */
static struct task_struct *prune_thread;
0048
0049
0050
0051
0052
0053
0054
0055
0056
0057
0058
0059
0060
0061
0062
0063
0064
0065
0066
0067
0068
0069
0070
0071
0072
0073
0074
0075
0076
0077
0078
0079
0080
0081
0082
0083
0084
0085
0086
0087
0088
0089
/* fsnotify group owning every audit tree mark (set up in audit_tree_init). */
static struct fsnotify_group *audit_tree_group;
/* Slab cache for struct audit_tree_mark allocations. */
static struct kmem_cache *audit_tree_mark_cachep __read_mostly;
0092
0093 static struct audit_tree *alloc_tree(const char *s)
0094 {
0095 struct audit_tree *tree;
0096
0097 tree = kmalloc(struct_size(tree, pathname, strlen(s) + 1), GFP_KERNEL);
0098 if (tree) {
0099 refcount_set(&tree->count, 1);
0100 tree->goner = 0;
0101 INIT_LIST_HEAD(&tree->chunks);
0102 INIT_LIST_HEAD(&tree->rules);
0103 INIT_LIST_HEAD(&tree->list);
0104 INIT_LIST_HEAD(&tree->same_root);
0105 tree->root = NULL;
0106 strcpy(tree->pathname, s);
0107 }
0108 return tree;
0109 }
0110
/* Take an extra reference on @tree. */
static inline void get_tree(struct audit_tree *tree)
{
	refcount_inc(&tree->count);
}
0115
/* Drop a reference on @tree; the last put frees it after an RCU grace period. */
static inline void put_tree(struct audit_tree *tree)
{
	if (refcount_dec_and_test(&tree->count))
		kfree_rcu(tree, head);
}
0121
0122
/* Return the rule pathname the tree was created for (valid while tree lives). */
const char *audit_tree_path(struct audit_tree *tree)
{
	return tree->pathname;
}
0127
0128 static void free_chunk(struct audit_chunk *chunk)
0129 {
0130 int i;
0131
0132 for (i = 0; i < chunk->count; i++) {
0133 if (chunk->owners[i].owner)
0134 put_tree(chunk->owners[i].owner);
0135 }
0136 kfree(chunk);
0137 }
0138
/* Drop a chunk reference; the last put frees the chunk immediately. */
void audit_put_chunk(struct audit_chunk *chunk)
{
	if (atomic_long_dec_and_test(&chunk->refs))
		free_chunk(chunk);
}
0144
/* RCU callback: drop the reference deferred by audit_mark_put_chunk(). */
static void __put_chunk(struct rcu_head *rcu)
{
	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
	audit_put_chunk(chunk);
}
0150
0151
0152
0153
0154
0155
/*
 * Drop the chunk reference after an RCU grace period, so concurrent
 * RCU hash walkers (audit_tree_lookup()) can finish with it first.
 */
static void audit_mark_put_chunk(struct audit_chunk *chunk)
{
	call_rcu(&chunk->head, __put_chunk);
}
0160
/* Convert an fsnotify mark to its enclosing audit_tree_mark wrapper. */
static inline struct audit_tree_mark *audit_mark(struct fsnotify_mark *mark)
{
	return container_of(mark, struct audit_tree_mark, mark);
}
0165
/* Chunk currently attached to @mark, or NULL (hash_lock protects the link). */
static struct audit_chunk *mark_chunk(struct fsnotify_mark *mark)
{
	return audit_mark(mark)->chunk;
}
0170
/* fsnotify ->free_mark callback: return the wrapper to its slab cache. */
static void audit_tree_destroy_watch(struct fsnotify_mark *mark)
{
	kmem_cache_free(audit_tree_mark_cachep, audit_mark(mark));
}
0175
0176 static struct fsnotify_mark *alloc_mark(void)
0177 {
0178 struct audit_tree_mark *amark;
0179
0180 amark = kmem_cache_zalloc(audit_tree_mark_cachep, GFP_KERNEL);
0181 if (!amark)
0182 return NULL;
0183 fsnotify_init_mark(&amark->mark, audit_tree_group);
0184 amark->mark.mask = FS_IN_IGNORED;
0185 return &amark->mark;
0186 }
0187
0188 static struct audit_chunk *alloc_chunk(int count)
0189 {
0190 struct audit_chunk *chunk;
0191 int i;
0192
0193 chunk = kzalloc(struct_size(chunk, owners, count), GFP_KERNEL);
0194 if (!chunk)
0195 return NULL;
0196
0197 INIT_LIST_HEAD(&chunk->hash);
0198 INIT_LIST_HEAD(&chunk->trees);
0199 chunk->count = count;
0200 atomic_long_set(&chunk->refs, 1);
0201 for (i = 0; i < count; i++) {
0202 INIT_LIST_HEAD(&chunk->owners[i].list);
0203 chunk->owners[i].index = i;
0204 }
0205 return chunk;
0206 }
0207
/* Chunk hash table: HASH_SIZE buckets selected by chunk_hash(chunk->key). */
enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
/* Protects the chunk hash and all chunk<->tree linkage fields. */
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);
0211
0212
/*
 * Hash key for an inode: the address of its fsnotify mark connector
 * anchor, which is unique and stable for the inode's lifetime.
 */
static unsigned long inode_to_key(const struct inode *inode)
{
	return (unsigned long)&inode->i_fsnotify_marks;
}
0218
/* Map @key to its hash bucket; divide by L1_CACHE_BYTES to drop alignment bits. */
static inline struct list_head *chunk_hash(unsigned long key)
{
	unsigned long n = key / L1_CACHE_BYTES;
	return chunk_hash_heads + n % HASH_SIZE;
}
0224
0225
/* Publish @chunk in the hash; caller holds hash_lock and has set chunk->key. */
static void insert_hash(struct audit_chunk *chunk)
{
	struct list_head *list;

	/*
	 * Make sure the chunk is fully initialized before it becomes visible
	 * to lockless readers.  Pairs with the READ_ONCE() of p->key in
	 * audit_tree_lookup().
	 */
	smp_wmb();
	WARN_ON_ONCE(!chunk->key);	/* key must be set before publishing */
	list = chunk_hash(chunk->key);
	list_add_rcu(&chunk->hash, list);
}
0240
0241
/*
 * Lockless (RCU) lookup of the chunk tagging @inode.  On success returns
 * the chunk with an extra reference taken; the caller must drop it with
 * audit_put_chunk().  Returns NULL if the inode is untagged.
 * Caller must be in an RCU read-side section — TODO confirm against callers.
 */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
	unsigned long key = inode_to_key(inode);
	struct list_head *list = chunk_hash(key);
	struct audit_chunk *p;

	list_for_each_entry_rcu(p, list, hash) {
		/*
		 * READ_ONCE() pairs with the smp_wmb() in insert_hash() /
		 * replace_chunk(): a non-zero matching key implies the chunk
		 * was fully initialized before publication.
		 */
		if (READ_ONCE(p->key) == key) {
			atomic_long_inc(&p->refs);
			return p;
		}
	}
	return NULL;
}
0260
0261 bool audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
0262 {
0263 int n;
0264 for (n = 0; n < chunk->count; n++)
0265 if (chunk->owners[n].owner == tree)
0266 return true;
0267 return false;
0268 }
0269
0270
0271
/*
 * Recover the enclosing chunk from an owner slot pointer.  The low bits
 * of p->index are the slot number (bit 31 is the tag flag), so stepping
 * back @index slots lands on owners[0].
 */
static struct audit_chunk *find_chunk(struct audit_node *p)
{
	int index = p->index & ~(1U<<31);
	p -= index;
	return container_of(p, struct audit_chunk, owners[0]);
}
0278
/*
 * Re-point @mark at @chunk (may be NULL), updating both directions of the
 * mark<->chunk link.  Caller must hold hash_lock.
 */
static void replace_mark_chunk(struct fsnotify_mark *mark,
		struct audit_chunk *chunk)
{
	struct audit_chunk *old;

	assert_spin_locked(&hash_lock);
	old = mark_chunk(mark);
	audit_mark(mark)->chunk = chunk;
	if (chunk)
		chunk->mark = mark;
	if (old)
		old->mark = NULL;	/* old chunk no longer has a live mark */
}
0292
0293 static void replace_chunk(struct audit_chunk *new, struct audit_chunk *old)
0294 {
0295 struct audit_tree *owner;
0296 int i, j;
0297
0298 new->key = old->key;
0299 list_splice_init(&old->trees, &new->trees);
0300 list_for_each_entry(owner, &new->trees, same_root)
0301 owner->root = new;
0302 for (i = j = 0; j < old->count; i++, j++) {
0303 if (!old->owners[j].owner) {
0304 i--;
0305 continue;
0306 }
0307 owner = old->owners[j].owner;
0308 new->owners[i].owner = owner;
0309 new->owners[i].index = old->owners[j].index - j + i;
0310 if (!owner)
0311 continue;
0312 get_tree(owner);
0313 list_replace_init(&old->owners[j].list, &new->owners[i].list);
0314 }
0315 replace_mark_chunk(old->mark, new);
0316
0317
0318
0319
0320
0321 smp_wmb();
0322 list_replace_rcu(&old->hash, &new->hash);
0323 }
0324
/*
 * Detach owner slot @p from its tree and drop the tree reference the slot
 * held.  If the tree's root chunk was @chunk, clear that link too.
 * Caller must hold hash_lock (implied by list manipulation — TODO confirm).
 */
static void remove_chunk_node(struct audit_chunk *chunk, struct audit_node *p)
{
	struct audit_tree *owner = p->owner;

	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}
	list_del_init(&p->list);
	p->owner = NULL;	/* slot becomes free */
	put_tree(owner);
}
0337
0338 static int chunk_count_trees(struct audit_chunk *chunk)
0339 {
0340 int i;
0341 int ret = 0;
0342
0343 for (i = 0; i < chunk->count; i++)
0344 if (chunk->owners[i].owner)
0345 ret++;
0346 return ret;
0347 }
0348
/*
 * Shrink or remove @chunk after one of its owner slots was cleared.
 * If no trees remain, tear the chunk and its mark down entirely;
 * otherwise build a smaller replacement chunk.  @mark is pinned by the
 * caller (prune_tree_chunks()).  On allocation failure the oversized
 * chunk is simply left in place — harmless, just wasteful.
 */
static void untag_chunk(struct audit_chunk *chunk, struct fsnotify_mark *mark)
{
	struct audit_chunk *new;
	int size;

	fsnotify_group_lock(audit_tree_group);
	/*
	 * The group lock stabilizes the mark<->chunk link, so re-check that
	 * the mark is still attached and still points at @chunk — both may
	 * have changed while the caller had dropped hash_lock.
	 */
	if (!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) ||
	    mark_chunk(mark) != chunk)
		goto out_mutex;

	size = chunk_count_trees(chunk);
	if (!size) {
		/* last tree gone: unhash the chunk and destroy the mark */
		spin_lock(&hash_lock);
		list_del_init(&chunk->trees);
		list_del_rcu(&chunk->hash);
		replace_mark_chunk(mark, NULL);
		spin_unlock(&hash_lock);
		fsnotify_detach_mark(mark);
		fsnotify_group_unlock(audit_tree_group);
		audit_mark_put_chunk(chunk);
		fsnotify_free_mark(mark);
		return;
	}

	new = alloc_chunk(size);
	if (!new)
		goto out_mutex;

	spin_lock(&hash_lock);
	/*
	 * replace_chunk() migrates owners, mark and hash linkage from
	 * @chunk to @new; @chunk's reference is dropped after RCU below.
	 */
	replace_chunk(new, chunk);
	spin_unlock(&hash_lock);
	fsnotify_group_unlock(audit_tree_group);
	audit_mark_put_chunk(chunk);
	return;

out_mutex:
	fsnotify_group_unlock(audit_tree_group);
}
0395
0396
/*
 * Tag a previously untagged @inode for @tree: allocate a single-slot
 * chunk plus a mark, attach the mark to the inode and publish the chunk
 * in the hash.  Called from tag_chunk() with the fsnotify group lock
 * held; this function releases that lock on every path.
 * Returns 0 on success (also when the tree died meanwhile), -ENOMEM or
 * -ENOSPC on failure.
 */
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *mark;
	struct audit_chunk *chunk = alloc_chunk(1);

	if (!chunk) {
		fsnotify_group_unlock(audit_tree_group);
		return -ENOMEM;
	}

	mark = alloc_mark();
	if (!mark) {
		fsnotify_group_unlock(audit_tree_group);
		kfree(chunk);
		return -ENOMEM;
	}

	if (fsnotify_add_inode_mark_locked(mark, inode, 0)) {
		fsnotify_group_unlock(audit_tree_group);
		fsnotify_put_mark(mark);
		kfree(chunk);
		return -ENOSPC;
	}

	spin_lock(&hash_lock);
	if (tree->goner) {
		/* tree was killed while we were allocating: undo the mark */
		spin_unlock(&hash_lock);
		fsnotify_detach_mark(mark);
		fsnotify_group_unlock(audit_tree_group);
		fsnotify_free_mark(mark);
		fsnotify_put_mark(mark);
		kfree(chunk);
		return 0;
	}
	replace_mark_chunk(mark, chunk);
	chunk->owners[0].index = (1U << 31);	/* tagged, slot 0 */
	chunk->owners[0].owner = tree;
	get_tree(tree);
	list_add(&chunk->owners[0].list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	chunk->key = inode_to_key(inode);
	/*
	 * Publish only after the chunk is fully set up; insert_hash()
	 * contains the needed write barrier.
	 */
	insert_hash(chunk);
	spin_unlock(&hash_lock);
	fsnotify_group_unlock(audit_tree_group);
	/*
	 * Drop our initial mark reference.  When the mark is being freed we
	 * get notified through the ->freeing_mark callback, which cleans up
	 * the chunk pointing at it.
	 */
	fsnotify_put_mark(mark);
	return 0;
}
0456
0457
/*
 * Ensure @inode is tagged on behalf of @tree.  If the inode has no audit
 * mark yet, delegate to create_chunk() (which inherits and releases the
 * group lock).  Otherwise grow the existing chunk by one owner slot,
 * replacing it in the hash.  Returns 0 on success or if the tree already
 * owns a slot (or died meanwhile); -ENOMEM on allocation failure.
 */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *mark;
	struct audit_chunk *chunk, *old;
	struct audit_node *p;
	int n;

	fsnotify_group_lock(audit_tree_group);
	mark = fsnotify_find_mark(&inode->i_fsnotify_marks, audit_tree_group);
	if (!mark)
		return create_chunk(inode, tree);

	/*
	 * NOTE(review): fsnotify_find_mark() presumably returns the mark
	 * with a reference held — every exit path below drops it with
	 * fsnotify_put_mark().
	 */
	spin_lock(&hash_lock);
	old = mark_chunk(mark);
	/* already tagged for this tree? nothing to do */
	for (n = 0; n < old->count; n++) {
		if (old->owners[n].owner == tree) {
			spin_unlock(&hash_lock);
			fsnotify_group_unlock(audit_tree_group);
			fsnotify_put_mark(mark);
			return 0;
		}
	}
	spin_unlock(&hash_lock);

	chunk = alloc_chunk(old->count + 1);
	if (!chunk) {
		fsnotify_group_unlock(audit_tree_group);
		fsnotify_put_mark(mark);
		return -ENOMEM;
	}

	spin_lock(&hash_lock);
	if (tree->goner) {
		/* tree was killed while hash_lock was dropped */
		spin_unlock(&hash_lock);
		fsnotify_group_unlock(audit_tree_group);
		fsnotify_put_mark(mark);
		kfree(chunk);
		return 0;
	}
	/* claim the new last slot for @tree, tagged (bit 31 set) */
	p = &chunk->owners[chunk->count - 1];
	p->index = (chunk->count - 1) | (1U<<31);
	p->owner = tree;
	get_tree(tree);
	list_add(&p->list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	/*
	 * Migrate everything from @old into the bigger @chunk and swap it
	 * into the hash; @old is released after an RCU grace period.
	 */
	replace_chunk(chunk, old);
	spin_unlock(&hash_lock);
	fsnotify_group_unlock(audit_tree_group);
	fsnotify_put_mark(mark); /* pair to fsnotify_find_mark */
	audit_mark_put_chunk(old);

	return 0;
}
0524
/*
 * Emit an AUDIT_CONFIG_CHANGE record for a tree rule being removed.
 * No-op when auditing is disabled or the record cannot be allocated.
 */
static void audit_tree_log_remove_rule(struct audit_context *context,
		struct audit_krule *rule)
{
	struct audit_buffer *ab;

	if (!audit_enabled)
		return;
	ab = audit_log_start(context, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
	if (unlikely(!ab))
		return;
	audit_log_format(ab, "op=remove_rule dir=");
	/* pathname may contain untrusted bytes — log it escaped */
	audit_log_untrustedstring(ab, rule->tree->pathname);
	audit_log_key(ab, rule->filterkey);
	audit_log_format(ab, " list=%d res=1", rule->listnr);
	audit_log_end(ab);
}
0541
/*
 * Detach and destroy every rule attached to @tree, logging each removal.
 * Entries are freed via RCU.  Callers in this file hold
 * audit_filter_mutex around this — TODO confirm that is the required lock.
 */
static void kill_rules(struct audit_context *context, struct audit_tree *tree)
{
	struct audit_krule *rule, *next;
	struct audit_entry *entry;

	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
		entry = container_of(rule, struct audit_entry, rule);

		list_del_init(&rule->rlist);
		if (rule->tree) {
			/* not a half-created rule: log and tear it down */
			audit_tree_log_remove_rule(context, rule);
			if (entry->rule.exe)
				audit_remove_mark(entry->rule.exe);
			rule->tree = NULL;
			list_del_rcu(&entry->list);
			list_del(&entry->rule.list);
			call_rcu(&entry->rcu, audit_free_rule_rcu);
		}
	}
}
0563
0564
0565
0566
0567
0568
/*
 * Remove chunks attached to @victim, starting from the head of its chunk
 * list.  When @tagged is true, stop at the first untagged node (bit 31
 * clear) — used by trim_marked(), which sorts tagged nodes to the front.
 * hash_lock is dropped around each untag_chunk() call; the mark is pinned
 * with an extra reference across that window.
 */
static void prune_tree_chunks(struct audit_tree *victim, bool tagged)
{
	spin_lock(&hash_lock);
	while (!list_empty(&victim->chunks)) {
		struct audit_node *p;
		struct audit_chunk *chunk;
		struct fsnotify_mark *mark;

		p = list_first_entry(&victim->chunks, struct audit_node, list);
		/* have we run out of marked? */
		if (tagged && !(p->index & (1U<<31)))
			break;
		chunk = find_chunk(p);
		mark = chunk->mark;
		remove_chunk_node(chunk, p);
		/* Racing with audit_tree_freeing_mark()? */
		if (!mark)
			continue;
		fsnotify_get_mark(mark);
		spin_unlock(&hash_lock);

		untag_chunk(chunk, mark);
		fsnotify_put_mark(mark);

		spin_lock(&hash_lock);
	}
	spin_unlock(&hash_lock);
}
0597
0598
0599
0600
/* Destroy @victim entirely: strip all its chunks, then drop the reference. */
static void prune_one(struct audit_tree *victim)
{
	prune_tree_chunks(victim, false);
	put_tree(victim);
}
0606
0607
0608
/*
 * Remove every chunk of @tree whose node carries the tag bit (1U<<31).
 * First partition the chunk list so tagged nodes come first, then let
 * prune_tree_chunks(tree, true) strip them.  If the tree ends up rootless
 * it is killed outright (rules removed, tree pruned).
 */
static void trim_marked(struct audit_tree *tree)
{
	struct list_head *p, *q;
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		return;
	}
	/* move tagged nodes to the front of the list */
	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
		struct audit_node *node = list_entry(p, struct audit_node, list);
		q = p->next;
		if (node->index & (1U<<31)) {
			list_del_init(p);
			list_add(p, &tree->chunks);
		}
	}
	spin_unlock(&hash_lock);

	prune_tree_chunks(tree, true);

	spin_lock(&hash_lock);
	if (!tree->root && !tree->goner) {
		/* nothing covers the root anymore: kill the whole tree */
		tree->goner = 1;
		spin_unlock(&hash_lock);
		mutex_lock(&audit_filter_mutex);
		kill_rules(audit_context(), tree);
		list_del_init(&tree->list);
		mutex_unlock(&audit_filter_mutex);
		prune_one(tree);
	} else {
		spin_unlock(&hash_lock);
	}
}
0643
0644 static void audit_schedule_prune(void);
0645
0646
/*
 * Detach @rule from its tree.  If that was the tree's last rule, mark the
 * tree a goner, queue it on prune_list and kick the pruning thread.
 * Returns 1 if the rule had a tree, 0 otherwise.
 * Presumably called with audit_filter_mutex held — TODO confirm at callers.
 */
int audit_remove_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *tree;
	tree = rule->tree;
	if (tree) {
		spin_lock(&hash_lock);
		list_del_init(&rule->rlist);
		if (list_empty(&tree->rules) && !tree->goner) {
			/* last rule gone: schedule the tree for destruction */
			tree->root = NULL;
			list_del_init(&tree->same_root);
			tree->goner = 1;
			list_move(&tree->list, &prune_list);
			rule->tree = NULL;
			spin_unlock(&hash_lock);
			audit_schedule_prune();
			return 1;
		}
		rule->tree = NULL;
		spin_unlock(&hash_lock);
		return 1;
	}
	return 0;
}
0670
/*
 * iterate_mounts() callback: does this mount's root inode hash to the key
 * passed in @arg (a chunk->key smuggled through the void pointer)?
 */
static int compare_root(struct vfsmount *mnt, void *arg)
{
	return inode_to_key(d_backing_inode(mnt->mnt_root)) ==
		(unsigned long)arg;
}
0676
/*
 * Drop chunks that no longer correspond to any mount under their tree's
 * pathname.  Walks tree_list with a cursor node so audit_filter_mutex can
 * be dropped per-tree (each tree is pinned with get_tree() meanwhile):
 * every chunk is tentatively tagged, tags are cleared for chunks whose
 * key matches a collected mount root, and trim_marked() removes the rest.
 */
void audit_trim_trees(void)
{
	struct list_head cursor;

	mutex_lock(&audit_filter_mutex);
	list_add(&cursor, &tree_list);
	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct path path;
		struct vfsmount *root_mnt;
		struct audit_node *node;
		int err;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		/* advance the cursor past this tree before unlocking */
		list_move(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path);
		if (err)
			goto skip_it;

		root_mnt = collect_mounts(&path);
		path_put(&path);
		if (IS_ERR(root_mnt))
			goto skip_it;

		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list) {
			struct audit_chunk *chunk = find_chunk(node);
			/* this could be NULL if the watch is dying else where... */
			node->index |= 1U<<31;
			if (iterate_mounts(compare_root,
					   (void *)(chunk->key),
					   root_mnt))
				node->index &= ~(1U<<31);
		}
		spin_unlock(&hash_lock);
		trim_marked(tree);
		drop_collected_mounts(root_mnt);
skip_it:
		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
}
0724
0725 int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
0726 {
0727
0728 if (pathname[0] != '/' ||
0729 (rule->listnr != AUDIT_FILTER_EXIT &&
0730 rule->listnr != AUDIT_FILTER_URING_EXIT) ||
0731 op != Audit_equal ||
0732 rule->inode_f || rule->watch || rule->tree)
0733 return -EINVAL;
0734 rule->tree = alloc_tree(pathname);
0735 if (!rule->tree)
0736 return -ENOMEM;
0737 return 0;
0738 }
0739
/* Public wrapper around put_tree() for code outside this file. */
void audit_put_tree(struct audit_tree *tree)
{
	put_tree(tree);
}
0744
/* iterate_mounts() callback: tag this mount's root inode for the tree in @arg. */
static int tag_mount(struct vfsmount *mnt, void *arg)
{
	return tag_chunk(d_backing_inode(mnt->mnt_root), arg);
}
0749
0750
0751
0752
0753
/*
 * Kthread main loop: sleep until audit_schedule_prune() wakes us, then
 * destroy every tree queued on prune_list.  audit_filter_mutex is dropped
 * around each prune_one() since the victim is already unlinked.
 * Never returns in practice (no kthread_should_stop() check).
 */
static int prune_tree_thread(void *unused)
{
	for (;;) {
		if (list_empty(&prune_list)) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
		}

		audit_ctl_lock();
		mutex_lock(&audit_filter_mutex);

		while (!list_empty(&prune_list)) {
			struct audit_tree *victim;

			victim = list_entry(prune_list.next,
					struct audit_tree, list);
			list_del_init(&victim->list);

			mutex_unlock(&audit_filter_mutex);

			prune_one(victim);

			mutex_lock(&audit_filter_mutex);
		}

		mutex_unlock(&audit_filter_mutex);
		audit_ctl_unlock();
	}
	return 0;
}
0784
0785 static int audit_launch_prune(void)
0786 {
0787 if (prune_thread)
0788 return 0;
0789 prune_thread = kthread_run(prune_tree_thread, NULL,
0790 "audit_prune_tree");
0791 if (IS_ERR(prune_thread)) {
0792 pr_err("cannot start thread audit_prune_tree");
0793 prune_thread = NULL;
0794 return -ENOMEM;
0795 }
0796 return 0;
0797 }
0798
0799
/*
 * Attach @rule to a watched tree, reusing an existing tree with the same
 * pathname or registering the rule's own (seed) tree, then tag every
 * mount under the path.  Called with audit_filter_mutex held (it is
 * dropped across path lookup/tagging and retaken — NOTE(review): confirm
 * the caller's locking contract).  Returns 0, -ENOENT if the rule was
 * removed while we were tagging, or a negative errno.
 */
int audit_add_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *seed = rule->tree, *tree;
	struct path path;
	struct vfsmount *mnt;
	int err;

	rule->tree = NULL;
	list_for_each_entry(tree, &tree_list, list) {
		if (!strcmp(seed->pathname, tree->pathname)) {
			/* same path already watched: share that tree */
			put_tree(seed);
			rule->tree = tree;
			list_add(&rule->rlist, &tree->rules);
			return 0;
		}
	}
	tree = seed;
	list_add(&tree->list, &tree_list);
	list_add(&rule->rlist, &tree->rules);
	/* do not set rule->tree yet */
	mutex_unlock(&audit_filter_mutex);

	if (unlikely(!prune_thread)) {
		err = audit_launch_prune();
		if (err)
			goto Err;
	}

	err = kern_path(tree->pathname, 0, &path);
	if (err)
		goto Err;
	mnt = collect_mounts(&path);
	path_put(&path);
	if (IS_ERR(mnt)) {
		err = PTR_ERR(mnt);
		goto Err;
	}

	get_tree(tree);
	err = iterate_mounts(tag_mount, tree, mnt);
	drop_collected_mounts(mnt);

	if (!err) {
		/* success: clear the provisional tag bit on every chunk */
		struct audit_node *node;
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list)
			node->index &= ~(1U<<31);
		spin_unlock(&hash_lock);
	} else {
		trim_marked(tree);
		goto Err;
	}

	mutex_lock(&audit_filter_mutex);
	if (list_empty(&rule->rlist)) {
		/* rule was detached while the mutex was dropped */
		put_tree(tree);
		return -ENOENT;
	}
	rule->tree = tree;
	put_tree(tree);

	return 0;
Err:
	mutex_lock(&audit_filter_mutex);
	list_del_init(&tree->list);
	list_del_init(&tree->rules);
	put_tree(tree);
	return err;
}
0869
/*
 * Propagate tags after a mount move: tag the mounts collected under @new
 * for every tree whose pathname lies under @old.  Uses two sentinel list
 * nodes — @cursor to walk tree_list with the mutex droppable per tree,
 * and @barrier to collect processed trees for the fixup pass (clear tag
 * bits on success, trim on failure).  Returns 0 or the first error.
 */
int audit_tag_tree(char *old, char *new)
{
	struct list_head cursor, barrier;
	int failed = 0;
	struct path path1, path2;
	struct vfsmount *tagged;
	int err;

	err = kern_path(new, 0, &path2);
	if (err)
		return err;
	tagged = collect_mounts(&path2);
	path_put(&path2);
	if (IS_ERR(tagged))
		return PTR_ERR(tagged);

	err = kern_path(old, 0, &path1);
	if (err) {
		drop_collected_mounts(tagged);
		return err;
	}

	mutex_lock(&audit_filter_mutex);
	list_add(&barrier, &tree_list);
	list_add(&cursor, &barrier);

	/* pass 1: tag matching trees, parking them behind @barrier */
	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		int good_one = 0;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_move(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path2);
		if (!err) {
			good_one = path_is_under(&path1, &path2);
			path_put(&path2);
		}

		if (!good_one) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			continue;
		}

		failed = iterate_mounts(tag_mount, tree, tagged);
		if (failed) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			break;
		}

		mutex_lock(&audit_filter_mutex);
		spin_lock(&hash_lock);
		if (!tree->goner) {
			/* moves to the head, i.e. before @barrier */
			list_move(&tree->list, &tree_list);
		}
		spin_unlock(&hash_lock);
		put_tree(tree);
	}

	/* pass 2: fix up everything parked before @barrier */
	while (barrier.prev != &tree_list) {
		struct audit_tree *tree;

		tree = container_of(barrier.prev, struct audit_tree, list);
		get_tree(tree);
		list_move(&tree->list, &barrier);
		mutex_unlock(&audit_filter_mutex);

		if (!failed) {
			struct audit_node *node;
			spin_lock(&hash_lock);
			list_for_each_entry(node, &tree->chunks, list)
				node->index &= ~(1U<<31);
			spin_unlock(&hash_lock);
		} else {
			trim_marked(tree);
		}

		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&barrier);
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
	path_put(&path1);
	drop_collected_mounts(tagged);
	return failed;
}
0961
0962
/* Kick the pruning kthread; callers must have queued work on prune_list. */
static void audit_schedule_prune(void)
{
	wake_up_process(prune_thread);
}
0967
0968
0969
0970
0971
/*
 * Destroy every tree on the context's killed_trees list (populated by
 * evict_chunk() when trees die during syscall processing): remove its
 * rules, unlink it, and prune it.  audit_filter_mutex is dropped around
 * each prune_one() since the victim is already off the list.
 */
void audit_kill_trees(struct audit_context *context)
{
	struct list_head *list = &context->killed_trees;

	audit_ctl_lock();
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(list)) {
		struct audit_tree *victim;

		victim = list_entry(list->next, struct audit_tree, list);
		kill_rules(context, victim);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
	audit_ctl_unlock();
}
0996
0997
0998
0999
1000
/*
 * The watched inode is going away: kill every tree rooted at this chunk
 * and remove the chunk from the hash.  Trees are queued either on the
 * current context's killed_trees list (destroyed later by
 * audit_kill_trees()) or, if there is none, on prune_list for the
 * pruning thread.
 */
static void evict_chunk(struct audit_chunk *chunk)
{
	struct audit_tree *owner;
	struct list_head *postponed = audit_killed_trees();
	int need_prune = 0;
	int n;

	mutex_lock(&audit_filter_mutex);
	spin_lock(&hash_lock);
	while (!list_empty(&chunk->trees)) {
		owner = list_entry(chunk->trees.next,
				struct audit_tree, same_root);
		owner->goner = 1;
		owner->root = NULL;
		list_del_init(&owner->same_root);
		spin_unlock(&hash_lock);
		if (!postponed) {
			kill_rules(audit_context(), owner);
			list_move(&owner->list, &prune_list);
			need_prune = 1;
		} else {
			list_move(&owner->list, postponed);
		}
		spin_lock(&hash_lock);
	}
	list_del_rcu(&chunk->hash);
	for (n = 0; n < chunk->count; n++)
		list_del_init(&chunk->owners[n].list);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_filter_mutex);
	if (need_prune)
		audit_schedule_prune();
}
1034
/*
 * fsnotify ->handle_inode_event: audit trees don't act on events (the
 * mark mask is FS_IN_IGNORED; teardown happens via ->freeing_mark).
 */
static int audit_tree_handle_event(struct fsnotify_mark *mark, u32 mask,
		struct inode *inode, struct inode *dir,
		const struct qstr *file_name, u32 cookie)
{
	return 0;
}
1041
/*
 * fsnotify ->freeing_mark callback: the mark (and its inode) is going
 * away.  Sever the mark<->chunk link under hash_lock, then evict the
 * chunk and drop its reference via RCU.
 */
static void audit_tree_freeing_mark(struct fsnotify_mark *mark,
		struct fsnotify_group *group)
{
	struct audit_chunk *chunk;

	fsnotify_group_lock(mark->group);
	spin_lock(&hash_lock);
	chunk = mark_chunk(mark);
	replace_mark_chunk(mark, NULL);
	spin_unlock(&hash_lock);
	fsnotify_group_unlock(mark->group);
	if (chunk) {
		evict_chunk(chunk);
		audit_mark_put_chunk(chunk);
	}

	/*
	 * We are guaranteed to have at least one reference to the mark from
	 * either the inode or the caller of fsnotify_destroy_mark().
	 */
	BUG_ON(refcount_read(&mark->refcnt) < 1);
}
1064
/* fsnotify callbacks for the audit tree group. */
static const struct fsnotify_ops audit_tree_ops = {
	.handle_inode_event = audit_tree_handle_event,
	.freeing_mark = audit_tree_freeing_mark,
	.free_mark = audit_tree_destroy_watch,
};
1070
/*
 * Boot-time setup: create the mark slab cache (SLAB_PANIC — cannot fail
 * quietly), allocate the fsnotify group, and initialize the hash buckets.
 * A group allocation failure triggers audit_panic() but initialization
 * still returns 0.
 */
static int __init audit_tree_init(void)
{
	int i;

	audit_tree_mark_cachep = KMEM_CACHE(audit_tree_mark, SLAB_PANIC);

	audit_tree_group = fsnotify_alloc_group(&audit_tree_ops, 0);
	if (IS_ERR(audit_tree_group))
		audit_panic("cannot initialize fsnotify group for rectree watches");

	for (i = 0; i < HASH_SIZE; i++)
		INIT_LIST_HEAD(&chunk_hash_heads[i]);

	return 0;
}
__initcall(audit_tree_init);