#include "audit.h"
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>

struct audit_tree;
struct audit_chunk;

struct audit_tree {
        atomic_t count;
        int goner;
        struct audit_chunk *root;
        struct list_head chunks;
        struct list_head rules;
        struct list_head list;
        struct list_head same_root;
        struct rcu_head head;
        char pathname[];
};
struct audit_chunk {
        struct list_head hash;
        struct fsnotify_mark_entry mark;
        struct list_head trees;         /* with root here */
        int dead;
        int count;
        atomic_long_t refs;
        struct rcu_head head;
        struct node {
                struct list_head list;
                struct audit_tree *owner;
                unsigned index;         /* index; upper bit indicates 'will prune' */
        } owners[];
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);
/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list             hash_lock
 * tree.rules anchors rule.rlist                       audit_filter_mutex
 * chunk.trees anchors tree.same_root                  hash_lock
 * chunk.hash is a hash with middle bits of mark.inode as
 * a hash function.                                    RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with pointer to it.
 *
 * chunk is refcounted by embedded fsnotify_mark_entry + .refs (non-zero
 * refcount of the mark contributes 1 to .refs).
 *
 * node.index allows to get from node.list to containing chunk.
 * MSB of that sucker is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logics and
 * that makes a difference. Some.
 */
static struct fsnotify_group *audit_tree_group;
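
/* allocate a tree with refcount 1; the watched path is copied right behind the struct */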
static struct audit_tree *alloc_tree(const char *s)
{
        struct audit_tree *tree;

        tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
        if (tree) {
                atomic_set(&tree->count, 1);
                tree->goner = 0;
                INIT_LIST_HEAD(&tree->chunks);
                INIT_LIST_HEAD(&tree->rules);
                INIT_LIST_HEAD(&tree->list);
                INIT_LIST_HEAD(&tree->same_root);
                tree->root = NULL;
                strcpy(tree->pathname, s);
        }
        return tree;
}
static inline void get_tree(struct audit_tree *tree)
{
        atomic_inc(&tree->count);
}

static void __put_tree(struct rcu_head *rcu)
{
        struct audit_tree *tree = container_of(rcu, struct audit_tree, head);
        kfree(tree);
}

static inline void put_tree(struct audit_tree *tree)
{
        if (atomic_dec_and_test(&tree->count))
                call_rcu(&tree->head, __put_tree);
}

/* to avoid bringing the entire thing in audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
        return tree->pathname;
}
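
/* drop the tree references held by the chunk's owner slots, then free the chunk */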
static void free_chunk(struct audit_chunk *chunk)
{
        int i;

        for (i = 0; i < chunk->count; i++) {
                if (chunk->owners[i].owner)
                        put_tree(chunk->owners[i].owner);
        }
        kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
        if (atomic_long_dec_and_test(&chunk->refs))
                free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
        struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
        audit_put_chunk(chunk);
}

static void audit_tree_destroy_watch(struct fsnotify_mark_entry *entry)
{
        struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
        call_rcu(&chunk->head, __put_chunk);
}
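
/*
 * allocate a chunk with 'count' owner slots; it starts with one reference
 * and an initialized (but not yet attached) fsnotify mark
 */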
static struct audit_chunk *alloc_chunk(int count)
{
        struct audit_chunk *chunk;
        size_t size;
        int i;

        size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
        chunk = kzalloc(size, GFP_KERNEL);
        if (!chunk)
                return NULL;

        INIT_LIST_HEAD(&chunk->hash);
        INIT_LIST_HEAD(&chunk->trees);
        chunk->count = count;
        atomic_long_set(&chunk->refs, 1);
        for (i = 0; i < count; i++) {
                INIT_LIST_HEAD(&chunk->owners[i].list);
                chunk->owners[i].index = i;
        }
        fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
        return chunk;
}
enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);
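
/*
 * hash on the inode address; dividing by L1_CACHE_BYTES discards low bits
 * that are mostly identical for slab-allocated inodes
 */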
static inline struct list_head *chunk_hash(const struct inode *inode)
{
        unsigned long n = (unsigned long)inode / L1_CACHE_BYTES;
        return chunk_hash_heads + n % HASH_SIZE;
}

/* hash_lock & entry->lock is held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
        struct fsnotify_mark_entry *entry = &chunk->mark;
        struct list_head *list;

        if (!entry->inode)
                return;
        list = chunk_hash(entry->inode);
        list_add_rcu(&chunk->hash, list);
}
/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
        struct list_head *list = chunk_hash(inode);
        struct audit_chunk *p;

        list_for_each_entry_rcu(p, list, hash) {
                /* mark.inode may have gone NULL, but who cares? */
                if (p->mark.inode == inode) {
                        atomic_long_inc(&p->refs);
                        return p;
                }
        }
        return NULL;
}
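
/* is 'tree' one of the owners of this chunk? */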
int audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
        int n;
        for (n = 0; n < chunk->count; n++)
                if (chunk->owners[n].owner == tree)
                        return 1;
        return 0;
}

/* tagging and untagging inodes with trees */
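
/*
 * The low bits of node->index are the node's position in chunk->owners[],
 * so stepping back that many entries lands on owners[0] and container_of()
 * recovers the chunk.
 */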
static struct audit_chunk *find_chunk(struct node *p)
{
        int index = p->index & ~(1U<<31);
        p -= index;
        return container_of(p, struct audit_chunk, owners[0]);
}
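
/*
 * Remove the tagging *p from its chunk.  Called with hash_lock held; drops
 * and retakes it.  If *p was the only owner the chunk is torn down; otherwise
 * the chunk is replaced by a copy with one owner slot fewer.  If allocating
 * the replacement fails, fall back to just clearing the slot in place.
 */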
static void untag_chunk(struct node *p)
{
        struct audit_chunk *chunk = find_chunk(p);
        struct fsnotify_mark_entry *entry = &chunk->mark;
        struct audit_chunk *new;
        struct audit_tree *owner;
        int size = chunk->count - 1;
        int i, j;

        fsnotify_get_mark(entry);

        spin_unlock(&hash_lock);

        spin_lock(&entry->lock);
        if (chunk->dead || !entry->inode) {
                spin_unlock(&entry->lock);
                goto out;
        }

        owner = p->owner;

        if (!size) {
                chunk->dead = 1;
                spin_lock(&hash_lock);
                list_del_init(&chunk->trees);
                if (owner->root == chunk)
                        owner->root = NULL;
                list_del_init(&p->list);
                list_del_rcu(&chunk->hash);
                spin_unlock(&hash_lock);
                spin_unlock(&entry->lock);
                fsnotify_destroy_mark_by_entry(entry);
                fsnotify_put_mark(entry);
                goto out;
        }

        new = alloc_chunk(size);
        if (!new)
                goto Fallback;
        fsnotify_duplicate_mark(&new->mark, entry);
        if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.inode, 1)) {
                free_chunk(new);
                goto Fallback;
        }

        chunk->dead = 1;
        spin_lock(&hash_lock);
        list_replace_init(&chunk->trees, &new->trees);
        if (owner->root == chunk) {
                list_del_init(&owner->same_root);
                owner->root = NULL;
        }

        for (i = j = 0; j <= size; i++, j++) {
                struct audit_tree *s;
                if (&chunk->owners[j] == p) {
                        list_del_init(&p->list);
                        i--;
                        continue;
                }
                s = chunk->owners[j].owner;
                new->owners[i].owner = s;
                new->owners[i].index = chunk->owners[j].index - j + i;
                if (!s) /* result of earlier fallback */
                        continue;
                get_tree(s);
                list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
        }

        list_replace_rcu(&chunk->hash, &new->hash);
        list_for_each_entry(owner, &new->trees, same_root)
                owner->root = new;
        spin_unlock(&hash_lock);
        spin_unlock(&entry->lock);
        fsnotify_destroy_mark_by_entry(entry);
        fsnotify_put_mark(entry);
        goto out;

Fallback:
        // do the best we can
        spin_lock(&hash_lock);
        if (owner->root == chunk) {
                list_del_init(&owner->same_root);
                owner->root = NULL;
        }
        list_del_init(&p->list);
        p->owner = NULL;
        put_tree(owner);
        spin_unlock(&hash_lock);
        spin_unlock(&entry->lock);
out:
        fsnotify_put_mark(entry);
        spin_lock(&hash_lock);
}
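
/* attach a fresh single-owner chunk to an inode that is not tagged yet */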
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
        struct fsnotify_mark_entry *entry;
        struct audit_chunk *chunk = alloc_chunk(1);
        if (!chunk)
                return -ENOMEM;

        entry = &chunk->mark;
        if (fsnotify_add_mark(entry, audit_tree_group, inode, 0)) {
                free_chunk(chunk);
                return -ENOSPC;
        }

        spin_lock(&entry->lock);
        spin_lock(&hash_lock);
        if (tree->goner) {
                spin_unlock(&hash_lock);
                chunk->dead = 1;
                spin_unlock(&entry->lock);
                fsnotify_destroy_mark_by_entry(entry);
                fsnotify_put_mark(entry);
                return 0;
        }
        chunk->owners[0].index = (1U << 31);
        chunk->owners[0].owner = tree;
        get_tree(tree);
        list_add(&chunk->owners[0].list, &tree->chunks);
        if (!tree->root) {
                tree->root = chunk;
                list_add(&tree->same_root, &chunk->trees);
        }
        insert_hash(chunk);
        spin_unlock(&hash_lock);
        spin_unlock(&entry->lock);
        return 0;
}
/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
        struct fsnotify_mark_entry *old_entry, *chunk_entry;
        struct audit_tree *owner;
        struct audit_chunk *chunk, *old;
        struct node *p;
        int n;

        spin_lock(&inode->i_lock);
        old_entry = fsnotify_find_mark_entry(audit_tree_group, inode);
        spin_unlock(&inode->i_lock);
        if (!old_entry)
                return create_chunk(inode, tree);

        old = container_of(old_entry, struct audit_chunk, mark);

        /* are we already there? */
        spin_lock(&hash_lock);
        for (n = 0; n < old->count; n++) {
                if (old->owners[n].owner == tree) {
                        spin_unlock(&hash_lock);
                        fsnotify_put_mark(old_entry);
                        return 0;
                }
        }
        spin_unlock(&hash_lock);

        chunk = alloc_chunk(old->count + 1);
        if (!chunk)
                return -ENOMEM;
        chunk_entry = &chunk->mark;

        spin_lock(&old_entry->lock);
        if (!old_entry->inode) {
                /* old_entry is being shot, let's just lie */
                spin_unlock(&old_entry->lock);
                fsnotify_put_mark(old_entry);
                free_chunk(chunk);
                return -ENOENT;
        }

        fsnotify_duplicate_mark(chunk_entry, old_entry);
        if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->inode, 1)) {
                spin_unlock(&old_entry->lock);
                free_chunk(chunk);
                fsnotify_put_mark(old_entry);
                return -ENOSPC;
        }

        /*
         * Even though we hold old_entry->lock, this is safe since
         * chunk_entry->lock could NEVER have been grabbed before.
         */
        spin_lock(&chunk_entry->lock);
        spin_lock(&hash_lock);

        /* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
        if (tree->goner) {
                spin_unlock(&hash_lock);
                chunk->dead = 1;
                spin_unlock(&chunk_entry->lock);
                spin_unlock(&old_entry->lock);

                fsnotify_destroy_mark_by_entry(chunk_entry);

                fsnotify_put_mark(chunk_entry);
                fsnotify_put_mark(old_entry);
                return 0;
        }
        list_replace_init(&old->trees, &chunk->trees);
        for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
                struct audit_tree *s = old->owners[n].owner;
                p->owner = s;
                p->index = old->owners[n].index;
                if (!s) /* result of fallback in untag */
                        continue;
                get_tree(s);
                list_replace_init(&old->owners[n].list, &p->list);
        }
        p->index = (chunk->count - 1) | (1U<<31);
        p->owner = tree;
        get_tree(tree);
        list_add(&p->list, &tree->chunks);
        list_replace_rcu(&old->hash, &chunk->hash);
        list_for_each_entry(owner, &chunk->trees, same_root)
                owner->root = chunk;
        old->dead = 1;
        if (!tree->root) {
                tree->root = chunk;
                list_add(&tree->same_root, &chunk->trees);
        }
        spin_unlock(&hash_lock);
        spin_unlock(&chunk_entry->lock);
        spin_unlock(&old_entry->lock);
        fsnotify_destroy_mark_by_entry(old_entry);
        fsnotify_put_mark(old_entry);
        return 0;
}
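
/*
 * Detach every rule hanging off this tree, logging an AUDIT_CONFIG_CHANGE
 * record for each fully set-up one; the rule entries themselves are freed
 * via RCU.  Called with audit_filter_mutex held.
 */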
static void kill_rules(struct audit_tree *tree)
{
        struct audit_krule *rule, *next;
        struct audit_entry *entry;
        struct audit_buffer *ab;

        list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
                entry = container_of(rule, struct audit_entry, rule);

                list_del_init(&rule->rlist);
                if (rule->tree) {
                        /* not a half-baked one */
                        ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
                        audit_log_format(ab, "op=");
                        audit_log_string(ab, "remove rule");
                        audit_log_format(ab, " dir=");
                        audit_log_untrustedstring(ab, rule->tree->pathname);
                        audit_log_key(ab, rule->filterkey);
                        audit_log_format(ab, " list=%d res=1", rule->listnr);
                        audit_log_end(ab);
                        rule->tree = NULL;
                        list_del_rcu(&entry->list);
                        list_del(&entry->rule.list);
                        call_rcu(&entry->rcu, audit_free_rule_rcu);
                }
        }
}
/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
        spin_lock(&hash_lock);
        while (!list_empty(&victim->chunks)) {
                struct node *p;

                p = list_entry(victim->chunks.next, struct node, list);

                untag_chunk(p);
        }
        spin_unlock(&hash_lock);
        put_tree(victim);
}
/* trim the uncommitted chunks from tree */

static void trim_marked(struct audit_tree *tree)
{
        struct list_head *p, *q;
        spin_lock(&hash_lock);
        if (tree->goner) {
                spin_unlock(&hash_lock);
                return;
        }
        /* reorder: move the marked (uncommitted) chunks to the front */
        for (p = tree->chunks.next; p != &tree->chunks; p = q) {
                struct node *node = list_entry(p, struct node, list);
                q = p->next;
                if (node->index & (1U<<31)) {
                        list_del_init(p);
                        list_add(p, &tree->chunks);
                }
        }

        while (!list_empty(&tree->chunks)) {
                struct node *node;

                node = list_entry(tree->chunks.next, struct node, list);

                /* have we run out of marked? */
                if (!(node->index & (1U<<31)))
                        break;

                untag_chunk(node);
        }
        if (!tree->root && !tree->goner) {
                tree->goner = 1;
                spin_unlock(&hash_lock);
                mutex_lock(&audit_filter_mutex);
                kill_rules(tree);
                list_del_init(&tree->list);
                mutex_unlock(&audit_filter_mutex);
                prune_one(tree);
        } else {
                spin_unlock(&hash_lock);
        }
}
static void audit_schedule_prune(void);

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
        struct audit_tree *tree;
        tree = rule->tree;
        if (tree) {
                spin_lock(&hash_lock);
                list_del_init(&rule->rlist);
                if (list_empty(&tree->rules) && !tree->goner) {
                        tree->root = NULL;
                        list_del_init(&tree->same_root);
                        tree->goner = 1;
                        list_move(&tree->list, &prune_list);
                        rule->tree = NULL;
                        spin_unlock(&hash_lock);
                        audit_schedule_prune();
                        return 1;
                }
                rule->tree = NULL;
                spin_unlock(&hash_lock);
                return 1;
        }
        return 0;
}
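
/*
 * Re-resolve every tree's pathname and drop the chunks whose inodes are no
 * longer the root of a mount collected under that path.
 */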
void audit_trim_trees(void)
{
        struct list_head cursor;

        mutex_lock(&audit_filter_mutex);
        list_add(&cursor, &tree_list);
        while (cursor.next != &tree_list) {
                struct audit_tree *tree;
                struct path path;
                struct vfsmount *root_mnt;
                struct node *node;
                struct list_head list;
                int err;

                tree = container_of(cursor.next, struct audit_tree, list);
                get_tree(tree);
                list_del(&cursor);
                list_add(&cursor, &tree->list);
                mutex_unlock(&audit_filter_mutex);

                err = kern_path(tree->pathname, 0, &path);
                if (err)
                        goto skip_it;

                root_mnt = collect_mounts(&path);
                path_put(&path);
                if (!root_mnt)
                        goto skip_it;

                list_add_tail(&list, &root_mnt->mnt_list);
                spin_lock(&hash_lock);
                list_for_each_entry(node, &tree->chunks, list) {
                        struct audit_chunk *chunk = find_chunk(node);
                        /* this could be NULL if the watch is dying elsewhere... */
                        struct inode *inode = chunk->mark.inode;
                        struct vfsmount *mnt;
                        node->index |= 1U<<31;
                        list_for_each_entry(mnt, &list, mnt_list) {
                                if (mnt->mnt_root->d_inode == inode) {
                                        node->index &= ~(1U<<31);
                                        break;
                                }
                        }
                }
                spin_unlock(&hash_lock);
                trim_marked(tree);
                put_tree(tree);
                list_del_init(&list);
                drop_collected_mounts(root_mnt);
skip_it:
                mutex_lock(&audit_filter_mutex);
        }
        list_del(&cursor);
        mutex_unlock(&audit_filter_mutex);
}
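
/* is the location (mnt, dentry) somewhere below 'path'? */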
static int is_under(struct vfsmount *mnt, struct dentry *dentry,
                    struct path *path)
{
        if (mnt != path->mnt) {
                for (;;) {
                        if (mnt->mnt_parent == mnt)
                                return 0;
                        if (mnt->mnt_parent == path->mnt)
                                break;
                        mnt = mnt->mnt_parent;
                }
                dentry = mnt->mnt_mountpoint;
        }
        return is_subdir(dentry, path->dentry);
}
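
/*
 * Called while a rule is being parsed: a tree may only be attached to an
 * exit-list rule using an equality op and an absolute path, and only if the
 * rule has no inode/watch/tree set on it yet.
 */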
int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{
        if (pathname[0] != '/' ||
            rule->listnr != AUDIT_FILTER_EXIT ||
            op != Audit_equal ||
            rule->inode_f || rule->watch || rule->tree)
                return -EINVAL;
        rule->tree = alloc_tree(pathname);
        if (!rule->tree)
                return -ENOMEM;
        return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
        put_tree(tree);
}
/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
        struct audit_tree *seed = rule->tree, *tree;
        struct path path;
        struct vfsmount *mnt, *p;
        struct list_head list;
        int err;

        list_for_each_entry(tree, &tree_list, list) {
                if (!strcmp(seed->pathname, tree->pathname)) {
                        put_tree(seed);
                        rule->tree = tree;
                        list_add(&rule->rlist, &tree->rules);
                        return 0;
                }
        }
        tree = seed;
        list_add(&tree->list, &tree_list);
        list_add(&rule->rlist, &tree->rules);
        /* do not set rule->tree yet */
        mutex_unlock(&audit_filter_mutex);

        err = kern_path(tree->pathname, 0, &path);
        if (err)
                goto Err;
        mnt = collect_mounts(&path);
        path_put(&path);
        if (!mnt) {
                err = -ENOMEM;
                goto Err;
        }
        list_add_tail(&list, &mnt->mnt_list);

        get_tree(tree);
        list_for_each_entry(p, &list, mnt_list) {
                err = tag_chunk(p->mnt_root->d_inode, tree);
                if (err)
                        break;
        }

        list_del(&list);
        drop_collected_mounts(mnt);

        if (!err) {
                struct node *node;
                spin_lock(&hash_lock);
                list_for_each_entry(node, &tree->chunks, list)
                        node->index &= ~(1U<<31);
                spin_unlock(&hash_lock);
        } else {
                trim_marked(tree);
                goto Err;
        }

        mutex_lock(&audit_filter_mutex);
        if (list_empty(&rule->rlist)) {
                put_tree(tree);
                return -ENOENT;
        }
        rule->tree = tree;
        put_tree(tree);

        return 0;
Err:
        mutex_lock(&audit_filter_mutex);
        list_del_init(&tree->list);
        list_del_init(&tree->rules);
        put_tree(tree);
        return err;
}
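
/*
 * Tag every audit tree that the location 'old' falls under with the roots
 * of the mounts collected under 'new'; returns the first tag_chunk()
 * failure, if any.
 */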
int audit_tag_tree(char *old, char *new)
{
        struct list_head cursor, barrier;
        int failed = 0;
        struct path path;
        struct vfsmount *tagged;
        struct list_head list;
        struct vfsmount *mnt;
        struct dentry *dentry;
        int err;

        err = kern_path(new, 0, &path);
        if (err)
                return err;
        tagged = collect_mounts(&path);
        path_put(&path);
        if (!tagged)
                return -ENOMEM;

        err = kern_path(old, 0, &path);
        if (err) {
                drop_collected_mounts(tagged);
                return err;
        }
        mnt = mntget(path.mnt);
        dentry = dget(path.dentry);
        path_put(&path);

        list_add_tail(&list, &tagged->mnt_list);

        mutex_lock(&audit_filter_mutex);
        list_add(&barrier, &tree_list);
        list_add(&cursor, &barrier);

        while (cursor.next != &tree_list) {
                struct audit_tree *tree;
                struct vfsmount *p;

                tree = container_of(cursor.next, struct audit_tree, list);
                get_tree(tree);
                list_del(&cursor);
                list_add(&cursor, &tree->list);
                mutex_unlock(&audit_filter_mutex);

                err = kern_path(tree->pathname, 0, &path);
                if (err) {
                        put_tree(tree);
                        mutex_lock(&audit_filter_mutex);
                        continue;
                }

                spin_lock(&vfsmount_lock);
                if (!is_under(mnt, dentry, &path)) {
                        spin_unlock(&vfsmount_lock);
                        path_put(&path);
                        put_tree(tree);
                        mutex_lock(&audit_filter_mutex);
                        continue;
                }
                spin_unlock(&vfsmount_lock);
                path_put(&path);

                list_for_each_entry(p, &list, mnt_list) {
                        failed = tag_chunk(p->mnt_root->d_inode, tree);
                        if (failed)
                                break;
                }

                if (failed) {
                        put_tree(tree);
                        mutex_lock(&audit_filter_mutex);
                        break;
                }

                mutex_lock(&audit_filter_mutex);
                spin_lock(&hash_lock);
                if (!tree->goner) {
                        list_del(&tree->list);
                        list_add(&tree->list, &tree_list);
                }
                spin_unlock(&hash_lock);
                put_tree(tree);
        }

        while (barrier.prev != &tree_list) {
                struct audit_tree *tree;

                tree = container_of(barrier.prev, struct audit_tree, list);
                get_tree(tree);
                list_del(&tree->list);
                list_add(&tree->list, &barrier);
                mutex_unlock(&audit_filter_mutex);

                if (!failed) {
                        struct node *node;
                        spin_lock(&hash_lock);
                        list_for_each_entry(node, &tree->chunks, list)
                                node->index &= ~(1U<<31);
                        spin_unlock(&hash_lock);
                } else {
                        trim_marked(tree);
                }

                put_tree(tree);
                mutex_lock(&audit_filter_mutex);
        }
        list_del(&barrier);
        list_del(&cursor);
        list_del(&list);
        mutex_unlock(&audit_filter_mutex);
        dput(dentry);
        mntput(mnt);
        drop_collected_mounts(tagged);
        return failed;
}
/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
        mutex_lock(&audit_cmd_mutex);
        mutex_lock(&audit_filter_mutex);

        while (!list_empty(&prune_list)) {
                struct audit_tree *victim;

                victim = list_entry(prune_list.next, struct audit_tree, list);
                list_del_init(&victim->list);

                mutex_unlock(&audit_filter_mutex);

                prune_one(victim);

                mutex_lock(&audit_filter_mutex);
        }

        mutex_unlock(&audit_filter_mutex);
        mutex_unlock(&audit_cmd_mutex);
        return 0;
}

static void audit_schedule_prune(void)
{
        kthread_run(prune_tree_thread, NULL, "audit_prune_tree");
}
/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of syscall. Runs synchronously.
 */
void audit_kill_trees(struct list_head *list)
{
        mutex_lock(&audit_cmd_mutex);
        mutex_lock(&audit_filter_mutex);

        while (!list_empty(list)) {
                struct audit_tree *victim;

                victim = list_entry(list->next, struct audit_tree, list);
                kill_rules(victim);
                list_del_init(&victim->list);

                mutex_unlock(&audit_filter_mutex);

                prune_one(victim);

                mutex_lock(&audit_filter_mutex);
        }

        mutex_unlock(&audit_filter_mutex);
        mutex_unlock(&audit_cmd_mutex);
}
/*
 * Here comes the stuff asynchronous to auditctl operations
 */

/* called via fsnotify when the mark on a watched inode is being destroyed */
static void evict_chunk(struct audit_chunk *chunk)
{
        struct audit_tree *owner;
        struct list_head *postponed = audit_killed_trees();
        int need_prune = 0;
        int n;

        if (chunk->dead)
                return;

        chunk->dead = 1;
        mutex_lock(&audit_filter_mutex);
        spin_lock(&hash_lock);
        while (!list_empty(&chunk->trees)) {
                owner = list_entry(chunk->trees.next,
                                   struct audit_tree, same_root);
                owner->goner = 1;
                owner->root = NULL;
                list_del_init(&owner->same_root);
                spin_unlock(&hash_lock);
                if (!postponed) {
                        kill_rules(owner);
                        list_move(&owner->list, &prune_list);
                        need_prune = 1;
                } else {
                        list_move(&owner->list, postponed);
                }
                spin_lock(&hash_lock);
        }
        list_del_rcu(&chunk->hash);
        for (n = 0; n < chunk->count; n++)
                list_del_init(&chunk->owners[n].list);
        spin_unlock(&hash_lock);
        if (need_prune)
                audit_schedule_prune();
        mutex_unlock(&audit_filter_mutex);
}
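
/*
 * should_send_event always says no, so fsnotify must never hand this group
 * an event; the callback that matters is freeing_mark, which evicts the
 * chunk when its mark goes away.
 */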
static int audit_tree_handle_event(struct fsnotify_group *group, struct fsnotify_event *event)
{
        BUG();
        return -EOPNOTSUPP;
}

static void audit_tree_freeing_mark(struct fsnotify_mark_entry *entry, struct fsnotify_group *group)
{
        struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);

        evict_chunk(chunk);
        fsnotify_put_mark(entry);
}

static bool audit_tree_send_event(struct fsnotify_group *group, struct inode *inode,
                                  __u32 mask, void *data, int data_type)
{
        return 0;
}

static const struct fsnotify_ops audit_tree_ops = {
        .handle_event = audit_tree_handle_event,
        .should_send_event = audit_tree_send_event,
        .free_group_priv = NULL,
        .free_event_priv = NULL,
        .freeing_mark = audit_tree_freeing_mark,
};
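
/* set up the fsnotify group and the chunk hash table at boot */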
static int __init audit_tree_init(void)
{
        int i;

        audit_tree_group = fsnotify_obtain_group(0, &audit_tree_ops);
        if (IS_ERR(audit_tree_group))
                audit_panic("cannot initialize fsnotify group for rectree watches");

        for (i = 0; i < HASH_SIZE; i++)
                INIT_LIST_HEAD(&chunk_hash_heads[i]);

        return 0;
}
__initcall(audit_tree_init);