kernel/audit_tree.c
#include "audit.h"
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/slab.h>

struct audit_tree;
struct audit_chunk;

struct audit_tree {
        atomic_t count;
        int goner;
        struct audit_chunk *root;
        struct list_head chunks;
        struct list_head rules;
        struct list_head list;
        struct list_head same_root;
        struct rcu_head head;
        char pathname[];
};

struct audit_chunk {
        struct list_head hash;
        struct fsnotify_mark mark;
        struct list_head trees;         /* with root here */
        int dead;
        int count;
        atomic_long_t refs;
        struct rcu_head head;
        struct node {
                struct list_head list;
                struct audit_tree *owner;
                unsigned index;         /* index; upper bit indicates 'will prune' */
        } owners[];
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);

/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list                hash_lock
 * tree.rules anchors rule.rlist                          audit_filter_mutex
 * chunk.trees anchors tree.same_root                     hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.                                       RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with pointer to it.
 *
 * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount
 * of watch contributes 1 to .refs).
 *
 * node.index allows one to get from node.list to the containing chunk.
 * The MSB of that sucker is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logic and
 * that makes a difference.  Some.
 */
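
/*
 * Lock ordering, as used below: audit_filter_mutex is taken outside
 * everything else, mark (entry) locks nest outside hash_lock.
 * untag_chunk() is the odd one out - it is called with hash_lock held
 * and drops/retakes it, so its callers simply loop with hash_lock held.
 */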

static struct fsnotify_group *audit_tree_group;

static struct audit_tree *alloc_tree(const char *s)
{
        struct audit_tree *tree;

        tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
        if (tree) {
                atomic_set(&tree->count, 1);
                tree->goner = 0;
                INIT_LIST_HEAD(&tree->chunks);
                INIT_LIST_HEAD(&tree->rules);
                INIT_LIST_HEAD(&tree->list);
                INIT_LIST_HEAD(&tree->same_root);
                tree->root = NULL;
                strcpy(tree->pathname, s);
        }
        return tree;
}

static inline void get_tree(struct audit_tree *tree)
{
        atomic_inc(&tree->count);
}

static inline void put_tree(struct audit_tree *tree)
{
        if (atomic_dec_and_test(&tree->count))
                kfree_rcu(tree, head);
}

/* to avoid bringing the entire thing into audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
        return tree->pathname;
}

static void free_chunk(struct audit_chunk *chunk)
{
        int i;

        for (i = 0; i < chunk->count; i++) {
                if (chunk->owners[i].owner)
                        put_tree(chunk->owners[i].owner);
        }
        kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
        if (atomic_long_dec_and_test(&chunk->refs))
                free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
        struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
        audit_put_chunk(chunk);
}

static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
{
        struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
        call_rcu(&chunk->head, __put_chunk);
}

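/*
 * Allocate a chunk with room for @count owners.  The flexible owners[]
 * array is sized via offsetof(); each node's list is initialized and its
 * index preset to its array position, and .refs starts at 1 for the
 * caller's reference.
 */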
static struct audit_chunk *alloc_chunk(int count)
{
        struct audit_chunk *chunk;
        size_t size;
        int i;

        size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
        chunk = kzalloc(size, GFP_KERNEL);
        if (!chunk)
                return NULL;

        INIT_LIST_HEAD(&chunk->hash);
        INIT_LIST_HEAD(&chunk->trees);
        chunk->count = count;
        atomic_long_set(&chunk->refs, 1);
        for (i = 0; i < count; i++) {
                INIT_LIST_HEAD(&chunk->owners[i].list);
                chunk->owners[i].index = i;
        }
        fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
        return chunk;
}

enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

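/*
 * Hash on the inode address: dividing by L1_CACHE_BYTES strips the low
 * bits that are zero for every cacheline-aligned inode, and the
 * remainder modulo HASH_SIZE selects one of the 128 buckets.
 */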
static inline struct list_head *chunk_hash(const struct inode *inode)
{
        unsigned long n = (unsigned long)inode / L1_CACHE_BYTES;
        return chunk_hash_heads + n % HASH_SIZE;
}

/* hash_lock & entry->lock are held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
        struct fsnotify_mark *entry = &chunk->mark;
        struct list_head *list;

        if (!entry->i.inode)
                return;
        list = chunk_hash(entry->i.inode);
        list_add_rcu(&chunk->hash, list);
}

/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
        struct list_head *list = chunk_hash(inode);
        struct audit_chunk *p;

        list_for_each_entry_rcu(p, list, hash) {
                /* mark.inode may have gone NULL, but who cares? */
                if (p->mark.i.inode == inode) {
                        atomic_long_inc(&p->refs);
                        return p;
                }
        }
        return NULL;
}

int audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
        int n;
        for (n = 0; n < chunk->count; n++)
                if (chunk->owners[n].owner == tree)
                        return 1;
        return 0;
}

/* tagging and untagging inodes with trees */

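/*
 * Map a node back to its chunk: mask off the "will prune" bit to get the
 * node's position in owners[], step the pointer back to owners[0] and
 * container_of() from there.  This works because alloc_chunk() and the
 * chunk-replacement paths below keep owners[i].index == i modulo the
 * top bit.
 */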
static struct audit_chunk *find_chunk(struct node *p)
{
        int index = p->index & ~(1U<<31);
        p -= index;
        return container_of(p, struct audit_chunk, owners[0]);
}

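/*
 * Remove @p's owner from its chunk.  Hashed chunks are treated as
 * immutable, so this replaces the chunk with a copy one owner smaller,
 * or destroys it outright when @p was the last owner.  If the
 * replacement can't be allocated or its mark can't be added, fall back
 * to just clearing the owner slot; the NULL-owner checks elsewhere pick
 * up after that.  Called with hash_lock held; drops and retakes it.
 */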
static void untag_chunk(struct node *p)
{
        struct audit_chunk *chunk = find_chunk(p);
        struct fsnotify_mark *entry = &chunk->mark;
        struct audit_chunk *new = NULL;
        struct audit_tree *owner;
        int size = chunk->count - 1;
        int i, j;

        fsnotify_get_mark(entry);

        spin_unlock(&hash_lock);

        if (size)
                new = alloc_chunk(size);

        spin_lock(&entry->lock);
        if (chunk->dead || !entry->i.inode) {
                spin_unlock(&entry->lock);
                if (new)
                        free_chunk(new);
                goto out;
        }

        owner = p->owner;

        if (!size) {
                chunk->dead = 1;
                spin_lock(&hash_lock);
                list_del_init(&chunk->trees);
                if (owner->root == chunk)
                        owner->root = NULL;
                list_del_init(&p->list);
                list_del_rcu(&chunk->hash);
                spin_unlock(&hash_lock);
                spin_unlock(&entry->lock);
                fsnotify_destroy_mark(entry);
                goto out;
        }

        if (!new)
                goto Fallback;

        fsnotify_duplicate_mark(&new->mark, entry);
        if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.i.inode, NULL, 1)) {
                fsnotify_put_mark(&new->mark);
                goto Fallback;
        }

        chunk->dead = 1;
        spin_lock(&hash_lock);
        list_replace_init(&chunk->trees, &new->trees);
        if (owner->root == chunk) {
                list_del_init(&owner->same_root);
                owner->root = NULL;
        }

        for (i = j = 0; j <= size; i++, j++) {
                struct audit_tree *s;
                if (&chunk->owners[j] == p) {
                        list_del_init(&p->list);
                        i--;
                        continue;
                }
                s = chunk->owners[j].owner;
                new->owners[i].owner = s;
                new->owners[i].index = chunk->owners[j].index - j + i;
                if (!s) /* result of earlier fallback */
                        continue;
                get_tree(s);
                list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
        }

        list_replace_rcu(&chunk->hash, &new->hash);
        list_for_each_entry(owner, &new->trees, same_root)
                owner->root = new;
        spin_unlock(&hash_lock);
        spin_unlock(&entry->lock);
        fsnotify_destroy_mark(entry);
        goto out;

Fallback:
        // do the best we can
        spin_lock(&hash_lock);
        if (owner->root == chunk) {
                list_del_init(&owner->same_root);
                owner->root = NULL;
        }
        list_del_init(&p->list);
        p->owner = NULL;
        put_tree(owner);
        spin_unlock(&hash_lock);
        spin_unlock(&entry->lock);
out:
        fsnotify_put_mark(entry);
        spin_lock(&hash_lock);
}

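/*
 * First tag for an inode with no existing mark: allocate a one-owner
 * chunk, attach its fsnotify mark, then recheck tree->goner under the
 * locks - the tree may have been killed while we were allocating.
 */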
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
        struct fsnotify_mark *entry;
        struct audit_chunk *chunk = alloc_chunk(1);
        if (!chunk)
                return -ENOMEM;

        entry = &chunk->mark;
        if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) {
                fsnotify_put_mark(entry);
                return -ENOSPC;
        }

        spin_lock(&entry->lock);
        spin_lock(&hash_lock);
        if (tree->goner) {
                spin_unlock(&hash_lock);
                chunk->dead = 1;
                spin_unlock(&entry->lock);
                fsnotify_get_mark(entry);
                fsnotify_destroy_mark(entry);
                fsnotify_put_mark(entry);
                return 0;
        }
        chunk->owners[0].index = (1U << 31);
        chunk->owners[0].owner = tree;
        get_tree(tree);
        list_add(&chunk->owners[0].list, &tree->chunks);
        if (!tree->root) {
                tree->root = chunk;
                list_add(&tree->same_root, &chunk->trees);
        }
        insert_hash(chunk);
        spin_unlock(&hash_lock);
        spin_unlock(&entry->lock);
        return 0;
}

/* the first tagged inode becomes root of tree */
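/*
 * Add @tree as an owner of @inode's chunk.  With no mark present this
 * degenerates to create_chunk(); otherwise the existing chunk is
 * replaced by a copy with one more owner slot - the same
 * copy-and-substitute scheme as untag_chunk(), run in the opposite
 * direction.
 */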
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
        struct fsnotify_mark *old_entry, *chunk_entry;
        struct audit_tree *owner;
        struct audit_chunk *chunk, *old;
        struct node *p;
        int n;

        old_entry = fsnotify_find_inode_mark(audit_tree_group, inode);
        if (!old_entry)
                return create_chunk(inode, tree);

        old = container_of(old_entry, struct audit_chunk, mark);

        /* are we already there? */
        spin_lock(&hash_lock);
        for (n = 0; n < old->count; n++) {
                if (old->owners[n].owner == tree) {
                        spin_unlock(&hash_lock);
                        fsnotify_put_mark(old_entry);
                        return 0;
                }
        }
        spin_unlock(&hash_lock);

        chunk = alloc_chunk(old->count + 1);
        if (!chunk) {
                fsnotify_put_mark(old_entry);
                return -ENOMEM;
        }

        chunk_entry = &chunk->mark;

        spin_lock(&old_entry->lock);
        if (!old_entry->i.inode) {
                /* old_entry is being shot, let's just lie */
                spin_unlock(&old_entry->lock);
                fsnotify_put_mark(old_entry);
                free_chunk(chunk);
                return -ENOENT;
        }

        fsnotify_duplicate_mark(chunk_entry, old_entry);
        if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->i.inode, NULL, 1)) {
                spin_unlock(&old_entry->lock);
                fsnotify_put_mark(chunk_entry);
                fsnotify_put_mark(old_entry);
                return -ENOSPC;
        }

        /* even though we hold old_entry->lock, this is safe since chunk_entry->lock could NEVER have been grabbed before */
        spin_lock(&chunk_entry->lock);
        spin_lock(&hash_lock);

        /* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
        if (tree->goner) {
                spin_unlock(&hash_lock);
                chunk->dead = 1;
                spin_unlock(&chunk_entry->lock);
                spin_unlock(&old_entry->lock);

                fsnotify_get_mark(chunk_entry);
                fsnotify_destroy_mark(chunk_entry);

                fsnotify_put_mark(chunk_entry);
                fsnotify_put_mark(old_entry);
                return 0;
        }
        list_replace_init(&old->trees, &chunk->trees);
        for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
                struct audit_tree *s = old->owners[n].owner;
                p->owner = s;
                p->index = old->owners[n].index;
                if (!s) /* result of fallback in untag */
                        continue;
                get_tree(s);
                list_replace_init(&old->owners[n].list, &p->list);
        }
        p->index = (chunk->count - 1) | (1U<<31);
        p->owner = tree;
        get_tree(tree);
        list_add(&p->list, &tree->chunks);
        list_replace_rcu(&old->hash, &chunk->hash);
        list_for_each_entry(owner, &chunk->trees, same_root)
                owner->root = chunk;
        old->dead = 1;
        if (!tree->root) {
                tree->root = chunk;
                list_add(&tree->same_root, &chunk->trees);
        }
        spin_unlock(&hash_lock);
        spin_unlock(&chunk_entry->lock);
        spin_unlock(&old_entry->lock);
        fsnotify_destroy_mark(old_entry);
        fsnotify_put_mark(old_entry); /* pair to fsnotify_find_inode_mark() */
        return 0;
}

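/*
 * Detach all rules from @tree, logging an AUDIT_CONFIG_CHANGE record
 * for each fully instantiated one before freeing it via RCU.  Called
 * with audit_filter_mutex held.
 */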
static void kill_rules(struct audit_tree *tree)
{
        struct audit_krule *rule, *next;
        struct audit_entry *entry;
        struct audit_buffer *ab;

        list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
                entry = container_of(rule, struct audit_entry, rule);

                list_del_init(&rule->rlist);
                if (rule->tree) {
                        /* not a half-baked one */
                        ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
                        audit_log_format(ab, "op=");
                        audit_log_string(ab, "remove rule");
                        audit_log_format(ab, " dir=");
                        audit_log_untrustedstring(ab, rule->tree->pathname);
                        audit_log_key(ab, rule->filterkey);
                        audit_log_format(ab, " list=%d res=1", rule->listnr);
                        audit_log_end(ab);
                        rule->tree = NULL;
                        list_del_rcu(&entry->list);
                        list_del(&entry->rule.list);
                        call_rcu(&entry->rcu, audit_free_rule_rcu);
                }
        }
}

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
        spin_lock(&hash_lock);
        while (!list_empty(&victim->chunks)) {
                struct node *p;

                p = list_entry(victim->chunks.next, struct node, list);

                untag_chunk(p);
        }
        spin_unlock(&hash_lock);
        put_tree(victim);
}

/* trim the uncommitted chunks from tree */
static void trim_marked(struct audit_tree *tree)
{
        struct list_head *p, *q;
        spin_lock(&hash_lock);
        if (tree->goner) {
                spin_unlock(&hash_lock);
                return;
        }
        /* reorder: move the "will prune" nodes to the front */
        for (p = tree->chunks.next; p != &tree->chunks; p = q) {
                struct node *node = list_entry(p, struct node, list);
                q = p->next;
                if (node->index & (1U<<31)) {
                        list_del_init(p);
                        list_add(p, &tree->chunks);
                }
        }

        while (!list_empty(&tree->chunks)) {
                struct node *node;

                node = list_entry(tree->chunks.next, struct node, list);

                /* have we run out of marked? */
                if (!(node->index & (1U<<31)))
                        break;

                untag_chunk(node);
        }
        if (!tree->root && !tree->goner) {
                tree->goner = 1;
                spin_unlock(&hash_lock);
                mutex_lock(&audit_filter_mutex);
                kill_rules(tree);
                list_del_init(&tree->list);
                mutex_unlock(&audit_filter_mutex);
                prune_one(tree);
        } else {
                spin_unlock(&hash_lock);
        }
}

static void audit_schedule_prune(void);

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
        struct audit_tree *tree;
        tree = rule->tree;
        if (tree) {
                spin_lock(&hash_lock);
                list_del_init(&rule->rlist);
                if (list_empty(&tree->rules) && !tree->goner) {
                        tree->root = NULL;
                        list_del_init(&tree->same_root);
                        tree->goner = 1;
                        list_move(&tree->list, &prune_list);
                        rule->tree = NULL;
                        spin_unlock(&hash_lock);
                        audit_schedule_prune();
                        return 1;
                }
                rule->tree = NULL;
                spin_unlock(&hash_lock);
                return 1;
        }
        return 0;
}

static int compare_root(struct vfsmount *mnt, void *arg)
{
        return mnt->mnt_root->d_inode == arg;
}

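/*
 * Prune chunks that no longer correspond to any mount under their
 * tree's path: every node is provisionally flagged "will prune", the
 * flag is cleared again for nodes whose inode is still the root of some
 * collected mount, and trim_marked() unhooks whatever stayed flagged.
 * The on-stack cursor keeps our place in tree_list across the stretches
 * where audit_filter_mutex is dropped.
 */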
void audit_trim_trees(void)
{
        struct list_head cursor;

        mutex_lock(&audit_filter_mutex);
        list_add(&cursor, &tree_list);
        while (cursor.next != &tree_list) {
                struct audit_tree *tree;
                struct path path;
                struct vfsmount *root_mnt;
                struct node *node;
                int err;

                tree = container_of(cursor.next, struct audit_tree, list);
                get_tree(tree);
                list_del(&cursor);
                list_add(&cursor, &tree->list);
                mutex_unlock(&audit_filter_mutex);

                err = kern_path(tree->pathname, 0, &path);
                if (err)
                        goto skip_it;

                root_mnt = collect_mounts(&path);
                path_put(&path);
                if (!root_mnt)
                        goto skip_it;

                spin_lock(&hash_lock);
                list_for_each_entry(node, &tree->chunks, list) {
                        struct audit_chunk *chunk = find_chunk(node);
                        /* this could be NULL if the watch is dying elsewhere... */
                        struct inode *inode = chunk->mark.i.inode;
                        node->index |= 1U<<31;
                        if (iterate_mounts(compare_root, inode, root_mnt))
                                node->index &= ~(1U<<31);
                }
                spin_unlock(&hash_lock);
                trim_marked(tree);
                drop_collected_mounts(root_mnt);
skip_it:
                put_tree(tree);
                mutex_lock(&audit_filter_mutex);
        }
        list_del(&cursor);
        mutex_unlock(&audit_filter_mutex);
}

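/*
 * Attach a freshly allocated tree to a rule under construction.  Only
 * exit-filter rules with an absolute path, an equality comparator and
 * no inode/watch/tree already set qualify.
 */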
int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{
        if (pathname[0] != '/' ||
            rule->listnr != AUDIT_FILTER_EXIT ||
            op != Audit_equal ||
            rule->inode_f || rule->watch || rule->tree)
                return -EINVAL;
        rule->tree = alloc_tree(pathname);
        if (!rule->tree)
                return -ENOMEM;
        return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
        put_tree(tree);
}

static int tag_mount(struct vfsmount *mnt, void *arg)
{
        return tag_chunk(mnt->mnt_root->d_inode, arg);
}

/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
        struct audit_tree *seed = rule->tree, *tree;
        struct path path;
        struct vfsmount *mnt;
        int err;

        list_for_each_entry(tree, &tree_list, list) {
                if (!strcmp(seed->pathname, tree->pathname)) {
                        put_tree(seed);
                        rule->tree = tree;
                        list_add(&rule->rlist, &tree->rules);
                        return 0;
                }
        }
        tree = seed;
        list_add(&tree->list, &tree_list);
        list_add(&rule->rlist, &tree->rules);
        /* do not set rule->tree yet */
        mutex_unlock(&audit_filter_mutex);

        err = kern_path(tree->pathname, 0, &path);
        if (err)
                goto Err;
        mnt = collect_mounts(&path);
        path_put(&path);
        if (!mnt) {
                err = -ENOMEM;
                goto Err;
        }

        get_tree(tree);
        err = iterate_mounts(tag_mount, tree, mnt);
        drop_collected_mounts(mnt);

        if (!err) {
                struct node *node;
                spin_lock(&hash_lock);
                list_for_each_entry(node, &tree->chunks, list)
                        node->index &= ~(1U<<31);
                spin_unlock(&hash_lock);
        } else {
                trim_marked(tree);
                goto Err;
        }

        mutex_lock(&audit_filter_mutex);
        if (list_empty(&rule->rlist)) {
                put_tree(tree);
                return -ENOENT;
        }
        rule->tree = tree;
        put_tree(tree);

        return 0;
Err:
        mutex_lock(&audit_filter_mutex);
        list_del_init(&tree->list);
        list_del_init(&tree->rules);
        put_tree(tree);
        return err;
}

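/*
 * Retag every tree whose path covers @old so that it also watches the
 * mounts collected at @new.  The two on-stack list heads play different
 * roles: cursor marks our position while walking tree_list with the
 * mutex dropped, while barrier separates the trees already retagged
 * (parked in front of it) from the rest, so the second loop can commit
 * or trim exactly the trees the first loop touched.
 */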
int audit_tag_tree(char *old, char *new)
{
        struct list_head cursor, barrier;
        int failed = 0;
        struct path path1, path2;
        struct vfsmount *tagged;
        int err;

        err = kern_path(new, 0, &path2);
        if (err)
                return err;
        tagged = collect_mounts(&path2);
        path_put(&path2);
        if (!tagged)
                return -ENOMEM;

        err = kern_path(old, 0, &path1);
        if (err) {
                drop_collected_mounts(tagged);
                return err;
        }

        mutex_lock(&audit_filter_mutex);
        list_add(&barrier, &tree_list);
        list_add(&cursor, &barrier);

        while (cursor.next != &tree_list) {
                struct audit_tree *tree;
                int good_one = 0;

                tree = container_of(cursor.next, struct audit_tree, list);
                get_tree(tree);
                list_del(&cursor);
                list_add(&cursor, &tree->list);
                mutex_unlock(&audit_filter_mutex);

                err = kern_path(tree->pathname, 0, &path2);
                if (!err) {
                        good_one = path_is_under(&path1, &path2);
                        path_put(&path2);
                }

                if (!good_one) {
                        put_tree(tree);
                        mutex_lock(&audit_filter_mutex);
                        continue;
                }

                failed = iterate_mounts(tag_mount, tree, tagged);
                if (failed) {
                        put_tree(tree);
                        mutex_lock(&audit_filter_mutex);
                        break;
                }

                mutex_lock(&audit_filter_mutex);
                spin_lock(&hash_lock);
                if (!tree->goner) {
                        list_del(&tree->list);
                        list_add(&tree->list, &tree_list);
                }
                spin_unlock(&hash_lock);
                put_tree(tree);
        }

        while (barrier.prev != &tree_list) {
                struct audit_tree *tree;

                tree = container_of(barrier.prev, struct audit_tree, list);
                get_tree(tree);
                list_del(&tree->list);
                list_add(&tree->list, &barrier);
                mutex_unlock(&audit_filter_mutex);

                if (!failed) {
                        struct node *node;
                        spin_lock(&hash_lock);
                        list_for_each_entry(node, &tree->chunks, list)
                                node->index &= ~(1U<<31);
                        spin_unlock(&hash_lock);
                } else {
                        trim_marked(tree);
                }

                put_tree(tree);
                mutex_lock(&audit_filter_mutex);
        }
        list_del(&barrier);
        list_del(&cursor);
        mutex_unlock(&audit_filter_mutex);
        path_put(&path1);
        drop_collected_mounts(tagged);
        return failed;
}

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
        mutex_lock(&audit_cmd_mutex);
        mutex_lock(&audit_filter_mutex);

        while (!list_empty(&prune_list)) {
                struct audit_tree *victim;

                victim = list_entry(prune_list.next, struct audit_tree, list);
                list_del_init(&victim->list);

                mutex_unlock(&audit_filter_mutex);

                prune_one(victim);

                mutex_lock(&audit_filter_mutex);
        }

        mutex_unlock(&audit_filter_mutex);
        mutex_unlock(&audit_cmd_mutex);
        return 0;
}

static void audit_schedule_prune(void)
{
        kthread_run(prune_tree_thread, NULL, "audit_prune_tree");
}

/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of syscall.  Runs synchronously.
 */
void audit_kill_trees(struct list_head *list)
{
        mutex_lock(&audit_cmd_mutex);
        mutex_lock(&audit_filter_mutex);

        while (!list_empty(list)) {
                struct audit_tree *victim;

                victim = list_entry(list->next, struct audit_tree, list);
                kill_rules(victim);
                list_del_init(&victim->list);

                mutex_unlock(&audit_filter_mutex);

                prune_one(victim);

                mutex_lock(&audit_filter_mutex);
        }

        mutex_unlock(&audit_filter_mutex);
        mutex_unlock(&audit_cmd_mutex);
}

/*
 * Here comes the stuff asynchronous to auditctl operations
 */

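/*
 * A watched inode is being evicted.  Unhash the chunk and orphan every
 * owning tree; rule teardown either runs via the prune thread
 * (scheduled here) or is postponed onto the current syscall's
 * killed-trees list and handled by audit_kill_trees().
 */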
static void evict_chunk(struct audit_chunk *chunk)
{
        struct audit_tree *owner;
        struct list_head *postponed = audit_killed_trees();
        int need_prune = 0;
        int n;

        if (chunk->dead)
                return;

        chunk->dead = 1;
        mutex_lock(&audit_filter_mutex);
        spin_lock(&hash_lock);
        while (!list_empty(&chunk->trees)) {
                owner = list_entry(chunk->trees.next,
                                   struct audit_tree, same_root);
                owner->goner = 1;
                owner->root = NULL;
                list_del_init(&owner->same_root);
                spin_unlock(&hash_lock);
                if (!postponed) {
                        kill_rules(owner);
                        list_move(&owner->list, &prune_list);
                        need_prune = 1;
                } else {
                        list_move(&owner->list, postponed);
                }
                spin_lock(&hash_lock);
        }
        list_del_rcu(&chunk->hash);
        for (n = 0; n < chunk->count; n++)
                list_del_init(&chunk->owners[n].list);
        spin_unlock(&hash_lock);
        if (need_prune)
                audit_schedule_prune();
        mutex_unlock(&audit_filter_mutex);
}

static int audit_tree_handle_event(struct fsnotify_group *group,
                                   struct fsnotify_mark *inode_mark,
                                   struct fsnotify_mark *vfsmount_mark,
                                   struct fsnotify_event *event)
{
        BUG();
        return -EOPNOTSUPP;
}

static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
{
        struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);

        evict_chunk(chunk);
        fsnotify_put_mark(entry);
}

static bool audit_tree_send_event(struct fsnotify_group *group, struct inode *inode,
                                  struct fsnotify_mark *inode_mark,
                                  struct fsnotify_mark *vfsmount_mark,
                                  __u32 mask, void *data, int data_type)
{
        return false;
}

static const struct fsnotify_ops audit_tree_ops = {
        .handle_event           = audit_tree_handle_event,
        .should_send_event      = audit_tree_send_event,
        .free_group_priv        = NULL,
        .free_event_priv        = NULL,
        .freeing_mark           = audit_tree_freeing_mark,
};

static int __init audit_tree_init(void)
{
        int i;

        audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
        if (IS_ERR(audit_tree_group))
                audit_panic("cannot initialize fsnotify group for rectree watches");

        for (i = 0; i < HASH_SIZE; i++)
                INIT_LIST_HEAD(&chunk_hash_heads[i]);

        return 0;
}
__initcall(audit_tree_init);