path: root/kernel/audit_tree.c
author     Al Viro <viro@zeniv.linux.org.uk>  2009-06-24 06:02:38 +0200
committer  Al Viro <viro@zeniv.linux.org.uk>  2009-06-24 06:02:38 +0200
commit     916d75761c971b6e630a26bd4ba472e90ac9a4b9 (patch)
tree       3a4b18d0d29c1d12f64fefbb2bc5559813a686f7 /kernel/audit_tree.c
parent     Audit: clean up all op= output to include string quoting (diff)
download   linux-916d75761c971b6e630a26bd4ba472e90ac9a4b9.tar.xz
           linux-916d75761c971b6e630a26bd4ba472e90ac9a4b9.zip
Fix rule eviction order for AUDIT_DIR
If a syscall removes the root of a subtree being watched, we definitely do not want the rules referring to that subtree to be destroyed without the syscall in question having a chance to match them.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Diffstat (limited to 'kernel/audit_tree.c')
-rw-r--r--  kernel/audit_tree.c  56
1 file changed, 51 insertions(+), 5 deletions(-)
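The change splits the pruning of dead trees into two paths: an asynchronous one for evictions that happen outside syscall context (audit_schedule_prune() now spawns a kthread), and a synchronous one (audit_kill_trees()) for evictions triggered by the syscall itself, so the rules stay alive until that syscall has had a chance to match them. The new evict_chunk() relies on audit_killed_trees(), which is not defined in this file; judging by how it is used below, it returns the current task's syscall-local list of trees to kill, or NULL outside syscall context. A minimal sketch of what such an accessor might look like, illustrative only since the companion change falls outside this diffstat:

/*
 * Hypothetical sketch of the accessor used by evict_chunk() below; the real
 * definition lives outside kernel/audit_tree.c.  It hands back the
 * per-syscall list that victims can be postponed onto, or NULL when there
 * is no syscall audit context to defer to.
 */
struct list_head *audit_killed_trees(void)
{
	struct audit_context *ctx = current->audit_context;

	if (!ctx || !ctx->in_syscall)
		return NULL;
	return &ctx->killed_trees;
}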
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 3ff0731284a1..2451dc6f3282 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -2,6 +2,7 @@
#include <linux/inotify.h>
#include <linux/namei.h>
#include <linux/mount.h>
+#include <linux/kthread.h>
struct audit_tree;
struct audit_chunk;
@@ -517,6 +518,8 @@ static void trim_marked(struct audit_tree *tree)
}
}
+static void audit_schedule_prune(void);
+
/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
@@ -822,10 +825,11 @@ int audit_tag_tree(char *old, char *new)
/*
* That gets run when evict_chunk() ends up needing to kill audit_tree.
- * Runs from a separate thread, with audit_cmd_mutex held.
+ * Runs from a separate thread.
*/
-void audit_prune_trees(void)
+static int prune_tree_thread(void *unused)
{
+ mutex_lock(&audit_cmd_mutex);
mutex_lock(&audit_filter_mutex);
while (!list_empty(&prune_list)) {
@@ -842,6 +846,40 @@ void audit_prune_trees(void)
}
mutex_unlock(&audit_filter_mutex);
+ mutex_unlock(&audit_cmd_mutex);
+ return 0;
+}
+
+static void audit_schedule_prune(void)
+{
+ kthread_run(prune_tree_thread, NULL, "audit_prune_tree");
+}
+
+/*
+ * ... and that one is done if evict_chunk() decides to delay until the end
+ * of syscall. Runs synchronously.
+ */
+void audit_kill_trees(struct list_head *list)
+{
+ mutex_lock(&audit_cmd_mutex);
+ mutex_lock(&audit_filter_mutex);
+
+ while (!list_empty(list)) {
+ struct audit_tree *victim;
+
+ victim = list_entry(list->next, struct audit_tree, list);
+ kill_rules(victim);
+ list_del_init(&victim->list);
+
+ mutex_unlock(&audit_filter_mutex);
+
+ prune_one(victim);
+
+ mutex_lock(&audit_filter_mutex);
+ }
+
+ mutex_unlock(&audit_filter_mutex);
+ mutex_unlock(&audit_cmd_mutex);
}
/*
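audit_kill_trees() above is the synchronous counterpart: something at the end of the syscall has to hand it the list that evict_chunk() accumulates. A hedged sketch of such a call site, with the helper name invented for illustration (the real caller sits outside this file, in the syscall exit/free paths of the audit code):

/*
 * Illustrative only: drain the list of trees postponed by evict_chunk()
 * once the syscall has been matched against the audit rules.
 */
static void audit_drain_killed_trees(struct list_head *killed_trees)
{
	if (!list_empty(killed_trees))
		audit_kill_trees(killed_trees);
}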
@@ -852,6 +890,8 @@ void audit_prune_trees(void)
static void evict_chunk(struct audit_chunk *chunk)
{
struct audit_tree *owner;
+ struct list_head *postponed = audit_killed_trees();
+ int need_prune = 0;
int n;
if (chunk->dead)
@@ -867,15 +907,21 @@ static void evict_chunk(struct audit_chunk *chunk)
owner->root = NULL;
list_del_init(&owner->same_root);
spin_unlock(&hash_lock);
- kill_rules(owner);
- list_move(&owner->list, &prune_list);
- audit_schedule_prune();
+ if (!postponed) {
+ kill_rules(owner);
+ list_move(&owner->list, &prune_list);
+ need_prune = 1;
+ } else {
+ list_move(&owner->list, postponed);
+ }
spin_lock(&hash_lock);
}
list_del_rcu(&chunk->hash);
for (n = 0; n < chunk->count; n++)
list_del_init(&chunk->owners[n].list);
spin_unlock(&hash_lock);
+ if (need_prune)
+ audit_schedule_prune();
mutex_unlock(&audit_filter_mutex);
}
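Two details of the locking are worth noting. Both paths take audit_cmd_mutex before audit_filter_mutex, matching the order used elsewhere in the audit code; that is why prune_tree_thread() now takes audit_cmd_mutex itself, where the removed comment says it used to run with the mutex already held. And evict_chunk() only records the decision in need_prune while hash_lock (a spinlock) is held, calling audit_schedule_prune() after the unlock, since kthread_run() can sleep. audit_kill_trees() likewise drops audit_filter_mutex around prune_one(), presumably because tearing down a chunk can block.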