author     Jan Kara <jack@suse.cz>    2016-12-21 12:15:30 +0100
committer  Jan Kara <jack@suse.cz>    2017-04-10 17:37:36 +0200
commit     6b3f05d24d355f50f3d9814304650fcab0efb482 (patch)
tree       362bb47ec0f38adc2cdd05f28e3a25ece2f66787 /kernel/audit_tree.c
parent     fsnotify: Move queueing of mark for destruction into fsnotify_put_mark() (diff)
fsnotify: Detach mark from object list when last reference is dropped
Instead of removing the mark from the object list in fsnotify_detach_mark(), remove it when the last reference to the mark is dropped. This will allow fanotify to wait for a userspace response to an event without having to hold onto fsnotify_mark_srcu. To avoid pinning inodes through an elevated refcount (and thus e.g. delaying file deletion) while someone holds a mark reference, we also detach the connector from the object in fsnotify_destroy_marks(), not only after removing the last mark from the list as was done until now.

Reviewed-by: Miklos Szeredi <mszeredi@redhat.com>
Reviewed-by: Amir Goldstein <amir73il@gmail.com>
Signed-off-by: Jan Kara <jack@suse.cz>
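The pattern the commit message describes is easier to see outside the fsnotify code. The sketch below is a minimal userspace C analogue, not the fsnotify implementation, and every name in it is hypothetical: the unlink from the object list happens inside the final put rather than in a separate detach step, so anyone still holding a reference always sees a mark that is attached to its list.

/*
 * Minimal userspace analogue of "remove from the object list when the
 * last reference is dropped".  All names are hypothetical; this only
 * illustrates the pattern the commit message describes.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct obj_list {
	pthread_mutex_t lock;
	struct mark *head;
};

struct mark {
	int refcount;            /* protected by list->lock */
	struct mark *next;
	struct obj_list *list;   /* object list the mark hangs off */
};

static struct mark *mark_get(struct mark *m)
{
	pthread_mutex_lock(&m->list->lock);
	m->refcount++;
	pthread_mutex_unlock(&m->list->lock);
	return m;
}

/* Dropping the last reference is what unlinks the mark from the list. */
static void mark_put(struct mark *m)
{
	struct obj_list *l = m->list;

	pthread_mutex_lock(&l->lock);
	if (--m->refcount == 0) {
		struct mark **pp = &l->head;

		while (*pp && *pp != m)
			pp = &(*pp)->next;
		if (*pp)
			*pp = m->next;   /* unlink only on the last put */
		pthread_mutex_unlock(&l->lock);
		free(m);
		return;
	}
	pthread_mutex_unlock(&l->lock);
}

int main(void)
{
	struct obj_list list = { PTHREAD_MUTEX_INITIALIZER, NULL };
	struct mark *m = calloc(1, sizeof(*m));

	m->refcount = 1;         /* the list's own reference */
	m->list = &list;
	list.head = m;

	mark_get(m);             /* e.g. an event handler pins the mark */
	mark_put(m);             /* handler done: mark stays on the list */
	mark_put(m);             /* last reference: mark is unlinked and freed */
	printf("list empty: %s\n", list.head == NULL ? "yes" : "no");
	return 0;
}

In the kernel code, as the commit message explains, the analogous pieces are fsnotify_put_mark(), which after this change removes the mark from the object list on the last reference drop, and fsnotify_destroy_marks(), which detaches the connector from the object early so that long-lived mark references do not pin the inode.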
Diffstat (limited to 'kernel/audit_tree.c')
-rw-r--r--  kernel/audit_tree.c  31
1 file changed, 15 insertions(+), 16 deletions(-)
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index c3b5fcb8eca4..2fa8d61b6fd2 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -172,27 +172,18 @@ static unsigned long inode_to_key(const struct inode *inode)
 /*
  * Function to return search key in our hash from chunk. Key 0 is special and
  * should never be present in the hash.
- *
- * Must be called with chunk->mark.lock held to protect from connector
- * becoming NULL.
  */
-static unsigned long __chunk_to_key(struct audit_chunk *chunk)
+static unsigned long chunk_to_key(struct audit_chunk *chunk)
 {
-	if (!chunk->mark.connector)
+	/*
+	 * We have a reference to the mark so it should be attached to a
+	 * connector.
+	 */
+	if (WARN_ON_ONCE(!chunk->mark.connector))
 		return 0;
 	return (unsigned long)chunk->mark.connector->inode;
 }
 
-static unsigned long chunk_to_key(struct audit_chunk *chunk)
-{
-	unsigned long key;
-
-	spin_lock(&chunk->mark.lock);
-	key = __chunk_to_key(chunk);
-	spin_unlock(&chunk->mark.lock);
-	return key;
-}
-
 static inline struct list_head *chunk_hash(unsigned long key)
 {
 	unsigned long n = key / L1_CACHE_BYTES;
@@ -202,7 +193,7 @@ static inline struct list_head *chunk_hash(unsigned long key)
 /* hash_lock & entry->lock is held by caller */
 static void insert_hash(struct audit_chunk *chunk)
 {
-	unsigned long key = __chunk_to_key(chunk);
+	unsigned long key = chunk_to_key(chunk);
 	struct list_head *list;
 
 	if (!(chunk->mark.flags & FSNOTIFY_MARK_FLAG_ATTACHED))
@@ -263,6 +254,10 @@ static void untag_chunk(struct node *p)
 
 	mutex_lock(&entry->group->mark_mutex);
 	spin_lock(&entry->lock);
+	/*
+	 * mark_mutex protects mark from getting detached and thus also from
+	 * mark->connector->inode getting NULL.
+	 */
 	if (chunk->dead || !(entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
 		spin_unlock(&entry->lock);
 		mutex_unlock(&entry->group->mark_mutex);
@@ -423,6 +418,10 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 
 	mutex_lock(&old_entry->group->mark_mutex);
 	spin_lock(&old_entry->lock);
+	/*
+	 * mark_mutex protects mark from getting detached and thus also from
+	 * mark->connector->inode getting NULL.
+	 */
 	if (!(old_entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
 		/* old_entry is being shot, lets just lie */
 		spin_unlock(&old_entry->lock);