author		Jan Kara <jack@suse.cz>		2016-11-09 14:54:20 +0100
committer	Jan Kara <jack@suse.cz>		2017-04-10 17:37:35 +0200
commit		11375145a70d69e871dd5b8fcadd5d1ee4162e7c (patch)
tree		20adde2c3c5b003fcc6c201d71a2c3acde065255	/fs/notify/inotify/inotify_user.c
parent		inotify: Do not drop mark reference under idr_lock (diff)
fsnotify: Move queueing of mark for destruction into fsnotify_put_mark()
Currently we queue a mark into the list of marks for destruction in __fsnotify_free_mark() and keep the last mark reference dangling. After the worker waits for the SRCU period, it drops the last reference to the mark, which frees it. This scheme has the disadvantage that if we hold a reference to a mark and drop and reacquire the SRCU lock, the mark can get freed immediately, which is slightly inconvenient, and we will need to avoid this in the future.

Move to a scheme where queueing of a mark into the list of marks for destruction happens when the last reference to the mark is dropped. Also drop the reference to the mark held by the group list already when the mark is removed from that list, instead of dropping it only from the destruction worker.

Reviewed-by: Miklos Szeredi <mszeredi@redhat.com>
Reviewed-by: Amir Goldstein <amir73il@gmail.com>
Signed-off-by: Jan Kara <jack@suse.cz>
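For orientation, the lifecycle change described above can be illustrated by a minimal, single-threaded userspace sketch. It is not the kernel code: struct mark, mark_get(), mark_put(), destroy_list and reap_destroy_list() are illustrative stand-ins for fsnotify_mark, fsnotify_get_mark()/fsnotify_put_mark(), the destroy list and the SRCU-delayed destruction worker. The point it shows is the new scheme: the final put queues the object for deferred freeing instead of the caller keeping the last reference dangling.

	#include <stdio.h>
	#include <stdlib.h>

	/* Illustrative stand-in for a refcounted fsnotify-style mark. */
	struct mark {
		int refcnt;
		struct mark *destroy_next;
	};

	/* Marks whose last reference was dropped wait here until a "grace
	 * period" (SRCU in the kernel) has passed; only then are they freed. */
	static struct mark *destroy_list;

	static struct mark *mark_get(struct mark *m)
	{
		m->refcnt++;
		return m;
	}

	/* New scheme: the final put queues the mark for deferred destruction
	 * instead of the caller holding a dangling last reference. */
	static void mark_put(struct mark *m)
	{
		if (--m->refcnt == 0) {
			m->destroy_next = destroy_list;
			destroy_list = m;	/* reaper frees it later */
		}
	}

	/* Stand-in for the destruction worker that runs after the grace period. */
	static void reap_destroy_list(void)
	{
		while (destroy_list) {
			struct mark *m = destroy_list;
			destroy_list = m->destroy_next;
			free(m);
		}
	}

	int main(void)
	{
		struct mark *m = calloc(1, sizeof(*m));

		mark_get(m);		/* e.g. the group list's reference */
		mark_get(m);		/* e.g. the idr's reference */
		mark_put(m);		/* removed from the idr */
		mark_put(m);		/* removed from the group list: queued */
		reap_destroy_list();	/* "after the SRCU period": actually freed */
		printf("mark reclaimed\n");
		return 0;
	}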
Diffstat (limited to 'fs/notify/inotify/inotify_user.c')
-rw-r--r--	fs/notify/inotify/inotify_user.c	3
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index f9113e57ef33..43cbd1b178c9 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -444,10 +444,9 @@ static void inotify_remove_from_idr(struct fsnotify_group *group,
 	/*
 	 * One ref for being in the idr
-	 * one ref held by the caller trying to kill us
 	 * one ref grabbed by inotify_idr_find
 	 */
-	if (unlikely(atomic_read(&i_mark->fsn_mark.refcnt) < 3)) {
+	if (unlikely(atomic_read(&i_mark->fsn_mark.refcnt) < 2)) {
 		printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
 		       __func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
 		/* we can't really recover with bad ref cnting.. */