author		Al Viro <viro@zeniv.linux.org.uk>	2023-10-30 06:09:50 +0100
committer	Al Viro <viro@zeniv.linux.org.uk>	2023-11-25 08:33:56 +0100
commit		2f42f1eb9093834b635991c70d0273fbe249eabf (patch)
tree		ce60831fa5018e50135b59e8d41d4ead48943638 /fs/dcache.c
parent		dentry_kill(): don't bother with retain_dentry() on slow path (diff)
Call retain_dentry() with refcount 0
Instead of bumping it from 0 to 1, calling retain_dentry(), then
decrementing it back to 0 (with ->d_lock held all the way through),
just leave refcount at 0 through all of that.

It will have a visible effect for ->d_delete() - now it can be called
with refcount 0 instead of 1 and it can no longer play silly buggers
with dropping/regaining ->d_lock.  Not that any in-tree instances
tried to (it's pretty hard to get right).  Any out-of-tree ones will
have to adjust (assuming they need any changes).

Note that we do not need to extend the rcu-critical area here - we have
verified that refcount is non-negative after having grabbed ->d_lock,
so nobody will be able to free the dentry until they get into
__dentry_kill(), which won't happen until they manage to grab ->d_lock.

Reviewed-by: Christian Brauner <brauner@kernel.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
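For filesystems that provide ->d_delete(), the practical consequence is that
the callback now runs under ->d_lock with the dentry's refcount at 0 rather
than 1, and it must return its verdict without dropping and re-taking
->d_lock.  The fragment below is a minimal illustrative sketch of what a
conforming instance looks like; the example_* names and the "keep regular
files cached" policy are invented for illustration and are not taken from any
in-tree filesystem.

#include <linux/dcache.h>
#include <linux/fs.h>

/*
 * Illustrative sketch only.  ->d_delete() is called under ->d_lock;
 * after this patch the dentry's refcount is 0 (it used to be 1).
 * Return non-zero to have the dentry discarded instead of retained.
 * Do not drop or re-take ->d_lock here.
 */
static int example_d_delete(const struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);

	/* hypothetical policy: keep positive regular-file dentries cached */
	return !(inode && S_ISREG(inode->i_mode));
}

static const struct dentry_operations example_dentry_ops = {
	.d_delete	= example_d_delete,
};

The simplest in-tree instance, always_delete_dentry() in fs/libfs.c, just
returns 1 unconditionally; anything along those lines is unaffected by the
refcount value it sees.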
Diffstat (limited to 'fs/dcache.c')
-rw-r--r--	fs/dcache.c	10
1 file changed, 2 insertions, 8 deletions
diff --git a/fs/dcache.c b/fs/dcache.c
index 80992e49561c..8ce0fe70f303 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -888,15 +888,14 @@ void dput(struct dentry *dentry)
 		}
 
 		/* Slow case: now with the dentry lock held */
-		dentry->d_lockref.count = 1;
 		rcu_read_unlock();
 
 		if (likely(retain_dentry(dentry))) {
-			dentry->d_lockref.count--;
 			spin_unlock(&dentry->d_lock);
 			return;
 		}
 
+		dentry->d_lockref.count = 1;
 		dentry = dentry_kill(dentry);
 	}
 }
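After this hunk, the slow path of dput() reads roughly as sketched below.
This is reconstructed from the hunk context only; the fast_dput() handling
above it and the enclosing loop are elided and assumed unchanged.

	/* Slow case: now with the dentry lock held */
	rcu_read_unlock();

	if (likely(retain_dentry(dentry))) {	/* refcount is still 0 here */
		spin_unlock(&dentry->d_lock);
		return;
	}

	dentry->d_lockref.count = 1;	/* bumped only once the dentry is doomed */
	dentry = dentry_kill(dentry);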
@@ -921,13 +920,8 @@ void dput_to_list(struct dentry *dentry, struct list_head *list)
 		return;
 	}
 	rcu_read_unlock();
-	dentry->d_lockref.count = 1;
-	if (!retain_dentry(dentry)) {
-		--dentry->d_lockref.count;
+	if (!retain_dentry(dentry))
 		to_shrink_list(dentry, list);
-	} else {
-		--dentry->d_lockref.count;
-	}
 	spin_unlock(&dentry->d_lock);
 }
 
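Likewise, dput_to_list() ends up with the shape sketched below, reconstructed
from the hunk context; the fast-path early return above the hunk is elided.
With the refcount left at 0 across retain_dentry(), neither branch needs a
compensating decrement any more.

void dput_to_list(struct dentry *dentry, struct list_head *list)
{
	/* ... fast path elided: the common case returns before this point ... */
	rcu_read_unlock();
	if (!retain_dentry(dentry))	/* called with refcount 0 */
		to_shrink_list(dentry, list);
	spin_unlock(&dentry->d_lock);
}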