author     Linus Torvalds <torvalds@linux-foundation.org>  2015-01-10 00:19:03 +0100
committer  Al Viro <viro@zeniv.linux.org.uk>  2015-01-26 05:16:29 +0100
commit     360f54796ed65939093ae373b92ebd5ef3341776 (patch)
tree       b89eeed53d0f4ff4ca9b753ca3f239e6fa0be005 /fs/dcache.c
parent     pull bumping refcount into ->kill() (diff)
dcache: let the dentry count go down to zero without taking d_lock
We can be more aggressive about this, if we are clever and careful. This is subtle.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Diffstat (limited to 'fs/dcache.c')
-rw-r--r--	fs/dcache.c	118
1 file changed, 114 insertions(+), 4 deletions(-)
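
The fast path added below is built on lockref_put_return(): as the new code uses it, it drops the reference count without taking d_lock and returns the new count, or a negative value when the lockless update could not be done (for example because the lock is held). As a rough user-space sketch of that idea only, and not kernel code, the toy example below packs a lock bit and a count into one atomic word so a single compare-and-swap can decrement the count while the lock bit is clear; the names toy_lockref_t, TOY_LOCKED, TOY_COUNT_UNIT and toy_put_return() are all made up for illustration.

/* Illustrative user-space analogue only; all names are hypothetical. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

typedef _Atomic uint64_t toy_lockref_t;

#define TOY_LOCKED     1ULL	/* low bit: "spinlock held" */
#define TOY_COUNT_UNIT 2ULL	/* the count lives in the remaining bits */

/*
 * Lockless decrement in the spirit of lockref_put_return(): return the
 * new count, or -1 if the lock bit is set and the fast path cannot be
 * trusted, so the caller has to fall back to taking the lock.
 * Callers are assumed to hold a reference, so no underflow check.
 */
static int64_t toy_put_return(toy_lockref_t *ref)
{
	uint64_t old = atomic_load(ref);

	while (!(old & TOY_LOCKED)) {
		uint64_t next = old - TOY_COUNT_UNIT;

		/* On failure, 'old' is reloaded and the loop retries. */
		if (atomic_compare_exchange_weak(ref, &old, next))
			return (int64_t)(next / TOY_COUNT_UNIT);
	}
	return -1;
}

int main(void)
{
	toy_lockref_t ref = 2 * TOY_COUNT_UNIT;	/* count == 2, unlocked */

	printf("%lld\n", (long long)toy_put_return(&ref));	/* 1: not the last ref */
	printf("%lld\n", (long long)toy_put_return(&ref));	/* 0: count hit zero */
	return 0;
}

In the real patch, a negative return sends dput() to the slow path, a positive return means the caller was not the last reference, and zero means the count just hit zero without the lock held, which is the subtle case the long comment in fast_dput() deals with.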
diff --git a/fs/dcache.c b/fs/dcache.c
index 40432e59d72e..a14d00e9839e 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -508,7 +508,7 @@ static void __dentry_kill(struct dentry *dentry)
* dentry_iput drops the locks, at which point nobody (except
* transient RCU lookups) can reach this dentry.
*/
- BUG_ON((int)dentry->d_lockref.count > 0);
+ BUG_ON(dentry->d_lockref.count > 0);
this_cpu_dec(nr_dentry);
if (dentry->d_op && dentry->d_op->d_release)
dentry->d_op->d_release(dentry);
@@ -561,7 +561,7 @@ static inline struct dentry *lock_parent(struct dentry *dentry)
struct dentry *parent = dentry->d_parent;
if (IS_ROOT(dentry))
return NULL;
- if (unlikely((int)dentry->d_lockref.count < 0))
+ if (unlikely(dentry->d_lockref.count < 0))
return NULL;
if (likely(spin_trylock(&parent->d_lock)))
return parent;
@@ -590,6 +590,110 @@ again:
return parent;
}
+/*
+ * Try to do a lockless dput(), and return whether that was successful.
+ *
+ * If unsuccessful, we return false, having already taken the dentry lock.
+ *
+ * The caller needs to hold the RCU read lock, so that the dentry is
+ * guaranteed to stay around even if the refcount goes down to zero!
+ */
+static inline bool fast_dput(struct dentry *dentry)
+{
+ int ret;
+ unsigned int d_flags;
+
+ /*
+ * If we have a d_op->d_delete() operation, we should not
+ * let the dentry count go to zero, so use "put_or_lock".
+ */
+ if (unlikely(dentry->d_flags & DCACHE_OP_DELETE))
+ return lockref_put_or_lock(&dentry->d_lockref);
+
+ /*
+ * .. otherwise, we can try to just decrement the
+ * lockref optimistically.
+ */
+ ret = lockref_put_return(&dentry->d_lockref);
+
+ /*
+ * If the lockref_put_return() failed due to the lock being held
+ * by somebody else, the fast path has failed. We will need to
+ * get the lock, and then check the count again.
+ */
+ if (unlikely(ret < 0)) {
+ 	spin_lock(&dentry->d_lock);
+ 	if (dentry->d_lockref.count > 1) {
+ 		dentry->d_lockref.count--;
+ 		spin_unlock(&dentry->d_lock);
+ 		return 1;
+ 	}
+ 	return 0;
+ }
+
+ /*
+ * If we weren't the last ref, we're done.
+ */
+ if (ret)
+ return 1;
+
+ /*
+ * Careful, careful. The reference count went down
+ * to zero, but we don't hold the dentry lock, so
+ * somebody else could get it again, and do another
+ * dput(), and we need to not race with that.
+ *
+ * However, there is a very special and common case
+ * where we don't care, because there is nothing to
+ * do: the dentry is still hashed, it does not have
+ * a 'delete' op, and it's referenced and already on
+ * the LRU list.
+ *
+ * NOTE! Since we aren't locked, these values are
+ * not "stable". However, it is sufficient that at
+ * some point after we dropped the reference the
+ * dentry was hashed and the flags had the proper
+ * value. Other dentry users may have re-gotten
+ * a reference to the dentry and change that, but
+ * our work is done - we can leave the dentry
+ * around with a zero refcount.
+ */
+ smp_rmb();
+ d_flags = ACCESS_ONCE(dentry->d_flags);
+ d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST;
+
+ /* Nothing to do? Dropping the reference was all we needed? */
+ if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
+ return 1;
+
+ /*
+ * Not the fast normal case? Get the lock. We've already decremented
+ * the refcount, but we'll need to re-check the situation after
+ * getting the lock.
+ */
+ spin_lock(&dentry->d_lock);
+
+ /*
+ * Did somebody else grab a reference to it in the meantime, and
+ * we're no longer the last user after all? Alternatively, somebody
+ * else could have killed it and marked it dead. Either way, we
+ * don't need to do anything else.
+ */
+ if (dentry->d_lockref.count) {
+ 	spin_unlock(&dentry->d_lock);
+ 	return 1;
+ }
+
+ /*
+ * Re-get the reference we optimistically dropped. We hold the
+ * lock, and we just tested that it was zero, so we can just
+ * set it to 1.
+ */
+ dentry->d_lockref.count = 1;
+ return 0;
+}
+
+
/*
* This is dput
*
@@ -622,8 +726,14 @@ void dput(struct dentry *dentry)
return;
repeat:
- if (lockref_put_or_lock(&dentry->d_lockref))
+ rcu_read_lock();
+ if (likely(fast_dput(dentry))) {
+ 	rcu_read_unlock();
 	return;
+ }
+
+ /* Slow case: now with the dentry lock held */
+ rcu_read_unlock();
/* Unreachable? Get rid of it */
if (unlikely(d_unhashed(dentry)))
@@ -810,7 +920,7 @@ static void shrink_dentry_list(struct list_head *list)
* We found an inuse dentry which was not removed from
* the LRU because of laziness during lookup. Do not free it.
*/
- if ((int)dentry->d_lockref.count > 0) {
+ if (dentry->d_lockref.count > 0) {
spin_unlock(&dentry->d_lock);
if (parent)
spin_unlock(&parent->d_lock);
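
When the zero-count fast path cannot be shown to be safe, fast_dput() falls back to taking d_lock, re-checking the count, and "re-getting" the dropped reference. A minimal user-space analogue of that slow-path shape, assuming invented names (toy_obj, toy_slow_path) and a pthread mutex standing in for the dentry spinlock, might look like this:

/* Illustrative analogue of the fast_dput() slow path; not kernel code. */
#include <pthread.h>
#include <stdbool.h>

struct toy_obj {
	pthread_mutex_t lock;
	int count;		/* read and written under 'lock' on this path */
};

/*
 * Called after an optimistic, lockless decrement took 'count' to zero.
 * Returns true if nothing more needs doing (someone else re-took a
 * reference, or killed the object, while we were unlocked).
 * Returns false with 'lock' held and the count restored to 1, so the
 * caller can run the ordinary locked teardown, mirroring dput().
 */
static bool toy_slow_path(struct toy_obj *obj)
{
	pthread_mutex_lock(&obj->lock);

	if (obj->count) {
		/* No longer the last user: drop the lock and bail. */
		pthread_mutex_unlock(&obj->lock);
		return true;
	}

	/* Re-get the reference we optimistically dropped. */
	obj->count = 1;
	return false;
}

int main(void)
{
	struct toy_obj obj = { .lock = PTHREAD_MUTEX_INITIALIZER, .count = 0 };

	if (!toy_slow_path(&obj)) {
		/* We own the lock and the last reference: tear down here. */
		pthread_mutex_unlock(&obj.lock);
	}
	return 0;
}

As in the patch, returning false here means the caller now holds the lock and owns the last reference, while returning true means another user showed up in the window where the count was zero and there is nothing left to do.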