path: root/fs/dcache.c
author     Nick Piggin <npiggin@kernel.dk>  2011-01-07 07:49:43 +0100
committer  Nick Piggin <npiggin@kernel.dk>  2011-01-07 07:50:24 +0100
commit     dc0474be3e27463d4d4a2793f82366eed906f223 (patch)
tree       41f75e638442cb343bacdcfbabb17ffc3bd5b4ce /fs/dcache.c
parent     fs: dcache reduce dcache_inode_lock (diff)
fs: dcache rationalise dget variants
dget_locked was a shortcut to avoid the lazy LRU manipulation when we already held dcache_lock (LRU manipulation was relatively cheap at that point). However, now that the LRU lock is an innermost one, we never hold it at any caller, so the lock cost can now be avoided. We already have a well-working lazy dcache LRU, so it should be fine to defer LRU manipulations to scan time.

Signed-off-by: Nick Piggin <npiggin@kernel.dk>
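To make the before/after concrete, here is a minimal user-space sketch of the pattern the patch moves to. It is not kernel code: the toy_dentry struct, the pthread mutex standing in for d_lock, the on_lru flag, and lru_scan_one() are all illustrative stand-ins. The point is only that taking a reference (__dget/__dget_dlock) now just bumps the count under the per-dentry lock and never touches an LRU lock; any stale LRU linkage is cleaned up lazily when the LRU is scanned.

#include <pthread.h>
#include <stdio.h>

/* Toy stand-in for struct dentry: only the fields this sketch needs. */
struct toy_dentry {
	pthread_mutex_t d_lock;   /* stands in for dentry->d_lock */
	int d_count;              /* reference count */
	int on_lru;               /* nonzero while still linked on the LRU */
};

/* Post-patch style: caller already holds d_lock; just take a reference.
 * The entry is deliberately left on the LRU; the scanner skips or unlinks
 * re-referenced entries later ("lazy LRU"). */
static void __dget_dlock(struct toy_dentry *dentry)
{
	dentry->d_count++;
}

/* Post-patch style: lock, bump the count, unlock. No LRU manipulation. */
static void __dget(struct toy_dentry *dentry)
{
	pthread_mutex_lock(&dentry->d_lock);
	__dget_dlock(dentry);
	pthread_mutex_unlock(&dentry->d_lock);
}

/* What a later LRU scan does with an entry that got re-referenced:
 * the deferred removal happens here, at scan time. */
static void lru_scan_one(struct toy_dentry *dentry)
{
	pthread_mutex_lock(&dentry->d_lock);
	if (dentry->d_count > 0 && dentry->on_lru)
		dentry->on_lru = 0;
	pthread_mutex_unlock(&dentry->d_lock);
}

int main(void)
{
	struct toy_dentry d = { PTHREAD_MUTEX_INITIALIZER, 0, 1 };

	__dget(&d);        /* fast path: no LRU lock taken */
	lru_scan_one(&d);  /* LRU cleanup deferred to scan time */
	printf("d_count=%d on_lru=%d\n", d.d_count, d.on_lru);
	return 0;
}

(Build with "gcc -pthread"; by contrast, the old __dget_locked_dlock() unlinked the entry from the LRU eagerly at reference time.)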
Diffstat (limited to 'fs/dcache.c')
-rw-r--r--  fs/dcache.c  36
1 file changed, 11 insertions, 25 deletions
diff --git a/fs/dcache.c b/fs/dcache.c
index 01f016799fd4..b4d2e28eef5b 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -429,32 +429,17 @@ int d_invalidate(struct dentry * dentry)
 EXPORT_SYMBOL(d_invalidate);
 
 /* This must be called with d_lock held */
-static inline struct dentry * __dget_locked_dlock(struct dentry *dentry)
+static inline void __dget_dlock(struct dentry *dentry)
 {
 	dentry->d_count++;
-	dentry_lru_del(dentry);
-	return dentry;
 }
 
-/* This must be called with d_lock held */
-static inline struct dentry * __dget_locked(struct dentry *dentry)
+static inline void __dget(struct dentry *dentry)
 {
 	spin_lock(&dentry->d_lock);
-	__dget_locked_dlock(dentry);
+	__dget_dlock(dentry);
 	spin_unlock(&dentry->d_lock);
-	return dentry;
-}
-
-struct dentry * dget_locked_dlock(struct dentry *dentry)
-{
-	return __dget_locked_dlock(dentry);
-}
-
-struct dentry * dget_locked(struct dentry *dentry)
-{
-	return __dget_locked(dentry);
 }
-EXPORT_SYMBOL(dget_locked);
 
 struct dentry *dget_parent(struct dentry *dentry)
 {
@@ -512,7 +497,7 @@ again:
 			    (alias->d_flags & DCACHE_DISCONNECTED)) {
 				discon_alias = alias;
 			} else if (!want_discon) {
-				__dget_locked_dlock(alias);
+				__dget_dlock(alias);
 				spin_unlock(&alias->d_lock);
 				return alias;
 			}
@@ -525,7 +510,7 @@ again:
 		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
 			if (IS_ROOT(alias) &&
 			    (alias->d_flags & DCACHE_DISCONNECTED)) {
-				__dget_locked_dlock(alias);
+				__dget_dlock(alias);
 				spin_unlock(&alias->d_lock);
 				return alias;
 			}
@@ -561,7 +546,7 @@ restart:
 	list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
 		spin_lock(&dentry->d_lock);
 		if (!dentry->d_count) {
-			__dget_locked_dlock(dentry);
+			__dget_dlock(dentry);
 			__d_drop(dentry);
 			spin_unlock(&dentry->d_lock);
 			spin_unlock(&dcache_inode_lock);
@@ -1257,7 +1242,8 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
 		 * don't need child lock because it is not subject
 		 * to concurrency here
 		 */
-		dentry->d_parent = dget_dlock(parent);
+		__dget_dlock(parent);
+		dentry->d_parent = parent;
 		dentry->d_sb = parent->d_sb;
 		list_add(&dentry->d_u.d_child, &parent->d_subdirs);
 		spin_unlock(&parent->d_lock);
@@ -1360,7 +1346,7 @@ static struct dentry *__d_instantiate_unique(struct dentry *entry,
 			continue;
 		if (memcmp(qstr->name, name, len))
 			continue;
-		dget_locked(alias);
+		__dget(alias);
 		return alias;
 	}
@@ -1613,7 +1599,7 @@ struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
 		 * reference to it, move it in place and use it.
 		 */
 		new = list_entry(inode->i_dentry.next, struct dentry, d_alias);
-		dget_locked(new);
+		__dget(new);
 		spin_unlock(&dcache_inode_lock);
 		security_d_instantiate(found, inode);
 		d_move(new, found);
@@ -1789,7 +1775,7 @@ int d_validate(struct dentry *dentry, struct dentry *dparent)
 	list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) {
 		if (dentry == child) {
 			spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
-			__dget_locked_dlock(dentry);
+			__dget_dlock(dentry);
 			spin_unlock(&dentry->d_lock);
 			spin_unlock(&dparent->d_lock);
 			return 1;