author     Tejun Heo <tj@kernel.org>                        2014-02-03 20:02:59 +0100
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2014-02-08 00:42:40 +0100
commit     182fd64b66342219d6fcf2b84d337529d120d95c (patch)
tree       f8e0377fa5cdf90ea11245fd4f028c07ada4a5cd /fs/kernfs/dir.c
parent     kernfs: remove kernfs_addrm_cxt (diff)
kernfs: remove KERNFS_ACTIVE_REF and add kernfs_lockdep()
There currently are two mechanisms gating active ref lockdep annotations -
KERNFS_LOCKDEP flag and KERNFS_ACTIVE_REF type mask.  The former disables
lockdep annotations in kernfs_get/put_active() while the latter disables
all of kernfs_deactivate().

While KERNFS_ACTIVE_REF also behaves as an optimization to skip the
deactivation step for non-file nodes, the benefit is marginal and it
needlessly diverges code paths.  Let's drop KERNFS_ACTIVE_REF.

While at it, add a test helper kernfs_lockdep() to test KERNFS_LOCKDEP
flag so that it's more convenient and the related code can be compiled
out when not enabled.

v2: Refreshed on top of ("kernfs: make kernfs_deactivate() honor
    KERNFS_LOCKDEP flag").  As the earlier patch already added
    KERNFS_LOCKDEP tests to kernfs_deactivate(), those additions are
    dropped from this patch and the existing ones are simply converted
    to kernfs_lockdep().

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
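As background for the hunks below: the active-ref scheme these lockdep
annotations guard lets getters take a reference only while the count is
non-negative, and deactivation adds a large negative bias so that new
getters fail while the remover waits for outstanding references to drain.
The standalone C sketch that follows models that idea outside the kernel;
it is an illustration only, and details such as KN_DEACTIVATED_BIAS being
INT_MIN, the busy-wait, and the node_get_active()/node_put_active() names
are assumptions made for the example, not the kernel's implementation.

/*
 * Illustrative userspace sketch (NOT kernel code) of the active-ref
 * pattern used by kernfs: take a reference only while the counter is
 * non-negative, and deactivate by adding a large negative bias so new
 * getters fail while in-flight references drain.
 */
#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define KN_DEACTIVATED_BIAS	INT_MIN	/* assumed value for this sketch */

struct node {
	atomic_int active;
};

/* Try to take an active reference; fails once the node is deactivated. */
static bool node_get_active(struct node *n)
{
	int v = atomic_load(&n->active);

	do {
		if (v < 0)
			return false;	/* already biased negative */
	} while (!atomic_compare_exchange_weak(&n->active, &v, v + 1));
	return true;
}

/* Drop a reference taken by node_get_active(). */
static void node_put_active(struct node *n)
{
	atomic_fetch_sub(&n->active, 1);
}

/* Make new gets fail, then wait for existing references to drain. */
static void node_deactivate(struct node *n)
{
	/*
	 * Only the first invocation should apply the bias; in the kernel
	 * this runs under kernfs_mutex, so check-then-add is not racy.
	 */
	if (atomic_load(&n->active) >= 0)
		atomic_fetch_add(&n->active, KN_DEACTIVATED_BIAS);

	/* busy-wait for brevity; the kernel sleeps on a waitqueue */
	while (atomic_load(&n->active) != KN_DEACTIVATED_BIAS)
		;
}

int main(void)
{
	struct node n = { .active = 0 };

	if (node_get_active(&n))
		node_put_active(&n);

	node_deactivate(&n);
	printf("get after deactivate: %s\n",
	       node_get_active(&n) ? "succeeded" : "failed");
	return 0;
}

In the kernel the waiting side sleeps on root->deactivate_waitq rather
than spinning (see the last hunk below), and the annotations converted to
kernfs_lockdep() in this patch wrap exactly these get/put/deactivate steps.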
Diffstat (limited to 'fs/kernfs/dir.c')
-rw-r--r--   fs/kernfs/dir.c   20
1 file changed, 13 insertions(+), 7 deletions(-)
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index 948551d222b4..5cf137b63db9 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -22,6 +22,15 @@ DEFINE_MUTEX(kernfs_mutex);
 
 #define rb_to_kn(X) rb_entry((X), struct kernfs_node, rb)
 
+static bool kernfs_lockdep(struct kernfs_node *kn)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	return kn->flags & KERNFS_LOCKDEP;
+#else
+	return false;
+#endif
+}
+
 /**
  * kernfs_name_hash
  * @name: Null terminated string to hash
@@ -144,7 +153,7 @@ struct kernfs_node *kernfs_get_active(struct kernfs_node *kn)
 	if (!atomic_inc_unless_negative(&kn->active))
 		return NULL;
 
-	if (kn->flags & KERNFS_LOCKDEP)
+	if (kernfs_lockdep(kn))
 		rwsem_acquire_read(&kn->dep_map, 0, 1, _RET_IP_);
 	return kn;
 }
@@ -164,7 +173,7 @@ void kernfs_put_active(struct kernfs_node *kn)
 	if (unlikely(!kn))
 		return;
 
-	if (kn->flags & KERNFS_LOCKDEP)
+	if (kernfs_lockdep(kn))
 		rwsem_release(&kn->dep_map, 1, _RET_IP_);
 	v = atomic_dec_return(&kn->active);
 	if (likely(v != KN_DEACTIVATED_BIAS))
@@ -190,16 +199,13 @@ static void kernfs_deactivate(struct kernfs_node *kn)
 	lockdep_assert_held(&kernfs_mutex);
 	BUG_ON(!(kn->flags & KERNFS_REMOVED));
 
-	if (!(kernfs_type(kn) & KERNFS_ACTIVE_REF))
-		return;
-
 	/* only the first invocation on @kn should deactivate it */
 	if (atomic_read(&kn->active) >= 0)
 		atomic_add(KN_DEACTIVATED_BIAS, &kn->active);
 
 	mutex_unlock(&kernfs_mutex);
 
-	if (kn->flags & KERNFS_LOCKDEP) {
+	if (kernfs_lockdep(kn)) {
 		rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_);
 		if (atomic_read(&kn->active) != KN_DEACTIVATED_BIAS)
 			lock_contended(&kn->dep_map, _RET_IP_);
@@ -209,7 +215,7 @@ static void kernfs_deactivate(struct kernfs_node *kn)
 	wait_event(root->deactivate_waitq,
 		   atomic_read(&kn->active) == KN_DEACTIVATED_BIAS);
 
-	if (kn->flags & KERNFS_LOCKDEP) {
+	if (kernfs_lockdep(kn)) {
 		lock_acquired(&kn->dep_map, _RET_IP_);
 		rwsem_release(&kn->dep_map, 1, _RET_IP_);
 	}