author		Frederic Weisbecker <fweisbec@gmail.com>	2009-04-06 01:49:33 +0200
committer	Ingo Molnar <mingo@elte.hu>			2009-04-10 12:50:56 +0200
commit		2062501ae6505dbc5bff3a792246c2661d114050 (patch)
tree		59792987c9e9fd1ae657c2e4a5cdb14545523e24 /kernel/lockdep.c
parent		Merge branch 'tracing/urgent' into tracing/core (diff)
download	linux-2062501ae6505dbc5bff3a792246c2661d114050.tar.xz
		linux-2062501ae6505dbc5bff3a792246c2661d114050.zip
tracing/lockdep: report the time waited for a lock
While trying to optimize the new lock on reiserfs that replaces the bkl, I found the lock tracing very useful, though it lacked something important for performance (and latency) instrumentation: the time a task waits for a lock.

That's what this patch implements:

	bash-4816   [000]   202.652815: lock_contended: lock_contended: &sb->s_type->i_mutex_key
	bash-4816   [000]   202.652819: lock_acquired: &rq->lock (0.000 us)
	<...>-4787  [000]   202.652825: lock_acquired: &rq->lock (0.000 us)
	<...>-4787  [000]   202.652829: lock_acquired: &rq->lock (0.000 us)
	bash-4816   [000]   202.652833: lock_acquired: &sb->s_type->i_mutex_key (16.005 us)

As shown above, each "lock_acquired" entry is followed by the time the task has been waiting for the lock. Usually, a lock_contended entry is followed by a nearby lock_acquired entry with a non-zero wait time.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
LKML-Reference: <1238975373-15739-1-git-send-email-fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
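Note: the trace_lock_acquired() call added below in __lock_acquired() passes the measured wait time to the tracepoint, so the lock_acquired prototype in the lockdep trace headers (not part of this diff) has to grow a matching argument. A minimal sketch of such a declaration, assuming a plain DECLARE_TRACE() and an s64 wait time; the macro, header, and type here are assumptions, not the actual header change:

	/* Sketch: the lock_acquired event now carries how long the task waited. */
	DECLARE_TRACE(lock_acquired,
		TP_PROTO(struct lockdep_map *lock, unsigned long ip, s64 waittime),
		TP_ARGS(lock, ip, waittime));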
Diffstat (limited to 'kernel/lockdep.c')
-rw-r--r--	kernel/lockdep.c	8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index b0f011866969..c4582a6ea953 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -3061,6 +3061,8 @@ found_it:
 	put_lock_stats(stats);
 }
 
+DEFINE_TRACE(lock_acquired);
+
 static void
 __lock_acquired(struct lockdep_map *lock, unsigned long ip)
 {
@@ -3099,6 +3101,8 @@ found_it:
 		hlock->holdtime_stamp = now;
 	}
 
+	trace_lock_acquired(lock, ip, waittime);
+
 	stats = get_lock_stats(hlock_class(hlock));
 	if (waittime) {
 		if (hlock->read)
@@ -3137,14 +3141,10 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
 }
 EXPORT_SYMBOL_GPL(lock_contended);
 
-DEFINE_TRACE(lock_acquired);
-
 void lock_acquired(struct lockdep_map *lock, unsigned long ip)
 {
 	unsigned long flags;
 
-	trace_lock_acquired(lock, ip);
-
 	if (unlikely(!lock_stat))
 		return;
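
With DEFINE_TRACE(lock_acquired) now sitting next to __lock_acquired(), any probe attached to the tracepoint receives the wait time directly. A hypothetical probe module illustrating the new three-argument signature, assuming the declaration sketched above and nanosecond units; the probe name, message, and the <trace/lockdep.h> header path are illustrative only:

	#include <linux/module.h>
	#include <linux/kernel.h>
	#include <linux/lockdep.h>
	#include <trace/lockdep.h>	/* assumed location of the tracepoint declaration */

	/* Hypothetical probe: report which lock was taken and how long we waited. */
	static void probe_lock_acquired(struct lockdep_map *lock, unsigned long ip, s64 waittime)
	{
		printk(KERN_DEBUG "%s acquired after %lld ns\n",
		       lock->name, (long long)waittime);
	}

	static int __init lockwait_init(void)
	{
		return register_trace_lock_acquired(probe_lock_acquired);
	}

	static void __exit lockwait_exit(void)
	{
		unregister_trace_lock_acquired(probe_lock_acquired);
		tracepoint_synchronize_unregister();
	}

	module_init(lockwait_init);
	module_exit(lockwait_exit);
	MODULE_LICENSE("GPL");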