author    Peter Zijlstra <peterz@infradead.org>    2022-03-30 13:06:54 +0200
committer Peter Zijlstra <peterz@infradead.org>    2022-04-05 10:24:36 +0200
commit    dc1f7893a70fe403983bd8492f177bf993940e2c
tree      0601a9c8f14c338fbae6755284036b50e98501d1
parent    locking: Apply contention tracepoints in the slow path
locking/mutex: Make contention tracepoints more consistent wrt adaptive spinning
Have the trace_contention_*() tracepoints consistently include adaptive spinning. In order to differentiate between the spinning and non-spinning states, add LCB_F_MUTEX and combine it with LCB_F_SPIN.

The consequence is that a mutex contention can now trigger multiple _begin() tracepoints before triggering an _end(). Additionally, this fixes one path where mutex would trigger _end() without ever seeing a _begin().

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
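For reference, LCB_F_SPIN is (1U << 0) in include/trace/events/lock.h, so after this patch a mutex contention_begin event carries LCB_F_MUTEX|LCB_F_SPIN while optimistically spinning and plain LCB_F_MUTEX while sleeping on the wait list. A minimal userspace sketch of how a consumer of these tracepoints might decode the flags word (not part of this patch; contention_phase() is a hypothetical helper):

#include <stdio.h>

/* Flag values mirrored from include/trace/events/lock.h */
#define LCB_F_SPIN      (1U << 0)
#define LCB_F_MUTEX     (1U << 5)

/* Hypothetical helper: classify a contention_begin flags word */
static const char *contention_phase(unsigned int flags)
{
        if (!(flags & LCB_F_MUTEX))
                return "not a mutex";
        return (flags & LCB_F_SPIN) ? "mutex: optimistic spin"
                                    : "mutex: sleeping wait";
}

int main(void)
{
        /* The two flag combinations this patch emits for mutexes */
        printf("%s\n", contention_phase(LCB_F_MUTEX | LCB_F_SPIN));
        printf("%s\n", contention_phase(LCB_F_MUTEX));
        return 0;
}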
-rw-r--r--    include/trace/events/lock.h     4
-rw-r--r--    kernel/locking/mutex.c         16
2 files changed, 15 insertions, 5 deletions
diff --git a/include/trace/events/lock.h b/include/trace/events/lock.h
index b9b6e3edd518..9ebd081e057e 100644
--- a/include/trace/events/lock.h
+++ b/include/trace/events/lock.h
@@ -14,6 +14,7 @@
 #define LCB_F_WRITE     (1U << 2)
 #define LCB_F_RT        (1U << 3)
 #define LCB_F_PERCPU    (1U << 4)
+#define LCB_F_MUTEX     (1U << 5)
 
 
 #ifdef CONFIG_LOCKDEP
@@ -113,7 +114,8 @@ TRACE_EVENT(contention_begin,
                        { LCB_F_SPIN,           "SPIN" },
                        { LCB_F_READ,           "READ" },
                        { LCB_F_WRITE,          "WRITE" },
                        { LCB_F_RT,             "RT" },
-                       { LCB_F_PERCPU,         "PERCPU" }
+                       { LCB_F_PERCPU,         "PERCPU" },
+                       { LCB_F_MUTEX,          "MUTEX" }
                        ))
 );
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index c88deda77cf2..d973fe6041bf 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -602,12 +602,14 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
        preempt_disable();
        mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
 
+       trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
        if (__mutex_trylock(lock) ||
            mutex_optimistic_spin(lock, ww_ctx, NULL)) {
                /* got the lock, yay! */
                lock_acquired(&lock->dep_map, ip);
                if (ww_ctx)
                        ww_mutex_set_context_fastpath(ww, ww_ctx);
+               trace_contention_end(lock, 0);
                preempt_enable();
                return 0;
        }
@@ -644,7 +646,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
        }
 
        set_current_state(state);
-       trace_contention_begin(lock, 0);
+       trace_contention_begin(lock, LCB_F_MUTEX);
        for (;;) {
                bool first;
 
@@ -684,10 +686,16 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
                 * state back to RUNNING and fall through the next schedule(),
                 * or we must see its unlock and acquire.
                 */
-               if (__mutex_trylock_or_handoff(lock, first) ||
-                   (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
+               if (__mutex_trylock_or_handoff(lock, first))
                        break;
 
+               if (first) {
+                       trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
+                       if (mutex_optimistic_spin(lock, ww_ctx, &waiter))
+                               break;
+                       trace_contention_begin(lock, LCB_F_MUTEX);
+               }
+
                raw_spin_lock(&lock->wait_lock);
        }
        raw_spin_lock(&lock->wait_lock);
@@ -723,8 +731,8 @@ skip_wait:
 err:
        __set_current_state(TASK_RUNNING);
        __mutex_remove_waiter(lock, &waiter);
-       trace_contention_end(lock, ret);
 err_early_kill:
+       trace_contention_end(lock, ret);
        raw_spin_unlock(&lock->wait_lock);
        debug_mutex_free_waiter(&waiter);
        mutex_release(&lock->dep_map, ip);
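Taken together, the hunks above mean a fully contended mutex acquisition may now emit several _begin() events before its single _end(). An illustrative worst-case sequence, derived from the patched code paths (exact ordering depends on scheduling and contention):

/*
 * One contended mutex_lock(), worst case:
 *
 *   trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);  pre-wait optimistic spin
 *       ... spin fails, task queues as a waiter ...
 *   trace_contention_begin(lock, LCB_F_MUTEX);               sleeping wait
 *       ... woken as first waiter, trylock/handoff fails ...
 *   trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);  mid-wait spin as first waiter
 *       ... spin fails again ...
 *   trace_contention_begin(lock, LCB_F_MUTEX);               back to sleeping wait
 *       ...
 *   trace_contention_end(lock, 0);                           acquired (or ret != 0 via the
 *                                                            err/err_early_kill paths)
 */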