author     Peter Zijlstra <peterz@infradead.org>    2016-09-02 13:42:12 +0200
committer  Ingo Molnar <mingo@kernel.org>           2016-10-25 11:31:53 +0200
commit     5bbd7e644378334700889fb762d5893985a7311f (patch)
tree       4de0e1d4dcb783e6cb86ba234ff79f85ffab18a7 /kernel/locking
parent     locking/mutex: Add lock handoff to avoid starvation (diff)
locking/mutex: Restructure wait loop
Doesn't really matter yet, but pull the HANDOFF and trylock out from under the wait_lock.

The intention is to add an optimistic spin loop here, which requires that we do not hold the wait_lock, so shuffle the code around in preparation.

Also clarify the purpose of taking the wait_lock in the wait loop: it's tempting to want to avoid it altogether, but the cancellation cases need it to avoid losing wakeups.

Suggested-by: Waiman Long <waiman.long@hpe.com>
Tested-by: Jason Low <jason.low2@hpe.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
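For reference, the shape of __mutex_lock_common()'s wait loop after this change can be condensed as below. This is a simplified sketch distilled from the diff that follows, not the literal kernel source; the signal and ww_mutex cancellation checks are elided:

	set_task_state(task, state);
	for (;;) {
		/* wait_lock held: do the trylock first so a handoff is not missed */
		if (__mutex_trylock(lock, first))
			goto acquired;

		/* signal / ww_mutex cancellation checks, still under wait_lock */

		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();

		/* from here on we run without the wait_lock */
		if (!first && __mutex_waiter_is_first(lock, &waiter)) {
			first = true;
			__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
		}

		set_task_state(task, state);
		/* either observe the unlock here or be woken back to RUNNING */
		if (__mutex_trylock(lock, first))
			break;

		spin_lock_mutex(&lock->wait_lock, flags);
	}
	spin_lock_mutex(&lock->wait_lock, flags);
acquired:
	__set_task_state(task, TASK_RUNNING);

The unlocked stretch between schedule_preempt_disabled() and the second trylock is where the optimistic spin loop mentioned above is intended to go.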
Diffstat (limited to 'kernel/locking')
-rw-r--r--  kernel/locking/mutex.c | 30
1 file changed, 25 insertions(+), 5 deletions(-)
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index b4ebd8b9bcd5..8bb2304bb78d 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -631,13 +631,21 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 
 	lock_contended(&lock->dep_map, ip);
 
+	set_task_state(task, state);
 	for (;;) {
+		/*
+		 * Once we hold wait_lock, we're serialized against
+		 * mutex_unlock() handing the lock off to us, do a trylock
+		 * before testing the error conditions to make sure we pick up
+		 * the handoff.
+		 */
 		if (__mutex_trylock(lock, first))
-			break;
+			goto acquired;
 
 		/*
-		 * got a signal? (This code gets eliminated in the
-		 * TASK_UNINTERRUPTIBLE case.)
+		 * Check for signals and wound conditions while holding
+		 * wait_lock. This ensures the lock cancellation is ordered
+		 * against mutex_unlock() and wake-ups do not go missing.
 		 */
 		if (unlikely(signal_pending_state(state, task))) {
 			ret = -EINTR;
@@ -650,16 +658,27 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 			goto err;
 		}
 
-		__set_task_state(task, state);
 		spin_unlock_mutex(&lock->wait_lock, flags);
 		schedule_preempt_disabled();
-		spin_lock_mutex(&lock->wait_lock, flags);
 
 		if (!first && __mutex_waiter_is_first(lock, &waiter)) {
 			first = true;
 			__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
 		}
+
+		set_task_state(task, state);
+		/*
+		 * Here we order against unlock; we must either see it change
+		 * state back to RUNNING and fall through the next schedule(),
+		 * or we must see its unlock and acquire.
+		 */
+		if (__mutex_trylock(lock, first))
+			break;
+
+		spin_lock_mutex(&lock->wait_lock, flags);
 	}
+	spin_lock_mutex(&lock->wait_lock, flags);
+acquired:
 	__set_task_state(task, TASK_RUNNING);
 
 	mutex_remove_waiter(lock, &waiter, task);
@@ -682,6 +701,7 @@ skip_wait:
 	return 0;
 
 err:
+	__set_task_state(task, TASK_RUNNING);
 	mutex_remove_waiter(lock, &waiter, task);
 	spin_unlock_mutex(&lock->wait_lock, flags);
 	debug_mutex_free_waiter(&waiter);
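To spell out the ordering the new "Here we order against unlock" comment relies on, the two sides can be pictured roughly as follows. This is an illustrative simplification, not the literal unlock code; the real unlock path releases the lock (or hands it off to the first waiter) and then issues the wakeup:

	/* waiter */
	set_task_state(task, state);		/* (A) announce we may sleep          */
	if (__mutex_trylock(lock, first))	/* (B) try to observe the release     */
		break;
	/* ... back to the top of the loop ... */
	schedule_preempt_disabled();		/* (C) sleeps only if still in 'state' */

	/* unlocker, simplified */
	/* (1) release the lock (clear the owner, or hand it off to the first waiter) */
	/* (2) wake that waiter, moving it back to TASK_RUNNING                       */

If (B) runs before (1), the waiter misses the release, but the wakeup (2) that follows finds it already in 'state' and returns it to TASK_RUNNING, so (C) falls straight through and the waiter retries instead of sleeping on a wakeup that has already happened. If (B) runs after (1), the trylock simply acquires the lock. Either way no wakeup is lost, which is why (A) must precede (B).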