author    David S. Miller <davem@davemloft.net>  2017-06-10 23:06:46 +0200
committer David S. Miller <davem@davemloft.net>  2017-06-10 23:06:46 +0200
commit    95c4629d92d15d10e5f7c55bfb72583f96a91374 (patch)
tree      ef9b973bfbf9f85659f4fc2794d8271bd19c4857 /kernel/locking
parent    Merge branch 'sparc64-queued-locks' (diff)
parent    Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/vir... (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc
Diffstat (limited to 'kernel/locking')
-rw-r--r--  kernel/locking/rtmutex.c  |  24
1 file changed, 18 insertions(+), 6 deletions(-)
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index b95509416909..28cd09e635ed 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1785,12 +1785,14 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
 	int ret;
 
 	raw_spin_lock_irq(&lock->wait_lock);
-
-	set_current_state(TASK_INTERRUPTIBLE);
-
 	/* sleep on the mutex */
+	set_current_state(TASK_INTERRUPTIBLE);
 	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
-
+	/*
+	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
+	 * have to fix that up.
+	 */
+	fixup_rt_mutex_waiters(lock);
 	raw_spin_unlock_irq(&lock->wait_lock);
 
 	return ret;
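Read as a whole, the new side of this hunk makes rt_mutex_wait_proxy_lock() look roughly as follows. This is only a sketch reconstructed from the hunk above, not standalone-buildable code; the parameter types for 'to' and 'waiter' are assumed, since the hunk header only shows the first argument.

int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
			     struct hrtimer_sleeper *to,
			     struct rt_mutex_waiter *waiter)
{
	int ret;

	raw_spin_lock_irq(&lock->wait_lock);
	/* sleep on the mutex */
	set_current_state(TASK_INTERRUPTIBLE);
	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
	/*
	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
	 * have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);
	raw_spin_unlock_irq(&lock->wait_lock);

	return ret;
}

The visible change is that set_current_state(TASK_INTERRUPTIBLE) now sits directly in front of __rt_mutex_slowlock() under wait_lock, and the waiter bit that try_to_take_rt_mutex() may have left behind is fixed up before wait_lock is dropped.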
@@ -1822,15 +1824,25 @@ bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
 
 	raw_spin_lock_irq(&lock->wait_lock);
 	/*
+	 * Do an unconditional try-lock, this deals with the lock stealing
+	 * state where __rt_mutex_futex_unlock() -> mark_wakeup_next_waiter()
+	 * sets a NULL owner.
+	 *
+	 * We're not interested in the return value, because the subsequent
+	 * test on rt_mutex_owner() will infer that. If the trylock succeeded,
+	 * we will own the lock and it will have removed the waiter. If we
+	 * failed the trylock, we're still not owner and we need to remove
+	 * ourselves.
+	 */
+	try_to_take_rt_mutex(lock, current, waiter);
+	/*
 	 * Unless we're the owner; we're still enqueued on the wait_list.
 	 * So check if we became owner, if not, take us off the wait_list.
 	 */
 	if (rt_mutex_owner(lock) != current) {
 		remove_waiter(lock, waiter);
-		fixup_rt_mutex_waiters(lock);
 		cleanup = true;
 	}
-
 	/*
 	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
 	 * have to fix that up.
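To see why the cleanup path does an unconditional try-lock, it helps to keep the rough caller-side pattern in mind: after a failed or interrupted proxy wait, the caller has to distinguish "really did not get the lock" from "was granted the lock concurrently by the unlocker". The snippet below is an illustrative sketch only, not part of this diff; pi_mutex, to and rt_waiter are placeholder names.

	/*
	 * Sketch of the caller side: cleanup returns false when the waiter
	 * turned out to own the lock (via the unconditional trylock above,
	 * or a wakeup that already made it the owner), so a failed wait is
	 * then converted into a successful acquisition.
	 */
	ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
	if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
		ret = 0;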