author     Thomas Gleixner <tglx@linutronix.de>  2021-08-15 23:28:06 +0200
committer  Ingo Molnar <mingo@kernel.org>        2021-08-17 17:15:36 +0200
commit     c014ef69b3acdb8c9e7fc412e96944f4d5c36fa0
tree       ac73d55475e9f3a06992ed6cf561bfcd02fc84d5  /kernel/locking/rtmutex.c
parent     locking/rwsem: Add rtmutex based R/W semaphore implementation
locking/rtmutex: Add wake_state to rt_mutex_waiter
Regular sleeping locks like mutexes, rtmutexes and rw_semaphores always enter and leave a blocking section with task state == TASK_RUNNING.

On a non-RT kernel spinlocks and rwlocks never affect the task state, but on RT kernels these locks are converted to rtmutex based 'sleeping' locks. So in case of contention the task blocks, which requires carefully preserving the task state and restoring it after acquiring the lock, while taking into account regular wakeups for the task that happened while it was blocked. This state preservation is achieved by a separate task state for blocking on an RT spin/rwlock and a saved_state field in task_struct, along with careful handling of these wakeup scenarios in try_to_wake_up().

To avoid conditionals in the rtmutex code, store the wake state which has to be used for waking a lock waiter in rt_mutex_waiter, which allows handling both the regular and the RT spin/rwlock cases by handing it to wake_up_state().

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20210815211303.079800739@linutronix.de
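To make the description above concrete, here is a simplified sketch (not the complete kernel definitions; the member layout is abbreviated and only illustrates the new field): the waiter records the task state to use when it is woken, defaulting to TASK_NORMAL so regular sleeping locks behave as before, while RT spin/rwlock waiters can store their dedicated blocking state instead.

/* Simplified sketch only; the real definitions live in rtmutex_common.h. */
struct rt_mutex_waiter {
	struct task_struct	*task;
	unsigned int		wake_state;	/* TASK_NORMAL for regular sleeping locks */
	/* ... rbtree entries, lock pointer, prio, deadline ... */
};

static inline void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
{
	/* Regular sleeping lock waiters are woken for any wakeup. */
	waiter->wake_state = TASK_NORMAL;
	waiter->task = NULL;
}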
Diffstat (limited to 'kernel/locking/rtmutex.c')
-rw-r--r--  kernel/locking/rtmutex.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 3d0b29cb5e63..c13b9b849a4b 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -692,7 +692,7 @@ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task,
 		 * to get the lock.
 		 */
 		if (prerequeue_top_waiter != rt_mutex_top_waiter(lock))
-			wake_up_process(rt_mutex_top_waiter(lock)->task);
+			wake_up_state(waiter->task, waiter->wake_state);
 		raw_spin_unlock_irq(&lock->wait_lock);
 		return 0;
 	}
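For reference, wake_up_process(p) is try_to_wake_up(p, TASK_NORMAL, 0) and wake_up_state(p, state) is try_to_wake_up(p, state, 0), so for a regular waiter whose wake_state is TASK_NORMAL the replacement above is behaviourally identical. A hypothetical helper (illustration only, not part of this patch) makes the intent explicit:

/* Hypothetical helper for illustration; not part of this patch. */
static void rt_mutex_wake_up_waiter(struct rt_mutex_waiter *waiter)
{
	/*
	 * wake_state is TASK_NORMAL for regular sleeping locks, making this
	 * equivalent to wake_up_process(waiter->task). RT 'sleeping'
	 * spin/rwlocks can put their dedicated blocking state into
	 * wake_state and are handled by the same call without conditionals.
	 */
	wake_up_state(waiter->task, waiter->wake_state);
}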