author    | Thomas Gleixner <tglx@linutronix.de>     | 2021-08-15 23:28:25 +0200
committer | Ingo Molnar <mingo@kernel.org>           | 2021-08-17 17:45:37 +0200
commit    | 1c143c4b65da09081d644110e619decc49c9dee4 (patch)
tree      | 4183119f2daea02c3d6617a996603a0af020cd4e /kernel/locking/rtmutex.c
parent    | locking/spinlock: Provide RT variant header: <linux/spinlock_rt.h> (diff)
locking/rtmutex: Provide the spin/rwlock core lock function
Provide a simplified version of the rtmutex slowlock function which neither
handles signals nor timeouts, and which is careful to preserve the state of
the blocked task across the lock operation.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20210815211303.770228446@linutronix.de
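For context: this slow path only runs on contention. Below is a minimal sketch, not part of this patch, of how the RT spinlock front end is expected to reach rtlock_slowlock(). It assumes the usual rtmutex fast path (an acquire-ordered cmpxchg of the owner field); the actual rt_spin_lock() wrapper arrives in later patches of this series:

/*
 * Illustrative sketch only -- not part of this commit. Assumes the
 * common rtmutex fast path: an uncontended acquire-cmpxchg of the
 * owner field, with rtlock_slowlock() as the contended fallback.
 */
static __always_inline void rt_spin_lock_sketch(struct rt_mutex_base *lock)
{
	/* Fast path: uncontended, become the owner with one atomic op */
	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
		return;

	/* Contended: fall back to the simplified slow path added below */
	rtlock_slowlock(lock);
}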
Diffstat (limited to 'kernel/locking/rtmutex.c')
-rw-r--r-- | kernel/locking/rtmutex.c | 60
1 file changed, 60 insertions, 0 deletions
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 949781aa54b1..951bef073891 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1416,3 +1416,63 @@ static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock,
 	return rt_mutex_slowlock(lock, state);
 }
 #endif /* RT_MUTEX_BUILD_MUTEX */
+
+#ifdef RT_MUTEX_BUILD_SPINLOCKS
+/*
+ * Functions required for spin/rw_lock substitution on RT kernels
+ */
+
+/**
+ * rtlock_slowlock_locked - Slow path lock acquisition for RT locks
+ * @lock:	The underlying RT mutex
+ */
+static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock)
+{
+	struct rt_mutex_waiter waiter;
+
+	lockdep_assert_held(&lock->wait_lock);
+
+	if (try_to_take_rt_mutex(lock, current, NULL))
+		return;
+
+	rt_mutex_init_rtlock_waiter(&waiter);
+
+	/* Save current state and set state to TASK_RTLOCK_WAIT */
+	current_save_and_set_rtlock_wait_state();
+
+	task_blocks_on_rt_mutex(lock, &waiter, current, RT_MUTEX_MIN_CHAINWALK);
+
+	for (;;) {
+		/* Try to acquire the lock again */
+		if (try_to_take_rt_mutex(lock, current, &waiter))
+			break;
+
+		raw_spin_unlock_irq(&lock->wait_lock);
+
+		schedule_rtlock();
+
+		raw_spin_lock_irq(&lock->wait_lock);
+		set_current_state(TASK_RTLOCK_WAIT);
+	}
+
+	/* Restore the task state */
+	current_restore_rtlock_saved_state();
+
+	/*
+	 * try_to_take_rt_mutex() sets the waiter bit unconditionally.
+	 * We might have to fix that up:
+	 */
+	fixup_rt_mutex_waiters(lock);
+	debug_rt_mutex_free_waiter(&waiter);
+}
+
+static __always_inline void __sched rtlock_slowlock(struct rt_mutex_base *lock)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&lock->wait_lock, flags);
+	rtlock_slowlock_locked(lock);
+	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+}
+
+#endif /* RT_MUTEX_BUILD_SPINLOCKS */
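The state preservation noted in the changelog hinges on the current_save_and_set_rtlock_wait_state() / current_restore_rtlock_saved_state() pair introduced earlier in this series. A simplified sketch of what they are expected to do, with lockdep assertions elided; details may differ from the real macros in <linux/sched.h>:

/*
 * Simplified sketch of the save/restore helpers used above. The real
 * macros were added earlier in this series; assertions and memory
 * ordering details are elided here.
 */
#define current_save_and_set_rtlock_wait_state_sketch()		\
	do {								\
		raw_spin_lock(&current->pi_lock);			\
		/* Remember what the task was doing before it blocked */ \
		current->saved_state = current->__state;		\
		WRITE_ONCE(current->__state, TASK_RTLOCK_WAIT);		\
		raw_spin_unlock(&current->pi_lock);			\
	} while (0)

#define current_restore_rtlock_saved_state_sketch()			\
	do {								\
		raw_spin_lock(&current->pi_lock);			\
		/* Put the original state back once the lock is held */ \
		current->__state = current->saved_state;		\
		current->saved_state = TASK_RUNNING;			\
		raw_spin_unlock(&current->pi_lock);			\
	} while (0)

This is what lets a task that was, say, TASK_UNINTERRUPTIBLE when it hit a contended RT spinlock block in TASK_RTLOCK_WAIT and wake up with its original state intact, keeping the lock substitution invisible to the rest of the kernel.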