Diffstat (limited to 'kernel/locking/rtmutex_common.h')
-rw-r--r-- | kernel/locking/rtmutex_common.h | 105 |
1 file changed, 52 insertions, 53 deletions
diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
index ca6fb489007b..a90c22abdbca 100644
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -13,6 +13,7 @@
 #ifndef __KERNEL_RTMUTEX_COMMON_H
 #define __KERNEL_RTMUTEX_COMMON_H
 
+#include <linux/debug_locks.h>
 #include <linux/rtmutex.h>
 #include <linux/sched/wake_q.h>
 
@@ -23,34 +24,30 @@
  * @tree_entry:		pi node to enqueue into the mutex waiters tree
  * @pi_tree_entry:	pi node to enqueue into the mutex owner waiters tree
  * @task:		task reference to the blocked task
+ * @lock:		Pointer to the rt_mutex on which the waiter blocks
+ * @prio:		Priority of the waiter
+ * @deadline:		Deadline of the waiter if applicable
  */
 struct rt_mutex_waiter {
-	struct rb_node          tree_entry;
-	struct rb_node          pi_tree_entry;
+	struct rb_node		tree_entry;
+	struct rb_node		pi_tree_entry;
 	struct task_struct	*task;
 	struct rt_mutex		*lock;
-#ifdef CONFIG_DEBUG_RT_MUTEXES
-	unsigned long		ip;
-	struct pid		*deadlock_task_pid;
-	struct rt_mutex		*deadlock_lock;
-#endif
-	int prio;
-	u64 deadline;
+	int			prio;
+	u64			deadline;
 };
 
 /*
- * Various helpers to access the waiters-tree:
+ * Must be guarded because this header is included from rcu/tree_plugin.h
+ * unconditionally.
  */
-
 #ifdef CONFIG_RT_MUTEXES
-
 static inline int rt_mutex_has_waiters(struct rt_mutex *lock)
 {
 	return !RB_EMPTY_ROOT(&lock->waiters.rb_root);
 }
 
-static inline struct rt_mutex_waiter *
-rt_mutex_top_waiter(struct rt_mutex *lock)
+static inline struct rt_mutex_waiter *rt_mutex_top_waiter(struct rt_mutex *lock)
 {
 	struct rb_node *leftmost = rb_first_cached(&lock->waiters);
 	struct rt_mutex_waiter *w = NULL;
@@ -67,42 +64,12 @@ static inline int task_has_pi_waiters(struct task_struct *p)
 	return !RB_EMPTY_ROOT(&p->pi_waiters.rb_root);
 }
 
-static inline struct rt_mutex_waiter *
-task_top_pi_waiter(struct task_struct *p)
-{
-	return rb_entry(p->pi_waiters.rb_leftmost,
-			struct rt_mutex_waiter, pi_tree_entry);
-}
-
-#else
-
-static inline int rt_mutex_has_waiters(struct rt_mutex *lock)
-{
-	return false;
-}
-
-static inline struct rt_mutex_waiter *
-rt_mutex_top_waiter(struct rt_mutex *lock)
+static inline struct rt_mutex_waiter *task_top_pi_waiter(struct task_struct *p)
 {
-	return NULL;
-}
-
-static inline int task_has_pi_waiters(struct task_struct *p)
-{
-	return false;
+	return rb_entry(p->pi_waiters.rb_leftmost, struct rt_mutex_waiter,
+			pi_tree_entry);
 }
 
-static inline struct rt_mutex_waiter *
-task_top_pi_waiter(struct task_struct *p)
-{
-	return NULL;
-}
-
-#endif
-
-/*
- * lock->owner state tracking:
- */
 #define RT_MUTEX_HAS_WAITERS	1UL
 
 static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
@@ -111,6 +78,13 @@ static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
 	return (struct task_struct *) (owner & ~RT_MUTEX_HAS_WAITERS);
 }
 
+#else /* CONFIG_RT_MUTEXES */
+/* Used in rcu/tree_plugin.h */
+static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
+{
+	return NULL;
+}
+#endif /* !CONFIG_RT_MUTEXES */
 
 /*
  * Constants for rt mutex functions which have a selectable deadlock
@@ -127,10 +101,16 @@ enum rtmutex_chainwalk {
 	RT_MUTEX_FULL_CHAINWALK,
 };
 
+static inline void __rt_mutex_basic_init(struct rt_mutex *lock)
+{
+	lock->owner = NULL;
+	raw_spin_lock_init(&lock->wait_lock);
+	lock->waiters = RB_ROOT_CACHED;
+}
+
 /*
  * PI-futex support (proxy locking functions, etc.):
  */
-extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
 extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
				       struct task_struct *proxy_owner);
 extern void rt_mutex_proxy_unlock(struct rt_mutex *lock);
@@ -156,10 +136,29 @@ extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock,
 
 extern void rt_mutex_postunlock(struct wake_q_head *wake_q);
 
-#ifdef CONFIG_DEBUG_RT_MUTEXES
-# include "rtmutex-debug.h"
-#else
-# include "rtmutex.h"
-#endif
+/* Debug functions */
+static inline void debug_rt_mutex_unlock(struct rt_mutex *lock)
+{
+	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
+		DEBUG_LOCKS_WARN_ON(rt_mutex_owner(lock) != current);
+}
+
+static inline void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock)
+{
+	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
+		DEBUG_LOCKS_WARN_ON(!rt_mutex_owner(lock));
+}
+
+static inline void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
+{
+	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
+		memset(waiter, 0x11, sizeof(*waiter));
+}
+
+static inline void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
+{
+	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
+		memset(waiter, 0x22, sizeof(*waiter));
+}
 
 #endif
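Note on the pattern the diff adopts: the removed #ifdef CONFIG_DEBUG_RT_MUTEXES / #include split is replaced by inline helpers that test IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES) at compile time, so both the debug and non-debug paths are always visible to the compiler and the debug bodies are optimized away when the option is off. The stand-alone C sketch below illustrates that pattern under stated assumptions: DEMO_IS_ENABLED, CONFIG_DEMO_DEBUG, demo_waiter and demo_debug_init_waiter are illustrative stand-ins for the kernel's IS_ENABLED() (from <linux/kconfig.h>), CONFIG_DEBUG_RT_MUTEXES, rt_mutex_waiter and debug_rt_mutex_init_waiter(); it is not kernel code.

/* Minimal sketch of the IS_ENABLED()-style compile-time switch used by the
 * new debug helpers. DEMO_IS_ENABLED and CONFIG_DEMO_DEBUG are stand-ins for
 * the kernel's IS_ENABLED() and CONFIG_DEBUG_RT_MUTEXES.
 */
#include <stdio.h>
#include <string.h>

#define CONFIG_DEMO_DEBUG 1		/* set to 0 to compile the check out */
#define DEMO_IS_ENABLED(option) (option)

struct demo_waiter {
	int prio;
	unsigned long long deadline;
};

/* Mirrors the role of debug_rt_mutex_init_waiter(): poison the waiter so use
 * of an uninitialized field stands out, but only when debugging is enabled.
 * When the option is 0, the branch is a constant-false condition and the
 * compiler drops the memset() entirely, leaving a no-op inline.
 */
static inline void demo_debug_init_waiter(struct demo_waiter *waiter)
{
	if (DEMO_IS_ENABLED(CONFIG_DEMO_DEBUG))
		memset(waiter, 0x11, sizeof(*waiter));
}

int main(void)
{
	struct demo_waiter w = { 0 };

	demo_debug_init_waiter(&w);
	/* prints 0x11111111 with the debug option on, 0 with it off */
	printf("prio after init: %#x\n", (unsigned int)w.prio);
	return 0;
}

Unlike the old #ifdef/#include split, both branches are parsed and type-checked in every configuration, which is the usual motivation for preferring IS_ENABLED() over preprocessor conditionals.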