author     Sebastian Andrzej Siewior <bigeasy@linutronix.de>   2021-08-15 23:27:54 +0200
committer  Ingo Molnar <mingo@kernel.org>                      2021-08-17 17:00:48 +0200
commit     785159301bedea25fae9b20cae3d12377246e941 (patch)
tree       bfb6e0e4f0f08d2b51c055fb4e0908b7b4e3d2d1 /kernel/locking/rtmutex.c
parent     locking/rtmutex: Remove rt_mutex_is_locked() (diff)
download   linux-785159301bedea25fae9b20cae3d12377246e941.tar.xz
           linux-785159301bedea25fae9b20cae3d12377246e941.zip
locking/rtmutex: Convert macros to inlines
Inlines are type-safe...

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20210815211302.610830960@linutronix.de
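To see what "type-safe" buys in practice, here is a standalone sketch, not part of the patch, with every name in it made up for illustration: a function-like macro stub discards its arguments during preprocessing, so wrongly typed or even undeclared arguments compile silently whenever that stub is selected, while a typed static inline keeps checking every argument against its prototype in every configuration.

#include <stdbool.h>
#include <stdio.h>

struct task { int pid; };
struct lock { struct task *owner; };

/* macro stub: the arguments vanish during preprocessing */
#define lock_try_set_macro(l, c, n)     (0)

/* inline stub: 'l', 'c' and 'n' must match the declared types */
static inline bool lock_try_set_inline(struct lock *l,
                                       struct task *c,
                                       struct task *n)
{
        return false;
}

int main(void)
{
        struct lock lk = { .owner = NULL };

        /*
         * Compiles even though the second argument is a string and
         * 'no_such_symbol' is declared nowhere: the macro throws its
         * arguments away before the compiler ever sees them.
         */
        printf("%d\n", lock_try_set_macro(&lk, "not a task", no_such_symbol));

        /*
         * The same call through the typed inline is diagnosed:
         *   lock_try_set_inline(&lk, "not a task", NULL);
         * -> warning/error: passing 'char *' where 'struct task *' is expected
         */
        printf("%d\n", lock_try_set_inline(&lk, NULL, NULL));
        return 0;
}

That is exactly the situation of the CONFIG_DEBUG_RT_MUTEXES stubs in the second hunk below: the old (0) macros never looked at their arguments, whereas the new always-false inlines still do.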
Diffstat (limited to 'kernel/locking/rtmutex.c')
-rw-r--r--  kernel/locking/rtmutex.c | 31
1 file changed, 27 insertions(+), 4 deletions(-)
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 1a7e3f838077..5187added8bc 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -141,8 +141,19 @@ static __always_inline void fixup_rt_mutex_waiters(struct rt_mutex *lock)
* set up.
*/
#ifndef CONFIG_DEBUG_RT_MUTEXES
-# define rt_mutex_cmpxchg_acquire(l,c,n) (cmpxchg_acquire(&l->owner, c, n) == c)
-# define rt_mutex_cmpxchg_release(l,c,n) (cmpxchg_release(&l->owner, c, n) == c)
+static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex *lock,
+                                                     struct task_struct *old,
+                                                     struct task_struct *new)
+{
+        return cmpxchg_acquire(&lock->owner, old, new) == old;
+}
+
+static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex *lock,
+                                                     struct task_struct *old,
+                                                     struct task_struct *new)
+{
+        return cmpxchg_release(&lock->owner, old, new) == old;
+}

/*
 * Callers must hold the ->wait_lock -- which is the whole purpose as we force
@@ -201,8 +212,20 @@ static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
}

#else
-# define rt_mutex_cmpxchg_acquire(l,c,n) (0)
-# define rt_mutex_cmpxchg_release(l,c,n) (0)
+static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex *lock,
+                                                     struct task_struct *old,
+                                                     struct task_struct *new)
+{
+        return false;
+
+}
+
+static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex *lock,
+                                                     struct task_struct *old,
+                                                     struct task_struct *new)
+{
+        return false;
+}

static __always_inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
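For context on how these helpers are consumed, below is a simplified, self-contained sketch of the fast-path pattern around them in rtmutex.c, paraphrased rather than taken from this patch: try to switch the owner from NULL to the current task with an acquire cmpxchg, and fall back to the slow path when that fails. The userspace types, C11 atomics, and function names here are stand-ins for the kernel's cmpxchg_acquire() and the real lock/slowpath code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct task { int pid; };
struct rt_lock { _Atomic(struct task *) owner; };

static inline bool lock_cmpxchg_acquire(struct rt_lock *lock,
                                        struct task *old,
                                        struct task *new)
{
        /* acquire ordering on success, mirroring cmpxchg_acquire() */
        return atomic_compare_exchange_strong_explicit(&lock->owner, &old, new,
                                                       memory_order_acquire,
                                                       memory_order_relaxed);
}

static void lock_slowpath(struct rt_lock *lock, struct task *me)
{
        (void)lock;
        /* stand-in: the real slow path enqueues the waiter and blocks */
        printf("task %d falls back to the slow path\n", me->pid);
}

static void rt_lock_acquire(struct rt_lock *lock, struct task *me)
{
        /* fast path: unowned (NULL) -> owned by 'me' */
        if (lock_cmpxchg_acquire(lock, NULL, me))
                return;
        lock_slowpath(lock, me);
}

int main(void)
{
        struct rt_lock lk = { .owner = NULL };
        struct task a = { .pid = 1 }, b = { .pid = 2 };

        rt_lock_acquire(&lk, &a);   /* cmpxchg succeeds: fast path */
        rt_lock_acquire(&lk, &b);   /* owner is now &a: slow path  */
        return 0;
}

Under CONFIG_DEBUG_RT_MUTEXES the cmpxchg helpers are the always-false stubs from the second hunk, so every acquisition goes through the slow path, where the debug checks can observe it.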