author      Peter Zijlstra <peterz@infradead.org>    2021-08-15 23:28:44 +0200
committer   Ingo Molnar <mingo@kernel.org>           2021-08-17 19:04:49 +0200
commit      5297ccb2c50916c59294a63fae79fe01a7fbb79a (patch)
tree        68133aab90e6a8e6a55601195f94650600b8e6ca /kernel/locking
parent      locking/ww_mutex: Split out the W/W implementation logic into kernel/locking/... (diff)
locking/ww_mutex: Remove the __sched annotation from ww_mutex APIs
None of these functions will be on the stack when blocking in
schedule(), hence __sched is not needed.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20210815211304.453235952@linutronix.de
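
For context, a minimal sketch of what __sched does, assuming the usual definition in include/linux/sched/debug.h: it places a function's text in the .sched.text section, which the wchan/stack-walk code skips so that a blocked task's reported wait channel points at the real sleeping caller rather than a scheduler or locking helper. Only functions whose frame is still on the stack while the task sleeps in schedule() benefit from it; the ww_mutex helpers changed below all return before anything blocks. The two demo functions here are hypothetical and only illustrate the distinction.

/* Roughly the kernel's definition (see include/linux/sched/debug.h): */
#define __sched		__section(".sched.text")

/*
 * Hypothetical helper that does sleep: its frame is live while the task
 * blocks in schedule(), so annotating it keeps it out of wchan output.
 */
static int __sched demo_wait_for_event(struct completion *done)
{
	wait_for_completion(done);	/* blocks in schedule() */
	return 0;
}

/*
 * Hypothetical helper like the ones in this patch: it only computes an
 * ordering decision and returns, so it can never be on the stack of a
 * blocked task and __sched buys nothing.
 */
static inline bool demo_stamp_after(u64 a, u64 b)
{
	return (s64)(a - b) > 0;	/* never sleeps */
}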
Diffstat (limited to 'kernel/locking')
-rw-r--r--   kernel/locking/ww_mutex.h   12
1 file changed, 6 insertions, 6 deletions
diff --git a/kernel/locking/ww_mutex.h b/kernel/locking/ww_mutex.h
index dadc798dfdee..6a98f3bb7e24 100644
--- a/kernel/locking/ww_mutex.h
+++ b/kernel/locking/ww_mutex.h
@@ -62,7 +62,7 @@ ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
  * transaction than @b and depending on algorithm either needs to wait for
  * @b or die.
  */
-static inline bool __sched
+static inline bool
 __ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
 {
@@ -77,7 +77,7 @@ __ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
  * already (ctx->acquired > 0), because __ww_mutex_add_waiter() and
  * __ww_mutex_check_kill() wake any but the earliest context.
  */
-static bool __sched
+static bool
 __ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter,
 	       struct ww_acquire_ctx *ww_ctx)
 {
@@ -154,7 +154,7 @@ static bool __ww_mutex_wound(struct mutex *lock,
  *
  * The current task must not be on the wait list.
  */
-static void __sched
+static void
 __ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
 {
 	struct mutex_waiter *cur;
@@ -210,7 +210,7 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 	raw_spin_unlock(&lock->base.wait_lock);
 }
-static __always_inline int __sched
+static __always_inline int
 __ww_mutex_kill(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
 {
 	if (ww_ctx->acquired > 0) {
@@ -238,7 +238,7 @@ __ww_mutex_kill(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
  * Since __ww_mutex_add_waiter() orders the wait-list on stamp, we only have to
  * look at waiters before us in the wait-list.
  */
-static inline int __sched
+static inline int
 __ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter,
 		      struct ww_acquire_ctx *ctx)
 {
@@ -285,7 +285,7 @@ __ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter,
  * older contexts already waiting) to avoid unnecessary waiting and for
  * Wound-Wait ensure we wound the owning context when it is younger.
  */
-static inline int __sched
+static inline int
 __ww_mutex_add_waiter(struct mutex_waiter *waiter,
 		      struct mutex *lock,
 		      struct ww_acquire_ctx *ww_ctx)
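
The helpers touched above implement the wait-die / wound-wait arbitration behind the public ww_mutex API; they decide ordering and return, they never block themselves. As a usage sketch only (not part of this patch; demo_ww_class, lock_a, lock_b and demo_lock_both are made-up names), a caller acquiring two ww_mutexes handles -EDEADLK by dropping what it holds, sleeping on the contended lock with ww_mutex_lock_slow(), and retaking the rest under the same acquire context:

#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(demo_ww_class);		/* hypothetical lock class */
static struct ww_mutex lock_a, lock_b;		/* ww_mutex_init()'d elsewhere */

static int demo_lock_both(void)
{
	struct ww_acquire_ctx ctx;
	int ret;

	ww_acquire_init(&ctx, &demo_ww_class);

	ret = ww_mutex_lock(&lock_a, &ctx);	/* first lock in a ctx won't -EDEADLK */
	if (ret)
		goto out_fini;

	ret = ww_mutex_lock(&lock_b, &ctx);
	if (ret == -EDEADLK) {
		/*
		 * Back off: drop what we hold, sleep-wait for the contended
		 * lock, then retake the other one under the same context.
		 * A full implementation would loop on further -EDEADLK;
		 * this sketch just bails out for brevity.
		 */
		ww_mutex_unlock(&lock_a);
		ww_mutex_lock_slow(&lock_b, &ctx);
		ret = ww_mutex_lock(&lock_a, &ctx);
		if (ret) {
			ww_mutex_unlock(&lock_b);
			goto out_fini;
		}
	} else if (ret) {
		ww_mutex_unlock(&lock_a);
		goto out_fini;
	}

	ww_acquire_done(&ctx);

	/* ... critical section covering both objects ... */

	ww_mutex_unlock(&lock_a);
	ww_mutex_unlock(&lock_b);
	ret = 0;

out_fini:
	ww_acquire_fini(&ctx);
	return ret;
}

Because the arbitration helpers in the diff finish before the eventual call into schedule(), only the outer lock/slow-lock paths can appear on a blocked task's stack, which is why dropping __sched from them changes nothing in wchan or stack-trace output.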