path: root/kernel/locking/ww_mutex.h
author     Peter Zijlstra <peterz@infradead.org>  2021-08-15 23:28:45 +0200
committer  Ingo Molnar <mingo@kernel.org>         2021-08-17 19:04:52 +0200
commit     23d599eb2377404100d0d1508e12b0a2c40b49b1 (patch)
tree       38b975750d093112d83cc60047f094163e243f52  /kernel/locking/ww_mutex.h
parent     locking/ww_mutex: Remove the __sched annotation from ww_mutex APIs (diff)
locking/ww_mutex: Abstract out the waiter iteration
Split out the waiter iteration functions so they can be substituted for an
rtmutex-based ww_mutex later.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20210815211304.509186185@linutronix.de
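The point of the abstraction, in miniature: once the loops only walk waiters
through first/next/prev/last helpers, the backing container can be swapped
(for example, for the rbtree that rtmutex uses) without touching the loop
bodies. Below is a self-contained userspace sketch of the same pattern; the
toy_lock/toy_waiter types and helper names are purely illustrative and are
not taken from this patch or from the kernel.

/* Illustrative sketch only -- hypothetical toy types, not kernel code. */
#include <stdio.h>

struct toy_waiter {
	int id;
	struct toy_waiter *next;	/* could equally be an rbtree node */
};

struct toy_lock {
	struct toy_waiter *head;	/* simplest possible wait "list" */
};

/* The loop in main() never touches ->head or ->next directly. */
static struct toy_waiter *toy_waiter_first(struct toy_lock *lock)
{
	return lock->head;		/* NULL when the list is empty */
}

static struct toy_waiter *toy_waiter_next(struct toy_lock *lock,
					  struct toy_waiter *w)
{
	(void)lock;			/* an rbtree backend might need it */
	return w->next;			/* NULL terminates the walk */
}

int main(void)
{
	struct toy_waiter c = { .id = 3, .next = NULL };
	struct toy_waiter b = { .id = 2, .next = &c };
	struct toy_waiter a = { .id = 1, .next = &b };
	struct toy_lock lock = { .head = &a };
	struct toy_waiter *cur;

	/* Same shape as the converted loops in the diff below. */
	for (cur = toy_waiter_first(&lock); cur; cur = toy_waiter_next(&lock, cur))
		printf("waiter %d\n", cur->id);

	return 0;
}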
Diffstat (limited to 'kernel/locking/ww_mutex.h')
-rw-r--r--  kernel/locking/ww_mutex.h  57
1 file changed, 53 insertions, 4 deletions
diff --git a/kernel/locking/ww_mutex.h b/kernel/locking/ww_mutex.h
index 6a98f3bb7e24..1cd178c0c5c2 100644
--- a/kernel/locking/ww_mutex.h
+++ b/kernel/locking/ww_mutex.h
@@ -1,5 +1,49 @@
/* SPDX-License-Identifier: GPL-2.0-only */
+static inline struct mutex_waiter *
+__ww_waiter_first(struct mutex *lock)
+{
+ struct mutex_waiter *w;
+
+ w = list_first_entry(&lock->wait_list, struct mutex_waiter, list);
+ if (list_entry_is_head(w, &lock->wait_list, list))
+ return NULL;
+
+ return w;
+}
+
+static inline struct mutex_waiter *
+__ww_waiter_next(struct mutex *lock, struct mutex_waiter *w)
+{
+ w = list_next_entry(w, list);
+ if (list_entry_is_head(w, &lock->wait_list, list))
+ return NULL;
+
+ return w;
+}
+
+static inline struct mutex_waiter *
+__ww_waiter_prev(struct mutex *lock, struct mutex_waiter *w)
+{
+ w = list_prev_entry(w, list);
+ if (list_entry_is_head(w, &lock->wait_list, list))
+ return NULL;
+
+ return w;
+}
+
+static inline struct mutex_waiter *
+__ww_waiter_last(struct mutex *lock)
+{
+ struct mutex_waiter *w;
+
+ w = list_last_entry(&lock->wait_list, struct mutex_waiter, list);
+ if (list_entry_is_head(w, &lock->wait_list, list))
+ return NULL;
+
+ return w;
+}
+
/*
* Wait-Die:
* The newer transactions are killed when:
@@ -161,7 +205,9 @@ __ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
lockdep_assert_held(&lock->wait_lock);
- list_for_each_entry(cur, &lock->wait_list, list) {
+ for (cur = __ww_waiter_first(lock); cur;
+ cur = __ww_waiter_next(lock, cur)) {
+
if (!cur->ww_ctx)
continue;
@@ -263,8 +309,9 @@ __ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter,
* If there is a waiter in front of us that has a context, then its
* stamp is earlier than ours and we must kill ourself.
*/
- cur = waiter;
- list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
+ for (cur = __ww_waiter_prev(lock, waiter); cur;
+ cur = __ww_waiter_prev(lock, cur)) {
+
if (!cur->ww_ctx)
continue;
@@ -309,7 +356,9 @@ __ww_mutex_add_waiter(struct mutex_waiter *waiter,
* may wound the lock holder.
*/
pos = &lock->wait_list;
- list_for_each_entry_reverse(cur, &lock->wait_list, list) {
+ for (cur = __ww_waiter_last(lock); cur;
+ cur = __ww_waiter_prev(lock, cur)) {
+
if (!cur->ww_ctx)
continue;