author    Peter Zijlstra <peterz@infradead.org>  2021-08-15 23:28:50 +0200
committer Ingo Molnar <mingo@kernel.org>         2021-08-17 19:05:00 +0200
commit    bdb189148ded4ffa826a1387074c795fda43b3ba (patch)
tree      d4a3eb8efb925406f70c195af9d84f04452fbc20 /kernel/locking
parent    locking/ww_mutex: Abstract out mutex accessors (diff)
locking/ww_mutex: Abstract out mutex types
Some ww_mutex helper functions use pointers for the underlying mutex
and mutex_waiter. The upcoming rtmutex based implementation needs to
share these functions. Add and use defines for the types and replace
the direct types in the affected functions.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20210815211304.678720245@linutronix.de
Diffstat (limited to 'kernel/locking')
-rw-r--r--	kernel/locking/ww_mutex.h | 23 +++++++++++++----------
1 file changed, 13 insertions(+), 10 deletions(-)
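
For illustration only, here is a minimal, self-contained sketch of the
define trick this patch relies on: a helper written against
'struct MUTEX' picks up whichever struct tag the macro names at
compile time. The rt_mutex stand-in and the ww_owner() helper below
are assumptions for demonstration, not kernel code:

#include <stdio.h>

/* Stand-ins for two distinct lock types; the fields are illustrative. */
struct mutex    { int owner; };
struct rt_mutex { int owner; };

/*
 * As in kernel/locking/ww_mutex.h after this patch: helpers are written
 * against 'struct MUTEX' and the preprocessor substitutes the real tag.
 * The mutex build defines MUTEX as mutex; an rtmutex build could define
 * it as rt_mutex before pulling in the shared helper bodies.
 */
#define MUTEX mutex

static int ww_owner(struct MUTEX *lock)	/* expands to 'struct mutex' */
{
	return lock->owner;
}

int main(void)
{
	struct mutex m = { .owner = 7 };

	printf("owner=%d\n", ww_owner(&m));
	return 0;
}

The point of the abstraction, per the commit message, is that the
upcoming rtmutex based implementation can share these helper functions
by mapping MUTEX and MUTEX_WAITER to its own types, rather than
duplicating the wait/wound logic for a second lock type.
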
diff --git a/kernel/locking/ww_mutex.h b/kernel/locking/ww_mutex.h
index 842dbed0a8b2..31b075f03660 100644
--- a/kernel/locking/ww_mutex.h
+++ b/kernel/locking/ww_mutex.h
@@ -1,5 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-only */
+#define MUTEX mutex
+#define MUTEX_WAITER mutex_waiter
+
static inline struct mutex_waiter *
__ww_waiter_first(struct mutex *lock)
{
@@ -143,7 +146,7 @@ __ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
* __ww_mutex_check_kill() wake any but the earliest context.
*/
static bool
-__ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter,
+__ww_mutex_die(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
struct ww_acquire_ctx *ww_ctx)
{
if (!ww_ctx->is_wait_die)
@@ -165,7 +168,7 @@ __ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter,
* the lock holders. Even if multiple waiters may wound the lock holder,
* it's sufficient that only one does.
*/
-static bool __ww_mutex_wound(struct mutex *lock,
+static bool __ww_mutex_wound(struct MUTEX *lock,
struct ww_acquire_ctx *ww_ctx,
struct ww_acquire_ctx *hold_ctx)
{
@@ -220,9 +223,9 @@ static bool __ww_mutex_wound(struct mutex *lock,
* The current task must not be on the wait list.
*/
static void
-__ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
+__ww_mutex_check_waiters(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx)
{
- struct mutex_waiter *cur;
+ struct MUTEX_WAITER *cur;
lockdep_assert_held(&lock->wait_lock);
@@ -278,7 +281,7 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
}
static __always_inline int
-__ww_mutex_kill(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
+__ww_mutex_kill(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx)
{
if (ww_ctx->acquired > 0) {
#ifdef CONFIG_DEBUG_MUTEXES
@@ -306,12 +309,12 @@ __ww_mutex_kill(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
* look at waiters before us in the wait-list.
*/
static inline int
-__ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter,
+__ww_mutex_check_kill(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
struct ww_acquire_ctx *ctx)
{
struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
- struct mutex_waiter *cur;
+ struct MUTEX_WAITER *cur;
if (ctx->acquired == 0)
return 0;
@@ -354,11 +357,11 @@ __ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter,
* Wound-Wait ensure we wound the owning context when it is younger.
*/
static inline int
-__ww_mutex_add_waiter(struct mutex_waiter *waiter,
- struct mutex *lock,
+__ww_mutex_add_waiter(struct MUTEX_WAITER *waiter,
+ struct MUTEX *lock,
struct ww_acquire_ctx *ww_ctx)
{
- struct mutex_waiter *cur, *pos = NULL;
+ struct MUTEX_WAITER *cur, *pos = NULL;
bool is_wait_die;
if (!ww_ctx) {