author     Linus Torvalds <torvalds@linux-foundation.org>   2021-04-28 21:37:53 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>   2021-04-28 21:37:53 +0200
commit     0ff0edb550e256597e505eff308f90d9a0b6677c
tree       6d96f53e70f2bdec49627e30c2645ee97b987848 /kernel/locking
parent     Merge tag 'core-rcu-2021-04-28' of git://git.kernel.org/pub/scm/linux/kernel/...
parent     kcsan: Fix printk format string
Merge tag 'locking-core-2021-04-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
- rtmutex cleanup & spring cleaning pass that removes ~400 lines of
code
- Futex simplifications & cleanups
- Add debugging to the CSD code, to help track down a tenacious race
(or hw problem)
- Add lockdep_assert_not_held(), to allow code to require a lock to not
  be held, and propagate this into the ath10k driver (see the usage
  sketch after this list)
- Misc LKMM documentation updates
- Misc KCSAN updates: cleanups & documentation updates
- Misc fixes and cleanups
- Fix locktorture bugs with ww_mutexes
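One item above is a new assertion API rather than internal cleanup: lockdep_assert_not_held(). The sketch below shows the intended usage pattern; the my_dev/my_dev_reset names are made up for illustration and are not taken from the ath10k conversion in this series.

/*
 * Usage sketch for lockdep_assert_not_held() (hypothetical driver code,
 * not from this merge).  A function that may only be called *without*
 * conf_mutex held can now document and enforce that, mirroring the
 * long-standing lockdep_assert_held().
 */
#include <linux/lockdep.h>
#include <linux/mutex.h>

struct my_dev {
	struct mutex conf_mutex;
};

static void my_dev_reset(struct my_dev *dev)
{
	/* Warn (with lockdep enabled) if the caller already holds it. */
	lockdep_assert_not_held(&dev->conf_mutex);

	mutex_lock(&dev->conf_mutex);
	/* ... perform the reset under the lock ... */
	mutex_unlock(&dev->conf_mutex);
}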
* tag 'locking-core-2021-04-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (44 commits)
kcsan: Fix printk format string
static_call: Relax static_call_update() function argument type
static_call: Fix unused variable warn w/o MODULE
locking/rtmutex: Clean up signal handling in __rt_mutex_slowlock()
locking/rtmutex: Restrict the trylock WARN_ON() to debug
locking/rtmutex: Fix misleading comment in rt_mutex_postunlock()
locking/rtmutex: Consolidate the fast/slowpath invocation
locking/rtmutex: Make text section and inlining consistent
locking/rtmutex: Move debug functions as inlines into common header
locking/rtmutex: Decrapify __rt_mutex_init()
locking/rtmutex: Remove pointless CONFIG_RT_MUTEXES=n stubs
locking/rtmutex: Inline chainwalk depth check
locking/rtmutex: Move rt_mutex_debug_task_free() to rtmutex.c
locking/rtmutex: Remove empty and unused debug stubs
locking/rtmutex: Consolidate rt_mutex_init()
locking/rtmutex: Remove output from deadlock detector
locking/rtmutex: Remove rtmutex deadlock tester leftovers
locking/rtmutex: Remove rt_mutex_timed_lock()
MAINTAINERS: Add myself as futex reviewer
locking/mutex: Remove repeated declaration
...
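Part of this series (visible in the lockdep.c hunk of the diff below) converts __lock_is_held()/lock_is_held_type() from a boolean to a tri-state return, so that neither lockdep_assert_held() nor the new lockdep_assert_not_held() fires spuriously when lockdep is not actually tracking. The helpers below are a simplified illustration of how such a tri-state is consumed; the SKETCH_* names and numeric values are assumptions for the sketch, and the real definitions live in <linux/lockdep.h>.

#include <linux/bug.h>

/*
 * Sketch only: the three states a "is this lock held?" query can report.
 * The numeric values are assumptions; see <linux/lockdep.h> for the
 * kernel's definitions.
 */
enum sketch_lock_state {
	SKETCH_LOCK_STATE_UNKNOWN  = -1,	/* lockdep not tracking */
	SKETCH_LOCK_STATE_NOT_HELD =  0,
	SKETCH_LOCK_STATE_HELD     =  1,
};

/* lockdep_assert_held(): only an explicit "not held" is a bug. */
static inline void sketch_assert_held(enum sketch_lock_state s)
{
	WARN_ON(s == SKETCH_LOCK_STATE_NOT_HELD);
}

/* lockdep_assert_not_held(): only an explicit "held" is a bug. */
static inline void sketch_assert_not_held(enum sketch_lock_state s)
{
	WARN_ON(s == SKETCH_LOCK_STATE_HELD);
}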
Diffstat (limited to 'kernel/locking')
-rw-r--r--  kernel/locking/Makefile         |   2
-rw-r--r--  kernel/locking/lockdep.c        |  19
-rw-r--r--  kernel/locking/lockdep_proc.c   |   2
-rw-r--r--  kernel/locking/locktorture.c    | 146
-rw-r--r--  kernel/locking/mcs_spinlock.h   |   2
-rw-r--r--  kernel/locking/mutex.c          |   4
-rw-r--r--  kernel/locking/osq_lock.c       |   4
-rw-r--r--  kernel/locking/rtmutex-debug.c  | 182
-rw-r--r--  kernel/locking/rtmutex-debug.h  |  37
-rw-r--r--  kernel/locking/rtmutex.c        | 432
-rw-r--r--  kernel/locking/rtmutex.h        |  35
-rw-r--r--  kernel/locking/rtmutex_common.h | 105
-rw-r--r--  kernel/locking/rwsem.c          |   4
-rw-r--r--  kernel/locking/spinlock.c       |   4
14 files changed, 326 insertions, 652 deletions
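The largest functional change in the locktorture diff below is that every lock/unlock op now takes the torture thread's index, so the ww_mutex test can keep one long-lived ww_acquire_ctx per writer and move ww_acquire_fini() from the lock path to the unlock path. A condensed sketch of that pattern follows; it keeps the names from the diff but shows only one of the three torture ww_mutexes, and it passes the writer count in explicitly instead of reading cxt.nrealwriters_stress.

#include <linux/slab.h>
#include <linux/ww_mutex.h>

static DEFINE_WD_CLASS(torture_ww_class);
static struct ww_mutex torture_ww_mutex_0;
static struct ww_acquire_ctx *ww_acquire_ctxs;	/* one slot per writer thread */

/* In the patch this reads cxt.nrealwriters_stress; passed in here for the sketch. */
static void torture_ww_mutex_init(int nwriters)
{
	ww_mutex_init(&torture_ww_mutex_0, &torture_ww_class);
	ww_acquire_ctxs = kmalloc_array(nwriters, sizeof(*ww_acquire_ctxs),
					GFP_KERNEL);
}

static int torture_ww_mutex_lock(int tid)
{
	struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];

	/* The ctx lives in the array, so it survives until the unlock op. */
	ww_acquire_init(ctx, &torture_ww_class);
	return ww_mutex_lock(&torture_ww_mutex_0, ctx);
}

static void torture_ww_mutex_unlock(int tid)
{
	struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];

	ww_mutex_unlock(&torture_ww_mutex_0);
	ww_acquire_fini(ctx);	/* moved from the lock op to the unlock op */
}

In the actual patch the id comes from the thread's position in the per-thread stats array (tid = lwsp - cxt.lwsa in lock_torture_writer()), and the lock types that do not need it simply mark the new argument __maybe_unused.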
diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile index 8838f1d7c4a2..3572808223e4 100644 --- a/kernel/locking/Makefile +++ b/kernel/locking/Makefile @@ -12,7 +12,6 @@ ifdef CONFIG_FUNCTION_TRACER CFLAGS_REMOVE_lockdep.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_lockdep_proc.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_mutex-debug.o = $(CC_FLAGS_FTRACE) -CFLAGS_REMOVE_rtmutex-debug.o = $(CC_FLAGS_FTRACE) endif obj-$(CONFIG_DEBUG_IRQFLAGS) += irqflag-debug.o @@ -26,7 +25,6 @@ obj-$(CONFIG_LOCK_SPIN_ON_OWNER) += osq_lock.o obj-$(CONFIG_PROVE_LOCKING) += spinlock.o obj-$(CONFIG_QUEUED_SPINLOCKS) += qspinlock.o obj-$(CONFIG_RT_MUTEXES) += rtmutex.o -obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index ef28a0b9cf1e..48d736aa03b2 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -54,6 +54,7 @@ #include <linux/nmi.h> #include <linux/rcupdate.h> #include <linux/kprobes.h> +#include <linux/lockdep.h> #include <asm/sections.h> @@ -1747,7 +1748,7 @@ static enum bfs_result __bfs(struct lock_list *source_entry, /* * Step 4: if not match, expand the path by adding the - * forward or backwards dependencis in the search + * forward or backwards dependencies in the search * */ first = true; @@ -1916,7 +1917,7 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth, * -> B is -(ER)-> or -(EN)->, then we don't need to add A -> B into the * dependency graph, as any strong path ..-> A -> B ->.. we can get with * having dependency A -> B, we could already get a equivalent path ..-> A -> - * .. -> B -> .. with A -> .. -> B. Therefore A -> B is reduntant. + * .. -> B -> .. with A -> .. -> B. Therefore A -> B is redundant. * * We need to make sure both the start and the end of A -> .. -> B is not * weaker than A -> B. For the start part, please see the comment in @@ -5253,13 +5254,13 @@ int __lock_is_held(const struct lockdep_map *lock, int read) if (match_held_lock(hlock, lock)) { if (read == -1 || hlock->read == read) - return 1; + return LOCK_STATE_HELD; - return 0; + return LOCK_STATE_NOT_HELD; } } - return 0; + return LOCK_STATE_NOT_HELD; } static struct pin_cookie __lock_pin_lock(struct lockdep_map *lock) @@ -5538,10 +5539,14 @@ EXPORT_SYMBOL_GPL(lock_release); noinstr int lock_is_held_type(const struct lockdep_map *lock, int read) { unsigned long flags; - int ret = 0; + int ret = LOCK_STATE_NOT_HELD; + /* + * Avoid false negative lockdep_assert_held() and + * lockdep_assert_not_held(). + */ if (unlikely(!lockdep_enabled())) - return 1; /* avoid false negative lockdep_assert_held() */ + return LOCK_STATE_UNKNOWN; raw_local_irq_save(flags); check_flags(flags); diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c index 02ef87f50df2..806978314496 100644 --- a/kernel/locking/lockdep_proc.c +++ b/kernel/locking/lockdep_proc.c @@ -348,7 +348,7 @@ static int lockdep_stats_show(struct seq_file *m, void *v) debug_locks); /* - * Zappped classes and lockdep data buffers reuse statistics. + * Zapped classes and lockdep data buffers reuse statistics. 
*/ seq_puts(m, "\n"); seq_printf(m, " zapped classes: %11lu\n", diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c index 0ab94e1f1276..b3adb40549bf 100644 --- a/kernel/locking/locktorture.c +++ b/kernel/locking/locktorture.c @@ -76,13 +76,13 @@ static void lock_torture_cleanup(void); struct lock_torture_ops { void (*init)(void); void (*exit)(void); - int (*writelock)(void); + int (*writelock)(int tid); void (*write_delay)(struct torture_random_state *trsp); void (*task_boost)(struct torture_random_state *trsp); - void (*writeunlock)(void); - int (*readlock)(void); + void (*writeunlock)(int tid); + int (*readlock)(int tid); void (*read_delay)(struct torture_random_state *trsp); - void (*readunlock)(void); + void (*readunlock)(int tid); unsigned long flags; /* for irq spinlocks */ const char *name; @@ -105,7 +105,7 @@ static struct lock_torture_cxt cxt = { 0, 0, false, false, * Definitions for lock torture testing. */ -static int torture_lock_busted_write_lock(void) +static int torture_lock_busted_write_lock(int tid __maybe_unused) { return 0; /* BUGGY, do not use in real life!!! */ } @@ -122,7 +122,7 @@ static void torture_lock_busted_write_delay(struct torture_random_state *trsp) torture_preempt_schedule(); /* Allow test to be preempted. */ } -static void torture_lock_busted_write_unlock(void) +static void torture_lock_busted_write_unlock(int tid __maybe_unused) { /* BUGGY, do not use in real life!!! */ } @@ -145,7 +145,8 @@ static struct lock_torture_ops lock_busted_ops = { static DEFINE_SPINLOCK(torture_spinlock); -static int torture_spin_lock_write_lock(void) __acquires(torture_spinlock) +static int torture_spin_lock_write_lock(int tid __maybe_unused) +__acquires(torture_spinlock) { spin_lock(&torture_spinlock); return 0; @@ -169,7 +170,8 @@ static void torture_spin_lock_write_delay(struct torture_random_state *trsp) torture_preempt_schedule(); /* Allow test to be preempted. 
*/ } -static void torture_spin_lock_write_unlock(void) __releases(torture_spinlock) +static void torture_spin_lock_write_unlock(int tid __maybe_unused) +__releases(torture_spinlock) { spin_unlock(&torture_spinlock); } @@ -185,7 +187,7 @@ static struct lock_torture_ops spin_lock_ops = { .name = "spin_lock" }; -static int torture_spin_lock_write_lock_irq(void) +static int torture_spin_lock_write_lock_irq(int tid __maybe_unused) __acquires(torture_spinlock) { unsigned long flags; @@ -195,7 +197,7 @@ __acquires(torture_spinlock) return 0; } -static void torture_lock_spin_write_unlock_irq(void) +static void torture_lock_spin_write_unlock_irq(int tid __maybe_unused) __releases(torture_spinlock) { spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags); @@ -214,7 +216,8 @@ static struct lock_torture_ops spin_lock_irq_ops = { static DEFINE_RWLOCK(torture_rwlock); -static int torture_rwlock_write_lock(void) __acquires(torture_rwlock) +static int torture_rwlock_write_lock(int tid __maybe_unused) +__acquires(torture_rwlock) { write_lock(&torture_rwlock); return 0; @@ -235,12 +238,14 @@ static void torture_rwlock_write_delay(struct torture_random_state *trsp) udelay(shortdelay_us); } -static void torture_rwlock_write_unlock(void) __releases(torture_rwlock) +static void torture_rwlock_write_unlock(int tid __maybe_unused) +__releases(torture_rwlock) { write_unlock(&torture_rwlock); } -static int torture_rwlock_read_lock(void) __acquires(torture_rwlock) +static int torture_rwlock_read_lock(int tid __maybe_unused) +__acquires(torture_rwlock) { read_lock(&torture_rwlock); return 0; @@ -261,7 +266,8 @@ static void torture_rwlock_read_delay(struct torture_random_state *trsp) udelay(shortdelay_us); } -static void torture_rwlock_read_unlock(void) __releases(torture_rwlock) +static void torture_rwlock_read_unlock(int tid __maybe_unused) +__releases(torture_rwlock) { read_unlock(&torture_rwlock); } @@ -277,7 +283,8 @@ static struct lock_torture_ops rw_lock_ops = { .name = "rw_lock" }; -static int torture_rwlock_write_lock_irq(void) __acquires(torture_rwlock) +static int torture_rwlock_write_lock_irq(int tid __maybe_unused) +__acquires(torture_rwlock) { unsigned long flags; @@ -286,13 +293,14 @@ static int torture_rwlock_write_lock_irq(void) __acquires(torture_rwlock) return 0; } -static void torture_rwlock_write_unlock_irq(void) +static void torture_rwlock_write_unlock_irq(int tid __maybe_unused) __releases(torture_rwlock) { write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags); } -static int torture_rwlock_read_lock_irq(void) __acquires(torture_rwlock) +static int torture_rwlock_read_lock_irq(int tid __maybe_unused) +__acquires(torture_rwlock) { unsigned long flags; @@ -301,7 +309,7 @@ static int torture_rwlock_read_lock_irq(void) __acquires(torture_rwlock) return 0; } -static void torture_rwlock_read_unlock_irq(void) +static void torture_rwlock_read_unlock_irq(int tid __maybe_unused) __releases(torture_rwlock) { read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags); @@ -320,7 +328,8 @@ static struct lock_torture_ops rw_lock_irq_ops = { static DEFINE_MUTEX(torture_mutex); -static int torture_mutex_lock(void) __acquires(torture_mutex) +static int torture_mutex_lock(int tid __maybe_unused) +__acquires(torture_mutex) { mutex_lock(&torture_mutex); return 0; @@ -340,7 +349,8 @@ static void torture_mutex_delay(struct torture_random_state *trsp) torture_preempt_schedule(); /* Allow test to be preempted. 
*/ } -static void torture_mutex_unlock(void) __releases(torture_mutex) +static void torture_mutex_unlock(int tid __maybe_unused) +__releases(torture_mutex) { mutex_unlock(&torture_mutex); } @@ -357,12 +367,34 @@ static struct lock_torture_ops mutex_lock_ops = { }; #include <linux/ww_mutex.h> +/* + * The torture ww_mutexes should belong to the same lock class as + * torture_ww_class to avoid lockdep problem. The ww_mutex_init() + * function is called for initialization to ensure that. + */ static DEFINE_WD_CLASS(torture_ww_class); -static DEFINE_WW_MUTEX(torture_ww_mutex_0, &torture_ww_class); -static DEFINE_WW_MUTEX(torture_ww_mutex_1, &torture_ww_class); -static DEFINE_WW_MUTEX(torture_ww_mutex_2, &torture_ww_class); +static struct ww_mutex torture_ww_mutex_0, torture_ww_mutex_1, torture_ww_mutex_2; +static struct ww_acquire_ctx *ww_acquire_ctxs; + +static void torture_ww_mutex_init(void) +{ + ww_mutex_init(&torture_ww_mutex_0, &torture_ww_class); + ww_mutex_init(&torture_ww_mutex_1, &torture_ww_class); + ww_mutex_init(&torture_ww_mutex_2, &torture_ww_class); + + ww_acquire_ctxs = kmalloc_array(cxt.nrealwriters_stress, + sizeof(*ww_acquire_ctxs), + GFP_KERNEL); + if (!ww_acquire_ctxs) + VERBOSE_TOROUT_STRING("ww_acquire_ctx: Out of memory"); +} + +static void torture_ww_mutex_exit(void) +{ + kfree(ww_acquire_ctxs); +} -static int torture_ww_mutex_lock(void) +static int torture_ww_mutex_lock(int tid) __acquires(torture_ww_mutex_0) __acquires(torture_ww_mutex_1) __acquires(torture_ww_mutex_2) @@ -372,7 +404,7 @@ __acquires(torture_ww_mutex_2) struct list_head link; struct ww_mutex *lock; } locks[3], *ll, *ln; - struct ww_acquire_ctx ctx; + struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid]; locks[0].lock = &torture_ww_mutex_0; list_add(&locks[0].link, &list); @@ -383,12 +415,12 @@ __acquires(torture_ww_mutex_2) locks[2].lock = &torture_ww_mutex_2; list_add(&locks[2].link, &list); - ww_acquire_init(&ctx, &torture_ww_class); + ww_acquire_init(ctx, &torture_ww_class); list_for_each_entry(ll, &list, link) { int err; - err = ww_mutex_lock(ll->lock, &ctx); + err = ww_mutex_lock(ll->lock, ctx); if (!err) continue; @@ -399,25 +431,29 @@ __acquires(torture_ww_mutex_2) if (err != -EDEADLK) return err; - ww_mutex_lock_slow(ll->lock, &ctx); + ww_mutex_lock_slow(ll->lock, ctx); list_move(&ll->link, &list); } - ww_acquire_fini(&ctx); return 0; } -static void torture_ww_mutex_unlock(void) +static void torture_ww_mutex_unlock(int tid) __releases(torture_ww_mutex_0) __releases(torture_ww_mutex_1) __releases(torture_ww_mutex_2) { + struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid]; + ww_mutex_unlock(&torture_ww_mutex_0); ww_mutex_unlock(&torture_ww_mutex_1); ww_mutex_unlock(&torture_ww_mutex_2); + ww_acquire_fini(ctx); } static struct lock_torture_ops ww_mutex_lock_ops = { + .init = torture_ww_mutex_init, + .exit = torture_ww_mutex_exit, .writelock = torture_ww_mutex_lock, .write_delay = torture_mutex_delay, .task_boost = torture_boost_dummy, @@ -431,7 +467,8 @@ static struct lock_torture_ops ww_mutex_lock_ops = { #ifdef CONFIG_RT_MUTEXES static DEFINE_RT_MUTEX(torture_rtmutex); -static int torture_rtmutex_lock(void) __acquires(torture_rtmutex) +static int torture_rtmutex_lock(int tid __maybe_unused) +__acquires(torture_rtmutex) { rt_mutex_lock(&torture_rtmutex); return 0; @@ -487,7 +524,8 @@ static void torture_rtmutex_delay(struct torture_random_state *trsp) torture_preempt_schedule(); /* Allow test to be preempted. 
*/ } -static void torture_rtmutex_unlock(void) __releases(torture_rtmutex) +static void torture_rtmutex_unlock(int tid __maybe_unused) +__releases(torture_rtmutex) { rt_mutex_unlock(&torture_rtmutex); } @@ -505,7 +543,8 @@ static struct lock_torture_ops rtmutex_lock_ops = { #endif static DECLARE_RWSEM(torture_rwsem); -static int torture_rwsem_down_write(void) __acquires(torture_rwsem) +static int torture_rwsem_down_write(int tid __maybe_unused) +__acquires(torture_rwsem) { down_write(&torture_rwsem); return 0; @@ -525,12 +564,14 @@ static void torture_rwsem_write_delay(struct torture_random_state *trsp) torture_preempt_schedule(); /* Allow test to be preempted. */ } -static void torture_rwsem_up_write(void) __releases(torture_rwsem) +static void torture_rwsem_up_write(int tid __maybe_unused) +__releases(torture_rwsem) { up_write(&torture_rwsem); } -static int torture_rwsem_down_read(void) __acquires(torture_rwsem) +static int torture_rwsem_down_read(int tid __maybe_unused) +__acquires(torture_rwsem) { down_read(&torture_rwsem); return 0; @@ -550,7 +591,8 @@ static void torture_rwsem_read_delay(struct torture_random_state *trsp) torture_preempt_schedule(); /* Allow test to be preempted. */ } -static void torture_rwsem_up_read(void) __releases(torture_rwsem) +static void torture_rwsem_up_read(int tid __maybe_unused) +__releases(torture_rwsem) { up_read(&torture_rwsem); } @@ -579,24 +621,28 @@ static void torture_percpu_rwsem_exit(void) percpu_free_rwsem(&pcpu_rwsem); } -static int torture_percpu_rwsem_down_write(void) __acquires(pcpu_rwsem) +static int torture_percpu_rwsem_down_write(int tid __maybe_unused) +__acquires(pcpu_rwsem) { percpu_down_write(&pcpu_rwsem); return 0; } -static void torture_percpu_rwsem_up_write(void) __releases(pcpu_rwsem) +static void torture_percpu_rwsem_up_write(int tid __maybe_unused) +__releases(pcpu_rwsem) { percpu_up_write(&pcpu_rwsem); } -static int torture_percpu_rwsem_down_read(void) __acquires(pcpu_rwsem) +static int torture_percpu_rwsem_down_read(int tid __maybe_unused) +__acquires(pcpu_rwsem) { percpu_down_read(&pcpu_rwsem); return 0; } -static void torture_percpu_rwsem_up_read(void) __releases(pcpu_rwsem) +static void torture_percpu_rwsem_up_read(int tid __maybe_unused) +__releases(pcpu_rwsem) { percpu_up_read(&pcpu_rwsem); } @@ -621,6 +667,7 @@ static struct lock_torture_ops percpu_rwsem_lock_ops = { static int lock_torture_writer(void *arg) { struct lock_stress_stats *lwsp = arg; + int tid = lwsp - cxt.lwsa; DEFINE_TORTURE_RANDOM(rand); VERBOSE_TOROUT_STRING("lock_torture_writer task started"); @@ -631,7 +678,7 @@ static int lock_torture_writer(void *arg) schedule_timeout_uninterruptible(1); cxt.cur_ops->task_boost(&rand); - cxt.cur_ops->writelock(); + cxt.cur_ops->writelock(tid); if (WARN_ON_ONCE(lock_is_write_held)) lwsp->n_lock_fail++; lock_is_write_held = true; @@ -642,7 +689,7 @@ static int lock_torture_writer(void *arg) cxt.cur_ops->write_delay(&rand); lock_is_write_held = false; WRITE_ONCE(last_lock_release, jiffies); - cxt.cur_ops->writeunlock(); + cxt.cur_ops->writeunlock(tid); stutter_wait("lock_torture_writer"); } while (!torture_must_stop()); @@ -659,6 +706,7 @@ static int lock_torture_writer(void *arg) static int lock_torture_reader(void *arg) { struct lock_stress_stats *lrsp = arg; + int tid = lrsp - cxt.lrsa; DEFINE_TORTURE_RANDOM(rand); VERBOSE_TOROUT_STRING("lock_torture_reader task started"); @@ -668,7 +716,7 @@ static int lock_torture_reader(void *arg) if ((torture_random(&rand) & 0xfffff) == 0) schedule_timeout_uninterruptible(1); - 
cxt.cur_ops->readlock(); + cxt.cur_ops->readlock(tid); lock_is_read_held = true; if (WARN_ON_ONCE(lock_is_write_held)) lrsp->n_lock_fail++; /* rare, but... */ @@ -676,7 +724,7 @@ static int lock_torture_reader(void *arg) lrsp->n_lock_acquired++; cxt.cur_ops->read_delay(&rand); lock_is_read_held = false; - cxt.cur_ops->readunlock(); + cxt.cur_ops->readunlock(tid); stutter_wait("lock_torture_reader"); } while (!torture_must_stop()); @@ -891,16 +939,16 @@ static int __init lock_torture_init(void) goto unwind; } - if (cxt.cur_ops->init) { - cxt.cur_ops->init(); - cxt.init_called = true; - } - if (nwriters_stress >= 0) cxt.nrealwriters_stress = nwriters_stress; else cxt.nrealwriters_stress = 2 * num_online_cpus(); + if (cxt.cur_ops->init) { + cxt.cur_ops->init(); + cxt.init_called = true; + } + #ifdef CONFIG_DEBUG_MUTEXES if (str_has_prefix(torture_type, "mutex")) cxt.debug_lock = true; diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h index 5e10153b4d3c..85251d8771d9 100644 --- a/kernel/locking/mcs_spinlock.h +++ b/kernel/locking/mcs_spinlock.h @@ -7,7 +7,7 @@ * The MCS lock (proposed by Mellor-Crummey and Scott) is a simple spin-lock * with the desirable properties of being fair, and with each cpu trying * to acquire the lock spinning on a local variable. - * It avoids expensive cache bouncings that common test-and-set spin-lock + * It avoids expensive cache bounces that common test-and-set spin-lock * implementations incur. */ #ifndef __LINUX_MCS_SPINLOCK_H diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c index 622ebdfcd083..cb6b112ce155 100644 --- a/kernel/locking/mutex.c +++ b/kernel/locking/mutex.c @@ -92,7 +92,7 @@ static inline unsigned long __owner_flags(unsigned long owner) } /* - * Trylock variant that retuns the owning task on failure. + * Trylock variant that returns the owning task on failure. */ static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock) { @@ -207,7 +207,7 @@ __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter, /* * Give up ownership to a specific task, when @task = NULL, this is equivalent - * to a regular unlock. Sets PICKUP on a handoff, clears HANDOF, preserves + * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves * WAITERS. Provides RELEASE semantics like a regular unlock, the * __mutex_trylock() provides a matching ACQUIRE semantics for the handoff. */ diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c index 1de006ed3aa8..d5610ad52b92 100644 --- a/kernel/locking/osq_lock.c +++ b/kernel/locking/osq_lock.c @@ -135,7 +135,7 @@ bool osq_lock(struct optimistic_spin_queue *lock) */ /* - * Wait to acquire the lock or cancelation. Note that need_resched() + * Wait to acquire the lock or cancellation. Note that need_resched() * will come with an IPI, which will wake smp_cond_load_relaxed() if it * is implemented with a monitor-wait. vcpu_is_preempted() relies on * polling, be careful. @@ -164,7 +164,7 @@ bool osq_lock(struct optimistic_spin_queue *lock) /* * We can only fail the cmpxchg() racing against an unlock(), - * in which case we should observe @node->locked becomming + * in which case we should observe @node->locked becoming * true. 
*/ if (smp_load_acquire(&node->locked)) diff --git a/kernel/locking/rtmutex-debug.c b/kernel/locking/rtmutex-debug.c deleted file mode 100644 index 36e69100e8e0..000000000000 --- a/kernel/locking/rtmutex-debug.c +++ /dev/null @@ -1,182 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * RT-Mutexes: blocking mutual exclusion locks with PI support - * - * started by Ingo Molnar and Thomas Gleixner: - * - * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> - * Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com> - * - * This code is based on the rt.c implementation in the preempt-rt tree. - * Portions of said code are - * - * Copyright (C) 2004 LynuxWorks, Inc., Igor Manyilov, Bill Huey - * Copyright (C) 2006 Esben Nielsen - * Copyright (C) 2006 Kihon Technologies Inc., - * Steven Rostedt <rostedt@goodmis.org> - * - * See rt.c in preempt-rt for proper credits and further information - */ -#include <linux/sched.h> -#include <linux/sched/rt.h> -#include <linux/sched/debug.h> -#include <linux/delay.h> -#include <linux/export.h> -#include <linux/spinlock.h> -#include <linux/kallsyms.h> -#include <linux/syscalls.h> -#include <linux/interrupt.h> -#include <linux/rbtree.h> -#include <linux/fs.h> -#include <linux/debug_locks.h> - -#include "rtmutex_common.h" - -static void printk_task(struct task_struct *p) -{ - if (p) - printk("%16s:%5d [%p, %3d]", p->comm, task_pid_nr(p), p, p->prio); - else - printk("<none>"); -} - -static void printk_lock(struct rt_mutex *lock, int print_owner) -{ - if (lock->name) - printk(" [%p] {%s}\n", - lock, lock->name); - else - printk(" [%p] {%s:%d}\n", - lock, lock->file, lock->line); - - if (print_owner && rt_mutex_owner(lock)) { - printk(".. ->owner: %p\n", lock->owner); - printk(".. held by: "); - printk_task(rt_mutex_owner(lock)); - printk("\n"); - } -} - -void rt_mutex_debug_task_free(struct task_struct *task) -{ - DEBUG_LOCKS_WARN_ON(!RB_EMPTY_ROOT(&task->pi_waiters.rb_root)); - DEBUG_LOCKS_WARN_ON(task->pi_blocked_on); -} - -/* - * We fill out the fields in the waiter to store the information about - * the deadlock. We print when we return. act_waiter can be NULL in - * case of a remove waiter operation. 
- */ -void debug_rt_mutex_deadlock(enum rtmutex_chainwalk chwalk, - struct rt_mutex_waiter *act_waiter, - struct rt_mutex *lock) -{ - struct task_struct *task; - - if (!debug_locks || chwalk == RT_MUTEX_FULL_CHAINWALK || !act_waiter) - return; - - task = rt_mutex_owner(act_waiter->lock); - if (task && task != current) { - act_waiter->deadlock_task_pid = get_pid(task_pid(task)); - act_waiter->deadlock_lock = lock; - } -} - -void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter) -{ - struct task_struct *task; - - if (!waiter->deadlock_lock || !debug_locks) - return; - - rcu_read_lock(); - task = pid_task(waiter->deadlock_task_pid, PIDTYPE_PID); - if (!task) { - rcu_read_unlock(); - return; - } - - if (!debug_locks_off()) { - rcu_read_unlock(); - return; - } - - pr_warn("\n"); - pr_warn("============================================\n"); - pr_warn("WARNING: circular locking deadlock detected!\n"); - pr_warn("%s\n", print_tainted()); - pr_warn("--------------------------------------------\n"); - printk("%s/%d is deadlocking current task %s/%d\n\n", - task->comm, task_pid_nr(task), - current->comm, task_pid_nr(current)); - - printk("\n1) %s/%d is trying to acquire this lock:\n", - current->comm, task_pid_nr(current)); - printk_lock(waiter->lock, 1); - - printk("\n2) %s/%d is blocked on this lock:\n", - task->comm, task_pid_nr(task)); - printk_lock(waiter->deadlock_lock, 1); - - debug_show_held_locks(current); - debug_show_held_locks(task); - - printk("\n%s/%d's [blocked] stackdump:\n\n", - task->comm, task_pid_nr(task)); - show_stack(task, NULL, KERN_DEFAULT); - printk("\n%s/%d's [current] stackdump:\n\n", - current->comm, task_pid_nr(current)); - dump_stack(); - debug_show_all_locks(); - rcu_read_unlock(); - - printk("[ turning off deadlock detection." - "Please report this trace. ]\n\n"); -} - -void debug_rt_mutex_lock(struct rt_mutex *lock) -{ -} - -void debug_rt_mutex_unlock(struct rt_mutex *lock) -{ - DEBUG_LOCKS_WARN_ON(rt_mutex_owner(lock) != current); -} - -void -debug_rt_mutex_proxy_lock(struct rt_mutex *lock, struct task_struct *powner) -{ -} - -void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock) -{ - DEBUG_LOCKS_WARN_ON(!rt_mutex_owner(lock)); -} - -void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter) -{ - memset(waiter, 0x11, sizeof(*waiter)); - waiter->deadlock_task_pid = NULL; -} - -void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter) -{ - put_pid(waiter->deadlock_task_pid); - memset(waiter, 0x22, sizeof(*waiter)); -} - -void debug_rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key) -{ - /* - * Make sure we are not reinitializing a held lock: - */ - debug_check_no_locks_freed((void *)lock, sizeof(*lock)); - lock->name = name; - -#ifdef CONFIG_DEBUG_LOCK_ALLOC - lockdep_init_map(&lock->dep_map, name, key, 0); -#endif -} - diff --git a/kernel/locking/rtmutex-debug.h b/kernel/locking/rtmutex-debug.h deleted file mode 100644 index fc549713bba3..000000000000 --- a/kernel/locking/rtmutex-debug.h +++ /dev/null @@ -1,37 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * RT-Mutexes: blocking mutual exclusion locks with PI support - * - * started by Ingo Molnar and Thomas Gleixner: - * - * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> - * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com> - * - * This file contains macros used solely by rtmutex.c. Debug version. 
- */ - -extern void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter); -extern void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter); -extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key); -extern void debug_rt_mutex_lock(struct rt_mutex *lock); -extern void debug_rt_mutex_unlock(struct rt_mutex *lock); -extern void debug_rt_mutex_proxy_lock(struct rt_mutex *lock, - struct task_struct *powner); -extern void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock); -extern void debug_rt_mutex_deadlock(enum rtmutex_chainwalk chwalk, - struct rt_mutex_waiter *waiter, - struct rt_mutex *lock); -extern void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter); -# define debug_rt_mutex_reset_waiter(w) \ - do { (w)->deadlock_lock = NULL; } while (0) - -static inline bool debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter, - enum rtmutex_chainwalk walk) -{ - return (waiter != NULL); -} - -static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w) -{ - debug_rt_mutex_print_deadlock(w); -} diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 48fff6437901..406818196a9f 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -49,7 +49,7 @@ * set this bit before looking at the lock. */ -static void +static __always_inline void rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner) { unsigned long val = (unsigned long)owner; @@ -60,13 +60,13 @@ rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner) WRITE_ONCE(lock->owner, (struct task_struct *)val); } -static inline void clear_rt_mutex_waiters(struct rt_mutex *lock) +static __always_inline void clear_rt_mutex_waiters(struct rt_mutex *lock) { lock->owner = (struct task_struct *) ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS); } -static void fixup_rt_mutex_waiters(struct rt_mutex *lock) +static __always_inline void fixup_rt_mutex_waiters(struct rt_mutex *lock) { unsigned long owner, *p = (unsigned long *) &lock->owner; @@ -149,7 +149,7 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock) * all future threads that attempt to [Rmw] the lock to the slowpath. As such * relaxed semantics suffice. */ -static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) +static __always_inline void mark_rt_mutex_waiters(struct rt_mutex *lock) { unsigned long owner, *p = (unsigned long *) &lock->owner; @@ -165,8 +165,8 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) * 2) Drop lock->wait_lock * 3) Try to unlock the lock with cmpxchg */ -static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock, - unsigned long flags) +static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex *lock, + unsigned long flags) __releases(lock->wait_lock) { struct task_struct *owner = rt_mutex_owner(lock); @@ -204,7 +204,7 @@ static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock, # define rt_mutex_cmpxchg_acquire(l,c,n) (0) # define rt_mutex_cmpxchg_release(l,c,n) (0) -static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) +static __always_inline void mark_rt_mutex_waiters(struct rt_mutex *lock) { lock->owner = (struct task_struct *) ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS); @@ -213,8 +213,8 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) /* * Simple slow path only version: lock->owner is protected by lock->wait_lock. 
*/ -static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock, - unsigned long flags) +static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex *lock, + unsigned long flags) __releases(lock->wait_lock) { lock->owner = NULL; @@ -229,9 +229,8 @@ static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock, #define task_to_waiter(p) \ &(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline } -static inline int -rt_mutex_waiter_less(struct rt_mutex_waiter *left, - struct rt_mutex_waiter *right) +static __always_inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left, + struct rt_mutex_waiter *right) { if (left->prio < right->prio) return 1; @@ -248,9 +247,8 @@ rt_mutex_waiter_less(struct rt_mutex_waiter *left, return 0; } -static inline int -rt_mutex_waiter_equal(struct rt_mutex_waiter *left, - struct rt_mutex_waiter *right) +static __always_inline int rt_mutex_waiter_equal(struct rt_mutex_waiter *left, + struct rt_mutex_waiter *right) { if (left->prio != right->prio) return 0; @@ -270,18 +268,18 @@ rt_mutex_waiter_equal(struct rt_mutex_waiter *left, #define __node_2_waiter(node) \ rb_entry((node), struct rt_mutex_waiter, tree_entry) -static inline bool __waiter_less(struct rb_node *a, const struct rb_node *b) +static __always_inline bool __waiter_less(struct rb_node *a, const struct rb_node *b) { return rt_mutex_waiter_less(__node_2_waiter(a), __node_2_waiter(b)); } -static void +static __always_inline void rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter) { rb_add_cached(&waiter->tree_entry, &lock->waiters, __waiter_less); } -static void +static __always_inline void rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter) { if (RB_EMPTY_NODE(&waiter->tree_entry)) @@ -294,18 +292,19 @@ rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter) #define __node_2_pi_waiter(node) \ rb_entry((node), struct rt_mutex_waiter, pi_tree_entry) -static inline bool __pi_waiter_less(struct rb_node *a, const struct rb_node *b) +static __always_inline bool +__pi_waiter_less(struct rb_node *a, const struct rb_node *b) { return rt_mutex_waiter_less(__node_2_pi_waiter(a), __node_2_pi_waiter(b)); } -static void +static __always_inline void rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter) { rb_add_cached(&waiter->pi_tree_entry, &task->pi_waiters, __pi_waiter_less); } -static void +static __always_inline void rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter) { if (RB_EMPTY_NODE(&waiter->pi_tree_entry)) @@ -315,7 +314,7 @@ rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter) RB_CLEAR_NODE(&waiter->pi_tree_entry); } -static void rt_mutex_adjust_prio(struct task_struct *p) +static __always_inline void rt_mutex_adjust_prio(struct task_struct *p) { struct task_struct *pi_task = NULL; @@ -340,17 +339,13 @@ static void rt_mutex_adjust_prio(struct task_struct *p) * deadlock detection is disabled independent of the detect argument * and the config settings. 
*/ -static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter, - enum rtmutex_chainwalk chwalk) +static __always_inline bool +rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter, + enum rtmutex_chainwalk chwalk) { - /* - * This is just a wrapper function for the following call, - * because debug_rt_mutex_detect_deadlock() smells like a magic - * debug feature and I wanted to keep the cond function in the - * main source file along with the comments instead of having - * two of the same in the headers. - */ - return debug_rt_mutex_detect_deadlock(waiter, chwalk); + if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEX)) + return waiter != NULL; + return chwalk == RT_MUTEX_FULL_CHAINWALK; } /* @@ -358,7 +353,7 @@ static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter, */ int max_lock_depth = 1024; -static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p) +static __always_inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p) { return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL; } @@ -426,12 +421,12 @@ static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p) * unlock(lock->wait_lock); release [L] * goto again; */ -static int rt_mutex_adjust_prio_chain(struct task_struct *task, - enum rtmutex_chainwalk chwalk, - struct rt_mutex *orig_lock, - struct rt_mutex *next_lock, - struct rt_mutex_waiter *orig_waiter, - struct task_struct *top_task) +static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task, + enum rtmutex_chainwalk chwalk, + struct rt_mutex *orig_lock, + struct rt_mutex *next_lock, + struct rt_mutex_waiter *orig_waiter, + struct task_struct *top_task) { struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter; struct rt_mutex_waiter *prerequeue_top_waiter; @@ -579,7 +574,6 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, * walk, we detected a deadlock. */ if (lock == orig_lock || rt_mutex_owner(lock) == top_task) { - debug_rt_mutex_deadlock(chwalk, orig_waiter, lock); raw_spin_unlock(&lock->wait_lock); ret = -EDEADLK; goto out_unlock_pi; @@ -706,7 +700,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, } else if (prerequeue_top_waiter == waiter) { /* * The waiter was the top waiter on the lock, but is - * no longer the top prority waiter. Replace waiter in + * no longer the top priority waiter. Replace waiter in * the owner tasks pi waiters tree with the new top * (highest priority) waiter and adjust the priority * of the owner. @@ -784,8 +778,9 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, * @waiter: The waiter that is queued to the lock's wait tree if the * callsite called task_blocked_on_lock(), otherwise NULL */ -static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, - struct rt_mutex_waiter *waiter) +static int __sched +try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, + struct rt_mutex_waiter *waiter) { lockdep_assert_held(&lock->wait_lock); @@ -886,9 +881,6 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, raw_spin_unlock(&task->pi_lock); takeit: - /* We got the lock. */ - debug_rt_mutex_lock(lock); - /* * This either preserves the RT_MUTEX_HAS_WAITERS bit if there * are still waiters or clears it. 
@@ -905,10 +897,10 @@ takeit: * * This must be called with lock->wait_lock held and interrupts disabled */ -static int task_blocks_on_rt_mutex(struct rt_mutex *lock, - struct rt_mutex_waiter *waiter, - struct task_struct *task, - enum rtmutex_chainwalk chwalk) +static int __sched task_blocks_on_rt_mutex(struct rt_mutex *lock, + struct rt_mutex_waiter *waiter, + struct task_struct *task, + enum rtmutex_chainwalk chwalk) { struct task_struct *owner = rt_mutex_owner(lock); struct rt_mutex_waiter *top_waiter = waiter; @@ -994,8 +986,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, * * Called with lock->wait_lock held and interrupts disabled. */ -static void mark_wakeup_next_waiter(struct wake_q_head *wake_q, - struct rt_mutex *lock) +static void __sched mark_wakeup_next_waiter(struct wake_q_head *wake_q, + struct rt_mutex *lock) { struct rt_mutex_waiter *waiter; @@ -1044,8 +1036,8 @@ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q, * Must be called with lock->wait_lock held and interrupts disabled. I must * have just failed to try_to_take_rt_mutex(). */ -static void remove_waiter(struct rt_mutex *lock, - struct rt_mutex_waiter *waiter) +static void __sched remove_waiter(struct rt_mutex *lock, + struct rt_mutex_waiter *waiter) { bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock)); struct task_struct *owner = rt_mutex_owner(lock); @@ -1102,7 +1094,7 @@ static void remove_waiter(struct rt_mutex *lock, * * Called from sched_setscheduler */ -void rt_mutex_adjust_pi(struct task_struct *task) +void __sched rt_mutex_adjust_pi(struct task_struct *task) { struct rt_mutex_waiter *waiter; struct rt_mutex *next_lock; @@ -1125,7 +1117,7 @@ void rt_mutex_adjust_pi(struct task_struct *task) next_lock, NULL, task); } -void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter) +void __sched rt_mutex_init_waiter(struct rt_mutex_waiter *waiter) { debug_rt_mutex_init_waiter(waiter); RB_CLEAR_NODE(&waiter->pi_tree_entry); @@ -1143,10 +1135,9 @@ void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter) * * Must be called with lock->wait_lock held and interrupts disabled */ -static int __sched -__rt_mutex_slowlock(struct rt_mutex *lock, int state, - struct hrtimer_sleeper *timeout, - struct rt_mutex_waiter *waiter) +static int __sched __rt_mutex_slowlock(struct rt_mutex *lock, int state, + struct hrtimer_sleeper *timeout, + struct rt_mutex_waiter *waiter) { int ret = 0; @@ -1155,24 +1146,17 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state, if (try_to_take_rt_mutex(lock, current, waiter)) break; - /* - * TASK_INTERRUPTIBLE checks for signals and - * timeout. Ignored otherwise. - */ - if (likely(state == TASK_INTERRUPTIBLE)) { - /* Signal pending? 
*/ - if (signal_pending(current)) - ret = -EINTR; - if (timeout && !timeout->task) - ret = -ETIMEDOUT; - if (ret) - break; + if (timeout && !timeout->task) { + ret = -ETIMEDOUT; + break; + } + if (signal_pending_state(state, current)) { + ret = -EINTR; + break; } raw_spin_unlock_irq(&lock->wait_lock); - debug_rt_mutex_print_deadlock(waiter); - schedule(); raw_spin_lock_irq(&lock->wait_lock); @@ -1183,8 +1167,8 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state, return ret; } -static void rt_mutex_handle_deadlock(int res, int detect_deadlock, - struct rt_mutex_waiter *w) +static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock, + struct rt_mutex_waiter *w) { /* * If the result is not -EDEADLOCK or the caller requested @@ -1194,9 +1178,9 @@ static void rt_mutex_handle_deadlock(int res, int detect_deadlock, return; /* - * Yell lowdly and stop the task right here. + * Yell loudly and stop the task right here. */ - rt_mutex_print_deadlock(w); + WARN(1, "rtmutex deadlock detected\n"); while (1) { set_current_state(TASK_INTERRUPTIBLE); schedule(); @@ -1206,10 +1190,9 @@ static void rt_mutex_handle_deadlock(int res, int detect_deadlock, /* * Slow path lock function: */ -static int __sched -rt_mutex_slowlock(struct rt_mutex *lock, int state, - struct hrtimer_sleeper *timeout, - enum rtmutex_chainwalk chwalk) +static int __sched rt_mutex_slowlock(struct rt_mutex *lock, int state, + struct hrtimer_sleeper *timeout, + enum rtmutex_chainwalk chwalk) { struct rt_mutex_waiter waiter; unsigned long flags; @@ -1268,7 +1251,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, return ret; } -static inline int __rt_mutex_slowtrylock(struct rt_mutex *lock) +static int __sched __rt_mutex_slowtrylock(struct rt_mutex *lock) { int ret = try_to_take_rt_mutex(lock, current, NULL); @@ -1284,7 +1267,7 @@ static inline int __rt_mutex_slowtrylock(struct rt_mutex *lock) /* * Slow path try-lock function: */ -static inline int rt_mutex_slowtrylock(struct rt_mutex *lock) +static int __sched rt_mutex_slowtrylock(struct rt_mutex *lock) { unsigned long flags; int ret; @@ -1311,13 +1294,24 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock) } /* + * Performs the wakeup of the top-waiter and re-enables preemption. + */ +void __sched rt_mutex_postunlock(struct wake_q_head *wake_q) +{ + wake_up_q(wake_q); + + /* Pairs with preempt_disable() in mark_wakeup_next_waiter() */ + preempt_enable(); +} + +/* * Slow path to release a rt-mutex. * * Return whether the current task needs to call rt_mutex_postunlock(). */ -static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, - struct wake_q_head *wake_q) +static void __sched rt_mutex_slowunlock(struct rt_mutex *lock) { + DEFINE_WAKE_Q(wake_q); unsigned long flags; /* irqsave required to support early boot calls */ @@ -1359,7 +1353,7 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, while (!rt_mutex_has_waiters(lock)) { /* Drops lock->wait_lock ! */ if (unlock_rt_mutex_safe(lock, flags) == true) - return false; + return; /* Relock the rtmutex and try again */ raw_spin_lock_irqsave(&lock->wait_lock, flags); } @@ -1370,10 +1364,10 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, * * Queue the next waiter for wakeup once we release the wait_lock. 
*/ - mark_wakeup_next_waiter(wake_q, lock); + mark_wakeup_next_waiter(&wake_q, lock); raw_spin_unlock_irqrestore(&lock->wait_lock, flags); - return true; /* call rt_mutex_postunlock() */ + rt_mutex_postunlock(&wake_q); } /* @@ -1382,74 +1376,21 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, * The atomic acquire/release ops are compiled away, when either the * architecture does not support cmpxchg or when debugging is enabled. */ -static inline int -rt_mutex_fastlock(struct rt_mutex *lock, int state, - int (*slowfn)(struct rt_mutex *lock, int state, - struct hrtimer_sleeper *timeout, - enum rtmutex_chainwalk chwalk)) -{ - if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) - return 0; - - return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK); -} - -static inline int -rt_mutex_timed_fastlock(struct rt_mutex *lock, int state, - struct hrtimer_sleeper *timeout, - enum rtmutex_chainwalk chwalk, - int (*slowfn)(struct rt_mutex *lock, int state, - struct hrtimer_sleeper *timeout, - enum rtmutex_chainwalk chwalk)) +static __always_inline int __rt_mutex_lock(struct rt_mutex *lock, long state, + unsigned int subclass) { - if (chwalk == RT_MUTEX_MIN_CHAINWALK && - likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) - return 0; + int ret; - return slowfn(lock, state, timeout, chwalk); -} + might_sleep(); + mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_); -static inline int -rt_mutex_fasttrylock(struct rt_mutex *lock, - int (*slowfn)(struct rt_mutex *lock)) -{ if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) - return 1; - - return slowfn(lock); -} - -/* - * Performs the wakeup of the top-waiter and re-enables preemption. - */ -void rt_mutex_postunlock(struct wake_q_head *wake_q) -{ - wake_up_q(wake_q); - - /* Pairs with preempt_disable() in rt_mutex_slowunlock() */ - preempt_enable(); -} - -static inline void -rt_mutex_fastunlock(struct rt_mutex *lock, - bool (*slowfn)(struct rt_mutex *lock, - struct wake_q_head *wqh)) -{ - DEFINE_WAKE_Q(wake_q); - - if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) - return; - - if (slowfn(lock, &wake_q)) - rt_mutex_postunlock(&wake_q); -} - -static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass) -{ - might_sleep(); + return 0; - mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_); - rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock); + ret = rt_mutex_slowlock(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK); + if (ret) + mutex_release(&lock->dep_map, _RET_IP_); + return ret; } #ifdef CONFIG_DEBUG_LOCK_ALLOC @@ -1461,7 +1402,7 @@ static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass) */ void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass) { - __rt_mutex_lock(lock, subclass); + __rt_mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass); } EXPORT_SYMBOL_GPL(rt_mutex_lock_nested); @@ -1474,7 +1415,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock_nested); */ void __sched rt_mutex_lock(struct rt_mutex *lock) { - __rt_mutex_lock(lock, 0); + __rt_mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0); } EXPORT_SYMBOL_GPL(rt_mutex_lock); #endif @@ -1490,82 +1431,37 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock); */ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock) { - int ret; - - might_sleep(); - - mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); - ret = rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock); - if (ret) - mutex_release(&lock->dep_map, _RET_IP_); - - return ret; + return __rt_mutex_lock(lock, TASK_INTERRUPTIBLE, 0); } 
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible); -/* - * Futex variant, must not use fastpath. - */ -int __sched rt_mutex_futex_trylock(struct rt_mutex *lock) -{ - return rt_mutex_slowtrylock(lock); -} - -int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock) -{ - return __rt_mutex_slowtrylock(lock); -} - -/** - * rt_mutex_timed_lock - lock a rt_mutex interruptible - * the timeout structure is provided - * by the caller - * - * @lock: the rt_mutex to be locked - * @timeout: timeout structure or NULL (no timeout) - * - * Returns: - * 0 on success - * -EINTR when interrupted by a signal - * -ETIMEDOUT when the timeout expired - */ -int -rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout) -{ - int ret; - - might_sleep(); - - mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); - ret = rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout, - RT_MUTEX_MIN_CHAINWALK, - rt_mutex_slowlock); - if (ret) - mutex_release(&lock->dep_map, _RET_IP_); - - return ret; -} -EXPORT_SYMBOL_GPL(rt_mutex_timed_lock); - /** * rt_mutex_trylock - try to lock a rt_mutex * * @lock: the rt_mutex to be locked * - * This function can only be called in thread context. It's safe to - * call it from atomic regions, but not from hard interrupt or soft - * interrupt context. + * This function can only be called in thread context. It's safe to call it + * from atomic regions, but not from hard or soft interrupt context. * - * Returns 1 on success and 0 on contention + * Returns: + * 1 on success + * 0 on contention */ int __sched rt_mutex_trylock(struct rt_mutex *lock) { int ret; - if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq())) + if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES) && WARN_ON_ONCE(!in_task())) return 0; - ret = rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock); + /* + * No lockdep annotation required because lockdep disables the fast + * path. + */ + if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) + return 1; + + ret = rt_mutex_slowtrylock(lock); if (ret) mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); @@ -1581,10 +1477,26 @@ EXPORT_SYMBOL_GPL(rt_mutex_trylock); void __sched rt_mutex_unlock(struct rt_mutex *lock) { mutex_release(&lock->dep_map, _RET_IP_); - rt_mutex_fastunlock(lock, rt_mutex_slowunlock); + if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) + return; + + rt_mutex_slowunlock(lock); } EXPORT_SYMBOL_GPL(rt_mutex_unlock); +/* + * Futex variants, must not use fastpath. + */ +int __sched rt_mutex_futex_trylock(struct rt_mutex *lock) +{ + return rt_mutex_slowtrylock(lock); +} + +int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock) +{ + return __rt_mutex_slowtrylock(lock); +} + /** * __rt_mutex_futex_unlock - Futex variant, that since futex variants * do not use the fast-path, can be simple and will not need to retry. @@ -1593,7 +1505,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_unlock); * @wake_q: The wake queue head from which to get the next lock waiter */ bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock, - struct wake_q_head *wake_q) + struct wake_q_head *wake_q) { lockdep_assert_held(&lock->wait_lock); @@ -1630,23 +1542,6 @@ void __sched rt_mutex_futex_unlock(struct rt_mutex *lock) } /** - * rt_mutex_destroy - mark a mutex unusable - * @lock: the mutex to be destroyed - * - * This function marks the mutex uninitialized, and any subsequent - * use of the mutex is forbidden. The mutex must not be locked when - * this function is called. 
- */ -void rt_mutex_destroy(struct rt_mutex *lock) -{ - WARN_ON(rt_mutex_is_locked(lock)); -#ifdef CONFIG_DEBUG_RT_MUTEXES - lock->magic = NULL; -#endif -} -EXPORT_SYMBOL_GPL(rt_mutex_destroy); - -/** * __rt_mutex_init - initialize the rt_mutex * * @lock: The rt_mutex to be initialized @@ -1657,15 +1552,13 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy); * * Initializing of a locked rt_mutex is not allowed */ -void __rt_mutex_init(struct rt_mutex *lock, const char *name, +void __sched __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key) { - lock->owner = NULL; - raw_spin_lock_init(&lock->wait_lock); - lock->waiters = RB_ROOT_CACHED; + debug_check_no_locks_freed((void *)lock, sizeof(*lock)); + lockdep_init_map(&lock->dep_map, name, key, 0); - if (name && key) - debug_rt_mutex_init(lock, name, key); + __rt_mutex_basic_init(lock); } EXPORT_SYMBOL_GPL(__rt_mutex_init); @@ -1683,11 +1576,10 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init); * possible at this point because the pi_state which contains the rtmutex * is not yet visible to other tasks. */ -void rt_mutex_init_proxy_locked(struct rt_mutex *lock, - struct task_struct *proxy_owner) +void __sched rt_mutex_init_proxy_locked(struct rt_mutex *lock, + struct task_struct *proxy_owner) { - __rt_mutex_init(lock, NULL, NULL); - debug_rt_mutex_proxy_lock(lock, proxy_owner); + __rt_mutex_basic_init(lock); rt_mutex_set_owner(lock, proxy_owner); } @@ -1703,7 +1595,7 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock, * possible because it belongs to the pi_state which is about to be freed * and it is not longer visible to other tasks. */ -void rt_mutex_proxy_unlock(struct rt_mutex *lock) +void __sched rt_mutex_proxy_unlock(struct rt_mutex *lock) { debug_rt_mutex_proxy_unlock(lock); rt_mutex_set_owner(lock, NULL); @@ -1728,9 +1620,9 @@ void rt_mutex_proxy_unlock(struct rt_mutex *lock) * * Special API call for PI-futex support. */ -int __rt_mutex_start_proxy_lock(struct rt_mutex *lock, - struct rt_mutex_waiter *waiter, - struct task_struct *task) +int __sched __rt_mutex_start_proxy_lock(struct rt_mutex *lock, + struct rt_mutex_waiter *waiter, + struct task_struct *task) { int ret; @@ -1753,8 +1645,6 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock, ret = 0; } - debug_rt_mutex_print_deadlock(waiter); - return ret; } @@ -1777,9 +1667,9 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock, * * Special API call for PI-futex support. */ -int rt_mutex_start_proxy_lock(struct rt_mutex *lock, - struct rt_mutex_waiter *waiter, - struct task_struct *task) +int __sched rt_mutex_start_proxy_lock(struct rt_mutex *lock, + struct rt_mutex_waiter *waiter, + struct task_struct *task) { int ret; @@ -1793,26 +1683,6 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock, } /** - * rt_mutex_next_owner - return the next owner of the lock - * - * @lock: the rt lock query - * - * Returns the next owner of the lock or NULL - * - * Caller has to serialize against other accessors to the lock - * itself. - * - * Special API call for PI-futex support - */ -struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock) -{ - if (!rt_mutex_has_waiters(lock)) - return NULL; - - return rt_mutex_top_waiter(lock)->task; -} - -/** * rt_mutex_wait_proxy_lock() - Wait for lock acquisition * @lock: the rt_mutex we were woken on * @to: the timeout, null if none. 
hrtimer should already have @@ -1829,9 +1699,9 @@ struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock) * * Special API call for PI-futex support */ -int rt_mutex_wait_proxy_lock(struct rt_mutex *lock, - struct hrtimer_sleeper *to, - struct rt_mutex_waiter *waiter) +int __sched rt_mutex_wait_proxy_lock(struct rt_mutex *lock, + struct hrtimer_sleeper *to, + struct rt_mutex_waiter *waiter) { int ret; @@ -1869,8 +1739,8 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock, * * Special API call for PI-futex support */ -bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock, - struct rt_mutex_waiter *waiter) +bool __sched rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock, + struct rt_mutex_waiter *waiter) { bool cleanup = false; @@ -1905,3 +1775,11 @@ bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock, return cleanup; } + +#ifdef CONFIG_DEBUG_RT_MUTEXES +void rt_mutex_debug_task_free(struct task_struct *task) +{ + DEBUG_LOCKS_WARN_ON(!RB_EMPTY_ROOT(&task->pi_waiters.rb_root)); + DEBUG_LOCKS_WARN_ON(task->pi_blocked_on); +} +#endif diff --git a/kernel/locking/rtmutex.h b/kernel/locking/rtmutex.h deleted file mode 100644 index 732f96abf462..000000000000 --- a/kernel/locking/rtmutex.h +++ /dev/null @@ -1,35 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * RT-Mutexes: blocking mutual exclusion locks with PI support - * - * started by Ingo Molnar and Thomas Gleixner: - * - * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> - * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com> - * - * This file contains macros used solely by rtmutex.c. - * Non-debug version. - */ - -#define rt_mutex_deadlock_check(l) (0) -#define debug_rt_mutex_init_waiter(w) do { } while (0) -#define debug_rt_mutex_free_waiter(w) do { } while (0) -#define debug_rt_mutex_lock(l) do { } while (0) -#define debug_rt_mutex_proxy_lock(l,p) do { } while (0) -#define debug_rt_mutex_proxy_unlock(l) do { } while (0) -#define debug_rt_mutex_unlock(l) do { } while (0) -#define debug_rt_mutex_init(m, n, k) do { } while (0) -#define debug_rt_mutex_deadlock(d, a ,l) do { } while (0) -#define debug_rt_mutex_print_deadlock(w) do { } while (0) -#define debug_rt_mutex_reset_waiter(w) do { } while (0) - -static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w) -{ - WARN(1, "rtmutex deadlock detected\n"); -} - -static inline bool debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *w, - enum rtmutex_chainwalk walk) -{ - return walk == RT_MUTEX_FULL_CHAINWALK; -} diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h index ca6fb489007b..a90c22abdbca 100644 --- a/kernel/locking/rtmutex_common.h +++ b/kernel/locking/rtmutex_common.h @@ -13,6 +13,7 @@ #ifndef __KERNEL_RTMUTEX_COMMON_H #define __KERNEL_RTMUTEX_COMMON_H +#include <linux/debug_locks.h> #include <linux/rtmutex.h> #include <linux/sched/wake_q.h> @@ -23,34 +24,30 @@ * @tree_entry: pi node to enqueue into the mutex waiters tree * @pi_tree_entry: pi node to enqueue into the mutex owner waiters tree * @task: task reference to the blocked task + * @lock: Pointer to the rt_mutex on which the waiter blocks + * @prio: Priority of the waiter + * @deadline: Deadline of the waiter if applicable */ struct rt_mutex_waiter { - struct rb_node tree_entry; - struct rb_node pi_tree_entry; + struct rb_node tree_entry; + struct rb_node pi_tree_entry; struct task_struct *task; struct rt_mutex *lock; -#ifdef CONFIG_DEBUG_RT_MUTEXES - unsigned long ip; - struct pid *deadlock_task_pid; - struct rt_mutex *deadlock_lock; 
-#endif - int prio; - u64 deadline; + int prio; + u64 deadline; }; /* - * Various helpers to access the waiters-tree: + * Must be guarded because this header is included from rcu/tree_plugin.h + * unconditionally. */ - #ifdef CONFIG_RT_MUTEXES - static inline int rt_mutex_has_waiters(struct rt_mutex *lock) { return !RB_EMPTY_ROOT(&lock->waiters.rb_root); } -static inline struct rt_mutex_waiter * -rt_mutex_top_waiter(struct rt_mutex *lock) +static inline struct rt_mutex_waiter *rt_mutex_top_waiter(struct rt_mutex *lock) { struct rb_node *leftmost = rb_first_cached(&lock->waiters); struct rt_mutex_waiter *w = NULL; @@ -67,42 +64,12 @@ static inline int task_has_pi_waiters(struct task_struct *p) return !RB_EMPTY_ROOT(&p->pi_waiters.rb_root); } -static inline struct rt_mutex_waiter * -task_top_pi_waiter(struct task_struct *p) -{ - return rb_entry(p->pi_waiters.rb_leftmost, - struct rt_mutex_waiter, pi_tree_entry); -} - -#else - -static inline int rt_mutex_has_waiters(struct rt_mutex *lock) -{ - return false; -} - -static inline struct rt_mutex_waiter * -rt_mutex_top_waiter(struct rt_mutex *lock) +static inline struct rt_mutex_waiter *task_top_pi_waiter(struct task_struct *p) { - return NULL; -} - -static inline int task_has_pi_waiters(struct task_struct *p) -{ - return false; + return rb_entry(p->pi_waiters.rb_leftmost, struct rt_mutex_waiter, + pi_tree_entry); } -static inline struct rt_mutex_waiter * -task_top_pi_waiter(struct task_struct *p) -{ - return NULL; -} - -#endif - -/* - * lock->owner state tracking: - */ #define RT_MUTEX_HAS_WAITERS 1UL static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock) @@ -111,6 +78,13 @@ static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock) return (struct task_struct *) (owner & ~RT_MUTEX_HAS_WAITERS); } +#else /* CONFIG_RT_MUTEXES */ +/* Used in rcu/tree_plugin.h */ +static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock) +{ + return NULL; +} +#endif /* !CONFIG_RT_MUTEXES */ /* * Constants for rt mutex functions which have a selectable deadlock @@ -127,10 +101,16 @@ enum rtmutex_chainwalk { RT_MUTEX_FULL_CHAINWALK, }; +static inline void __rt_mutex_basic_init(struct rt_mutex *lock) +{ + lock->owner = NULL; + raw_spin_lock_init(&lock->wait_lock); + lock->waiters = RB_ROOT_CACHED; +} + /* * PI-futex support (proxy locking functions, etc.): */ -extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock); extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock, struct task_struct *proxy_owner); extern void rt_mutex_proxy_unlock(struct rt_mutex *lock); @@ -156,10 +136,29 @@ extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock, extern void rt_mutex_postunlock(struct wake_q_head *wake_q); -#ifdef CONFIG_DEBUG_RT_MUTEXES -# include "rtmutex-debug.h" -#else -# include "rtmutex.h" -#endif +/* Debug functions */ +static inline void debug_rt_mutex_unlock(struct rt_mutex *lock) +{ + if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES)) + DEBUG_LOCKS_WARN_ON(rt_mutex_owner(lock) != current); +} + +static inline void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock) +{ + if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES)) + DEBUG_LOCKS_WARN_ON(!rt_mutex_owner(lock)); +} + +static inline void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter) +{ + if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES)) + memset(waiter, 0x11, sizeof(*waiter)); +} + +static inline void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter) +{ + if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES)) + memset(waiter, 0x22, sizeof(*waiter)); +} #endif diff --git 
a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c index abba5df50006..809b0016d344 100644 --- a/kernel/locking/rwsem.c +++ b/kernel/locking/rwsem.c @@ -632,7 +632,7 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem) } /* - * The rwsem_spin_on_owner() function returns the folowing 4 values + * The rwsem_spin_on_owner() function returns the following 4 values * depending on the lock owner state. * OWNER_NULL : owner is currently NULL * OWNER_WRITER: when owner changes and is a writer @@ -819,7 +819,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem) * we try to get it. The new owner may be a spinnable * writer. * - * To take advantage of two scenarios listed agove, the RT + * To take advantage of two scenarios listed above, the RT * task is made to retry one more time to see if it can * acquire the lock or continue spinning on the new owning * writer. Of course, if the time lag is long enough or the diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c index 0ff08380f531..c8d7ad9fb9b2 100644 --- a/kernel/locking/spinlock.c +++ b/kernel/locking/spinlock.c @@ -58,10 +58,10 @@ EXPORT_PER_CPU_SYMBOL(__mmiowb_state); /* * We build the __lock_function inlines here. They are too large for * inlining all over the place, but here is only one user per function - * which embedds them into the calling _lock_function below. + * which embeds them into the calling _lock_function below. * * This could be a long-held lock. We both prepare to spin for a long - * time (making _this_ CPU preemptable if possible), and we also signal + * time (making _this_ CPU preemptible if possible), and we also signal * towards that other CPU that it should break the lock ASAP. */ #define BUILD_LOCK_OPS(op, locktype) \ |