| author | Thomas Gleixner <tglx@linutronix.de> | 2021-08-15 23:27:48 +0200 |
| --- | --- | --- |
| committer | Ingo Molnar <mingo@kernel.org> | 2021-08-17 16:57:17 +0200 |
| commit | 6991436c2b5d91d5358d9914ae2df22b9a1d1dc9 (patch) | |
| tree | d736ebbfdcb39a025c712273067677deead69dfb | |
| parent | sched/core: Rework the __schedule() preempt argument (diff) | |
sched/core: Provide a scheduling point for RT locks
RT-enabled kernels substitute spin/rwlocks with 'sleeping' variants based
on rtmutexes. Blocking on such a lock is similar to preemption with respect to:
- I/O scheduling and worker handling, because these functions might block
  on another substituted lock, or the call might originate from lock
  contention within these functions themselves.
- RCU, which considers this like a preemption, because the task might be in
  a read-side critical section.
Add a separate scheduling point for this, and hand a new scheduling-mode
argument to __schedule() which, together with separate mode masks, allows
this to be handled gracefully from within the scheduler without
proliferating it to other subsystems like RCU.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20210815211302.372319055@linutronix.de
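For orientation, the sketch below shows how a waiter on one of the substituted locks is expected to use the new scheduling point. The helper rtlock_try_acquire(), the dedicated TASK_RTLOCK_WAIT wait state and the overall slowpath shape are illustrative assumptions; the real rtmutex-based slowpath is not part of this patch.

```c
/*
 * Illustrative sketch only -- not part of this patch.  The helper
 * rtlock_try_acquire() and the TASK_RTLOCK_WAIT wait state are assumed
 * here; the point is that the waiter blocks via schedule_rtlock()
 * instead of schedule().
 */
static void rtlock_wait_sketch(void)
{
	set_current_state(TASK_RTLOCK_WAIT);	/* assumed dedicated wait state */

	while (!rtlock_try_acquire()) {
		/*
		 * Block via the new scheduling point.  Unlike schedule(),
		 * which flushes pending block I/O and notifies the
		 * workqueue code before blocking, schedule_rtlock() must
		 * not do that, because those paths can take substituted
		 * locks themselves.
		 */
		schedule_rtlock();
		set_current_state(TASK_RTLOCK_WAIT);
	}
	__set_current_state(TASK_RUNNING);
}
```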
-rw-r--r-- | include/linux/sched.h | 3
-rw-r--r-- | kernel/sched/core.c | 20
2 files changed, 22 insertions(+), 1 deletion(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 02714b9a3ff9..746dfc06a35c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -288,6 +288,9 @@ extern long schedule_timeout_idle(long timeout);
 asmlinkage void schedule(void);
 extern void schedule_preempt_disabled(void);
 asmlinkage void preempt_schedule_irq(void);
+#ifdef CONFIG_PREEMPT_RT
+ extern void schedule_rtlock(void);
+#endif
 
 extern int __must_check io_schedule_prepare(void);
 extern void io_schedule_finish(int token);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index ebc24e136222..c89c1d45dd0b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5829,7 +5829,13 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
  */
 #define SM_NONE			0x0
 #define SM_PREEMPT		0x1
-#define SM_MASK_PREEMPT		(~0U)
+#define SM_RTLOCK_WAIT		0x2
+
+#ifndef CONFIG_PREEMPT_RT
+# define SM_MASK_PREEMPT	(~0U)
+#else
+# define SM_MASK_PREEMPT	SM_PREEMPT
+#endif
 
 /*
  * __schedule() is the main scheduler function.
@@ -6134,6 +6140,18 @@ void __sched schedule_preempt_disabled(void)
 	preempt_disable();
 }
 
+#ifdef CONFIG_PREEMPT_RT
+void __sched notrace schedule_rtlock(void)
+{
+	do {
+		preempt_disable();
+		__schedule(SM_RTLOCK_WAIT);
+		sched_preempt_enable_no_resched();
+	} while (need_resched());
+}
+NOKPROBE_SYMBOL(schedule_rtlock);
+#endif
+
 static void __sched notrace preempt_schedule_common(void)
 {
 	do {
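To make the mode-mask handling concrete, the small userspace sketch below mimics the two decisions the new mask is meant to encode, assuming (per the commit message, not visible in this diff) that any non-SM_NONE entry is reported to RCU like a preemption, while a task that has set a non-running state is only treated as a regular block when its mode is not covered by SM_MASK_PREEMPT.

```c
#include <stdio.h>
#include <stdbool.h>

/* The defines added by this patch (values copied from the diff). */
#define SM_NONE         0x0U
#define SM_PREEMPT      0x1U
#define SM_RTLOCK_WAIT  0x2U

#ifndef CONFIG_PREEMPT_RT
# define SM_MASK_PREEMPT  (~0U)       /* !RT: every non-zero mode counts as preemption */
#else
# define SM_MASK_PREEMPT  SM_PREEMPT  /* RT: only SM_PREEMPT does */
#endif

/* Assumed semantics, paraphrased from the commit message. */
static bool rcu_sees_preemption(unsigned int mode)
{
	return mode != SM_NONE;
}

/* A sleeping task is deactivated (dequeued) only for modes outside the mask. */
static bool treated_as_block(unsigned int mode)
{
	return (mode & SM_MASK_PREEMPT) == 0;
}

int main(void)
{
	const unsigned int modes[] = { SM_NONE, SM_PREEMPT, SM_RTLOCK_WAIT };
	const char *names[]        = { "SM_NONE", "SM_PREEMPT", "SM_RTLOCK_WAIT" };

	for (int i = 0; i < 3; i++)
		printf("%-15s rcu-preemption=%d treated-as-block=%d\n",
		       names[i], rcu_sees_preemption(modes[i]),
		       treated_as_block(modes[i]));
	return 0;
}
```

Built without -DCONFIG_PREEMPT_RT the catch-all mask folds SM_RTLOCK_WAIT in with preemption, which is harmless because that mode is never passed on !RT kernels; built with -DCONFIG_PREEMPT_RT an rtlock wait is dequeued like any other sleeper while RCU still sees the entry as a preemption.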