author     Mark Rutland <mark.rutland@arm.com>      2022-02-14 17:52:16 +0100
committer  Peter Zijlstra <peterz@infradead.org>    2022-02-19 11:11:09 +0100
commit     1b2d3451ee50a0968cb9933f726e50b368ba5073 (patch)
tree       ce9ff2d8d3aa9d468f0288a5713ba2dfb3ab7006 /arch/arm64/include
parent     arm64: entry: Centralize preemption decision (diff)
arm64: Support PREEMPT_DYNAMIC
This patch enables support for PREEMPT_DYNAMIC on arm64, allowing the
preemption model to be chosen at boot time.
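With PREEMPT_DYNAMIC enabled, the model is selected via the `preempt=`
kernel command-line option (see
Documentation/admin-guide/kernel-parameters.txt). As a usage sketch
(the debugfs path assumes CONFIG_SCHED_DEBUG and a mounted debugfs):

| # pick a model at boot, on the kernel command line (one of):
| preempt=none
| preempt=voluntary
| preempt=full
|
| # inspect the active model at runtime; the current choice is shown
| # in parentheses, e.g. "none voluntary (full)":
| cat /sys/kernel/debug/sched/preempt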
Specifically, this patch selects HAVE_PREEMPT_DYNAMIC_KEY, so that each
preemption function is an out-of-line call with an early return
depending upon a static key. This leaves almost all the codegen up to
the compiler, and side-steps a number of pain points with static calls
(e.g. interaction with CFI schemes). This should have no worse overhead
than using non-inline static calls, as those use out-of-line trampolines
with early returns.
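For reference, the generic side of this pattern was added earlier in
this series in kernel/sched/core.c. A minimal sketch of the
HAVE_PREEMPT_DYNAMIC_KEY form of one wrapper (shape per the series,
not quoted verbatim):

| DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched);
|
| int __sched dynamic_cond_resched(void)
| {
| 	/* out-of-line patch site: the `b`/`nop` in the disassembly below */
| 	if (!static_branch_unlikely(&sk_dynamic_cond_resched))
| 		return 0;
| 	return __cond_resched();
| }
| EXPORT_SYMBOL(dynamic_cond_resched);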
For example, the dynamic_cond_resched() wrapper looks as follows when
enabled. When disabled, the first `B` is replaced with a `NOP`,
resulting in an early return.
| <dynamic_cond_resched>:
|	bti	c
|	b	<dynamic_cond_resched+0x10>	// patched to `nop` when disabled
|	mov	w0, #0x0			// early return: 0
|	ret
|	mrs	x0, sp_el0			// 'current' task pointer
|	ldr	x0, [x0, #8]			// thread_info.preempt_count
|	cbnz	x0, <dynamic_cond_resched+0x8>	// nothing to do: return 0
|	paciasp
|	stp	x29, x30, [sp, #-16]!
|	mov	x29, sp
|	bl	<preempt_schedule_common>
|	mov	w0, #0x1			// rescheduled: return 1
|	ldp	x29, x30, [sp], #16
|	autiasp
|	ret
... compared to the regular form of the function:
| <__cond_resched>:
|	bti	c
|	mrs	x0, sp_el0
|	ldr	x1, [x0, #8]
|	cbz	x1, <__cond_resched+0x18>
|	mov	w0, #0x0
|	ret
|	paciasp
|	stp	x29, x30, [sp, #-16]!
|	mov	x29, sp
|	bl	<preempt_schedule_common>
|	mov	w0, #0x1
|	ldp	x29, x30, [sp], #16
|	autiasp
|	ret
Since arm64 does not yet use the generic entry code, we must define our
own `sk_dynamic_irqentry_exit_cond_resched`, which will be
enabled/disabled by the common code in kernel/sched/core.c. All other
preemption functions and associated static keys are defined there.
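Concretely, the consumer side lands in arch/arm64/kernel/entry-common.c
(outside this diffstat, which is limited to arch/arm64/include); a
sketch of its shape, not quoted verbatim:

| #ifdef CONFIG_PREEMPT_DYNAMIC
| DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
| #define need_irq_preemption() \
| 	(static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
| #else
| #define need_irq_preemption()	IS_ENABLED(CONFIG_PREEMPTION)
| #endif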
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Ard Biesheuvel <ardb@kernel.org>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Frederic Weisbecker <frederic@kernel.org>
Link: https://lore.kernel.org/r/20220214165216.2231574-8-mark.rutland@arm.com
Diffstat (limited to 'arch/arm64/include')
-rw-r--r--  arch/arm64/include/asm/preempt.h  19
1 file changed, 17 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/include/asm/preempt.h b/arch/arm64/include/asm/preempt.h
index e83f0982b99c..0159b625cc7f 100644
--- a/arch/arm64/include/asm/preempt.h
+++ b/arch/arm64/include/asm/preempt.h
@@ -2,6 +2,7 @@
 #ifndef __ASM_PREEMPT_H
 #define __ASM_PREEMPT_H
 
+#include <linux/jump_label.h>
 #include <linux/thread_info.h>
 
 #define PREEMPT_NEED_RESCHED	BIT(32)
@@ -80,10 +81,24 @@ static inline bool should_resched(int preempt_offset)
 }
 
 #ifdef CONFIG_PREEMPTION
+
 void preempt_schedule(void);
-#define __preempt_schedule() preempt_schedule()
 void preempt_schedule_notrace(void);
-#define __preempt_schedule_notrace() preempt_schedule_notrace()
+
+#ifdef CONFIG_PREEMPT_DYNAMIC
+
+DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
+void dynamic_preempt_schedule(void);
+#define __preempt_schedule()		dynamic_preempt_schedule()
+void dynamic_preempt_schedule_notrace(void);
+#define __preempt_schedule_notrace()	dynamic_preempt_schedule_notrace()
+
+#else /* CONFIG_PREEMPT_DYNAMIC */
+
+#define __preempt_schedule()		preempt_schedule()
+#define __preempt_schedule_notrace()	preempt_schedule_notrace()
+
+#endif /* CONFIG_PREEMPT_DYNAMIC */
 #endif /* CONFIG_PREEMPTION */
 
 #endif /* __ASM_PREEMPT_H */