author		Valentin Schneider <valentin.schneider@arm.com>	2021-07-07 18:33:38 +0200
committer	Vasily Gorbik <gor@linux.ibm.com>	2021-07-08 22:12:18 +0200
commit		6a942f5780545ebd11aca8b3ac4b163397962322 (patch)
tree		f0a20802aa79e32712f4b3ca893166a3072ab98b /arch/s390
parent		s390/linkage: increase asm symbols alignment to 16 (diff)
s390: preempt: Fix preempt_count initialization
S390's init_idle_preempt_count(p, cpu) doesn't actually let us initialize the
preempt_count of the requested CPU's idle task: it unconditionally writes
to the current CPU's. This clearly conflicts with idle_threads_init(),
which intends to initialize *all* the idle tasks, including their
preempt_count (or their CPU's, if the arch uses a per-CPU preempt_count).
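For context, the generic path that trips over this looks roughly as follows. This is a simplified sketch of kernel/smpboot.c and kernel/sched/core.c from around this series; the function names are real, but the bodies are abbreviated for illustration rather than verbatim upstream code:

/*
 * Simplified sketch of the generic idle-task init path
 * (kernel/smpboot.c, kernel/sched/core.c); bodies abbreviated.
 */
void __init idle_threads_init(void)
{
	unsigned int cpu, boot_cpu = smp_processor_id();

	for_each_possible_cpu(cpu) {
		if (cpu != boot_cpu)
			idle_init(cpu);	/* fork_idle(cpu) -> init_idle() */
	}
}

void init_idle(struct task_struct *idle, int cpu)
{
	/* ... */
	init_idle_preempt_count(idle, cpu);	/* meant to target @cpu ... */
	/*
	 * ... but the old s390 macro ignored @cpu and wrote
	 * S390_lowcore.preempt_count, i.e. the lowcore of the CPU
	 * running idle_threads_init(), on every single iteration.
	 */
}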
Unfortunately, the way s390 implements its per-CPU preempt_count doesn't let
us initialize every possible CPU's counter early on: it lives in each CPU's
lowcore, and the lowcore pages are only allocated when a CPU is brought up
and are freed again when it is brought down.
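Concretely, the lowcore lifecycle in arch/s390/kernel/smp.c runs along these lines (a heavily abbreviated sketch; stack handling, most field setup, and error paths are elided, and details are from memory rather than verbatim):

/*
 * Abbreviated sketch of the s390 lowcore lifecycle
 * (arch/s390/kernel/smp.c); most setup and error handling elided.
 */
static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
	struct lowcore *lc;

	/*
	 * The backing pages for this CPU's lowcore only come into
	 * existence here, at bringup time, so this is the earliest
	 * point at which lc->preempt_count can be seeded (which is
	 * exactly what the hunk below adds).
	 */
	lc = (struct lowcore *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
	if (!lc)
		return -ENOMEM;
	/* ... remaining lowcore setup ... */
	lowcore_ptr[cpu] = lc;
	return 0;
}

static void pcpu_free_lowcore(struct pcpu *pcpu)
{
	int cpu = pcpu - pcpu_devices;	/* sketch: recover the CPU number */

	/* ... stacks and per-CPU resources released ... */
	free_pages((unsigned long) lowcore_ptr[cpu], LC_ORDER);
	lowcore_ptr[cpu] = NULL;	/* gone again until the next bringup */
}

The boot CPU's lowcore is set up separately in setup_lowcore_dat_off(), which is why the fix touches arch/s390/kernel/setup.c as well as smp.c.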
Let the arch-specific code set a CPU's preempt_count when its lowcore is
allocated, and turn init_idle_preempt_count() into an empty stub.
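For reference, PREEMPT_DISABLED is the initializer introduced by the commit named in the Fixes: tag; in include/linux/preempt.h it is defined along these lines (quoted from memory, so treat as approximate):

#define PREEMPT_ENABLED		(0 + PREEMPT_NEED_RESCHED)

/* A preempt_count equivalent to having run preempt_disable() once */
#define PREEMPT_DISABLED	(PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)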
Fixes: f1a0a376ca0c ("sched/core: Initialize the idle task with preemption disabled")
Reported-by: Guenter Roeck <linux@roeck-us.net>
Signed-off-by: Valentin Schneider <valentin.schneider@arm.com>
Tested-by: Guenter Roeck <linux@roeck-us.net>
Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
Link: https://lore.kernel.org/r/20210707163338.1623014-1-valentin.schneider@arm.com
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Diffstat (limited to 'arch/s390')
-rw-r--r--	arch/s390/include/asm/preempt.h	| 16 ++++------------
-rw-r--r--	arch/s390/kernel/setup.c	|  1 +
-rw-r--r--	arch/s390/kernel/smp.c		|  1 +
3 files changed, 6 insertions(+), 12 deletions(-)
diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h
index 23ff51be7e29..d9d5350cc3ec 100644
--- a/arch/s390/include/asm/preempt.h
+++ b/arch/s390/include/asm/preempt.h
@@ -29,12 +29,6 @@ static inline void preempt_count_set(int pc)
 				  old, new) != old);
 }
 
-#define init_task_preempt_count(p)	do { } while (0)
-
-#define init_idle_preempt_count(p, cpu)	do { \
-	S390_lowcore.preempt_count = PREEMPT_DISABLED; \
-} while (0)
-
 static inline void set_preempt_need_resched(void)
 {
 	__atomic_and(~PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
@@ -88,12 +82,6 @@ static inline void preempt_count_set(int pc)
 	S390_lowcore.preempt_count = pc;
 }
 
-#define init_task_preempt_count(p)	do { } while (0)
-
-#define init_idle_preempt_count(p, cpu)	do { \
-	S390_lowcore.preempt_count = PREEMPT_DISABLED; \
-} while (0)
-
 static inline void set_preempt_need_resched(void)
 {
 }
@@ -130,6 +118,10 @@ static inline bool should_resched(int preempt_offset)
 
 #endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
 
+#define init_task_preempt_count(p)	do { } while (0)
+/* Deferred to CPU bringup time */
+#define init_idle_preempt_count(p, cpu)	do { } while (0)
+
 #ifdef CONFIG_PREEMPTION
 extern void preempt_schedule(void);
 #define __preempt_schedule() preempt_schedule()
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index d55c636e4889..9e1f538e58e6 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -442,6 +442,7 @@ static void __init setup_lowcore_dat_off(void)
 	lc->br_r1_trampoline = 0x07f1;	/* br %r1 */
 	lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
 	lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
+	lc->preempt_count = PREEMPT_DISABLED;
 
 	set_prefix((u32)(unsigned long) lc);
 	lowcore_ptr[0] = lc;
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index a0a8c0abf7de..8984711f72ed 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -210,6 +210,7 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
 	lc->br_r1_trampoline = 0x07f1;	/* br %r1 */
 	lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
 	lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
+	lc->preempt_count = PREEMPT_DISABLED;
 	if (nmi_alloc_per_cpu(lc))
 		goto out;
 	lowcore_ptr[cpu] = lc;