author | Ingo Molnar <mingo@kernel.org> | 2020-04-13 09:44:39 +0200
committer | Ingo Molnar <mingo@kernel.org> | 2020-04-13 09:44:39 +0200
commit | 3b02a051d25d9600e9d403ad3043aed7de00160e (patch)
tree | 5b8f58b79328c04654bf5ab6286401057edeca8f /arch/powerpc/kernel/process.c
parent | kcsan, trace: Make KCSAN compatible with tracing (diff)
parent | Linux 5.7-rc1 (diff)
Merge tag 'v5.7-rc1' into locking/kcsan, to resolve conflicts and refresh
Resolve these conflicts:
arch/x86/Kconfig
arch/x86/kernel/Makefile
Do a minor "evil merge" that moves the KCSAN entry up by a few lines
in the Kconfig, to reduce the probability of future conflicts.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/powerpc/kernel/process.c')
-rw-r--r-- | arch/powerpc/kernel/process.c | 124
1 file changed, 79 insertions, 45 deletions
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index fad50db9dcf2..9c21288f8645 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -236,23 +236,9 @@ void enable_kernel_fp(void)
 	}
 }
 EXPORT_SYMBOL(enable_kernel_fp);
-
-static int restore_fp(struct task_struct *tsk)
-{
-	if (tsk->thread.load_fp) {
-		load_fp_state(&current->thread.fp_state);
-		current->thread.load_fp++;
-		return 1;
-	}
-	return 0;
-}
-#else
-static int restore_fp(struct task_struct *tsk) { return 0; }
 #endif /* CONFIG_PPC_FPU */
 
 #ifdef CONFIG_ALTIVEC
-#define loadvec(thr) ((thr).load_vec)
-
 static void __giveup_altivec(struct task_struct *tsk)
 {
 	unsigned long msr;
@@ -318,21 +304,6 @@ void flush_altivec_to_thread(struct task_struct *tsk)
 	}
 }
 EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
-
-static int restore_altivec(struct task_struct *tsk)
-{
-	if (cpu_has_feature(CPU_FTR_ALTIVEC) && (tsk->thread.load_vec)) {
-		load_vr_state(&tsk->thread.vr_state);
-		tsk->thread.used_vr = 1;
-		tsk->thread.load_vec++;
-
-		return 1;
-	}
-	return 0;
-}
-#else
-#define loadvec(thr) 0
-static inline int restore_altivec(struct task_struct *tsk) { return 0; }
 #endif /* CONFIG_ALTIVEC */
 
 #ifdef CONFIG_VSX
@@ -400,18 +371,6 @@ void flush_vsx_to_thread(struct task_struct *tsk)
 	}
 }
 EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
-
-static int restore_vsx(struct task_struct *tsk)
-{
-	if (cpu_has_feature(CPU_FTR_VSX)) {
-		tsk->thread.used_vsr = 1;
-		return 1;
-	}
-
-	return 0;
-}
-#else
-static inline int restore_vsx(struct task_struct *tsk) { return 0; }
 #endif /* CONFIG_VSX */
 
 #ifdef CONFIG_SPE
@@ -511,6 +470,53 @@ void giveup_all(struct task_struct *tsk)
 }
 EXPORT_SYMBOL(giveup_all);
 
+#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_FPU
+static int restore_fp(struct task_struct *tsk)
+{
+	if (tsk->thread.load_fp) {
+		load_fp_state(&current->thread.fp_state);
+		current->thread.load_fp++;
+		return 1;
+	}
+	return 0;
+}
+#else
+static int restore_fp(struct task_struct *tsk) { return 0; }
+#endif /* CONFIG_PPC_FPU */
+
+#ifdef CONFIG_ALTIVEC
+#define loadvec(thr) ((thr).load_vec)
+static int restore_altivec(struct task_struct *tsk)
+{
+	if (cpu_has_feature(CPU_FTR_ALTIVEC) && (tsk->thread.load_vec)) {
+		load_vr_state(&tsk->thread.vr_state);
+		tsk->thread.used_vr = 1;
+		tsk->thread.load_vec++;
+
+		return 1;
+	}
+	return 0;
+}
+#else
+#define loadvec(thr) 0
+static inline int restore_altivec(struct task_struct *tsk) { return 0; }
+#endif /* CONFIG_ALTIVEC */
+
+#ifdef CONFIG_VSX
+static int restore_vsx(struct task_struct *tsk)
+{
+	if (cpu_has_feature(CPU_FTR_VSX)) {
+		tsk->thread.used_vsr = 1;
+		return 1;
+	}
+
+	return 0;
+}
+#else
+static inline int restore_vsx(struct task_struct *tsk) { return 0; }
+#endif /* CONFIG_VSX */
+
 /*
  * The exception exit path calls restore_math() with interrupts hard disabled
  * but the soft irq state not "reconciled". ftrace code that calls
@@ -551,6 +557,7 @@ void notrace restore_math(struct pt_regs *regs)
 
 	regs->msr = msr;
 }
+#endif
 
 static void save_all(struct task_struct *tsk)
 {
@@ -1634,11 +1641,9 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
 		p->thread.regs = childregs;
 		childregs->gpr[3] = 0;  /* Result from fork() */
 		if (clone_flags & CLONE_SETTLS) {
-#ifdef CONFIG_PPC64
 			if (!is_32bit_task())
 				childregs->gpr[13] = tls;
 			else
-#endif
 				childregs->gpr[2] = tls;
 		}
 
@@ -1976,6 +1981,32 @@ static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
 	return 0;
 }
 
+static inline int valid_emergency_stack(unsigned long sp, struct task_struct *p,
+					unsigned long nbytes)
+{
+#ifdef CONFIG_PPC64
+	unsigned long stack_page;
+	unsigned long cpu = task_cpu(p);
+
+	stack_page = (unsigned long)paca_ptrs[cpu]->emergency_sp - THREAD_SIZE;
+	if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
+		return 1;
+
+# ifdef CONFIG_PPC_BOOK3S_64
+	stack_page = (unsigned long)paca_ptrs[cpu]->nmi_emergency_sp - THREAD_SIZE;
+	if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
+		return 1;
+
+	stack_page = (unsigned long)paca_ptrs[cpu]->mc_emergency_sp - THREAD_SIZE;
+	if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
+		return 1;
+# endif
+#endif
+
+	return 0;
+}
+
+
 int validate_sp(unsigned long sp, struct task_struct *p,
 		unsigned long nbytes)
 {
@@ -1987,7 +2018,10 @@ int validate_sp(unsigned long sp, struct task_struct *p,
 	if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
 		return 1;
 
-	return valid_irq_stack(sp, p, nbytes);
+	if (valid_irq_stack(sp, p, nbytes))
+		return 1;
+
+	return valid_emergency_stack(sp, p, nbytes);
 }
 EXPORT_SYMBOL(validate_sp);
 
@@ -2053,7 +2087,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
 	sp = (unsigned long) stack;
 	if (sp == 0) {
 		if (tsk == current)
-			sp = current_stack_pointer();
+			sp = current_stack_frame();
 		else
 			sp = tsk->thread.ksp;
 	}
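A note on the validate_sp() change above: the new valid_emergency_stack() helper applies the same range test as the existing checks, accepting a stack pointer only if it lies inside a known stack region and still leaves nbytes of room for a frame below the top. The standalone sketch below isolates that bounds check; it is not kernel code, and the helper name within_stack_region() plus the THREAD_SIZE value are assumptions made for illustration only.

#include <stdio.h>

/* Illustrative only: the real THREAD_SIZE comes from the kernel config. */
#define THREAD_SIZE (16UL * 1024)

/*
 * Same shape as the checks in validate_sp()/valid_emergency_stack():
 * sp must fall within [stack_page, stack_page + THREAD_SIZE - nbytes],
 * so that a frame of 'nbytes' still fits inside the stack region.
 */
static int within_stack_region(unsigned long sp, unsigned long stack_page,
			       unsigned long nbytes)
{
	return sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes;
}

int main(void)
{
	unsigned long stack_page = 0xc000000000010000UL;	/* hypothetical base */
	unsigned long nbytes = 112;				/* minimum frame size */

	/* Somewhere in the middle of the region: accepted (prints 1). */
	printf("%d\n", within_stack_region(stack_page + 0x1000, stack_page, nbytes));
	/* Too close to the top to fit another frame: rejected (prints 0). */
	printf("%d\n", within_stack_region(stack_page + THREAD_SIZE - 8, stack_page, nbytes));
	/* Below the region entirely: rejected (prints 0). */
	printf("%d\n", within_stack_region(stack_page - 8, stack_page, nbytes));
	return 0;
}

In the kernel itself, as the diff shows, stack_page is derived from the per-CPU paca pointers (emergency_sp, nmi_emergency_sp, mc_emergency_sp), each of which points THREAD_SIZE above the base of its emergency stack region.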