author | Anton Blanchard <anton@samba.org> | 2015-10-29 01:44:06 +0100
---|---|---
committer | Michael Ellerman <mpe@ellerman.id.au> | 2015-12-01 03:52:26 +0100
commit | 3eb5d5888dc68c9b187998ca4249b8b9fa481eeb (patch) |
tree | 16c17e1867d56207321c404f361c1115aac6320e /arch/powerpc/include/asm/switch_to.h |
parent | powerpc: Create disable_kernel_{fp,altivec,vsx,spe}() (diff) |
download | linux-3eb5d5888dc68c9b187998ca4249b8b9fa481eeb.tar.xz, linux-3eb5d5888dc68c9b187998ca4249b8b9fa481eeb.zip |
powerpc: Add ppc_strict_facility_enable boot option
Add a boot option, ppc_strict_facility_enable, that strictly manages the
MSR facility-unavailable bits. This catches kernel uses of
FP/Altivec/SPE that would otherwise silently corrupt user register
state.
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
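
Illustratively (a minimal usage sketch of the model this patch enforces, not code from the patch; the function name is hypothetical): kernel code that needs a facility brackets it with the existing enable_kernel_*() helper and the now-functional disable_kernel_*() counterpart, so under ppc_strict_facility_enable the facility bit is set only inside that window and any stray facility use elsewhere traps instead of clobbering user registers.

#include <linux/preempt.h>
#include <asm/switch_to.h>

/*
 * Hypothetical example, not part of this patch: a kernel FP section.
 * With ppc_strict_facility_enable, disable_kernel_fp() really clears
 * MSR_FP again, so FP use outside this window takes a facility
 * unavailable exception instead of silently corrupting user FP state.
 */
static void example_kernel_fp_section(void)
{
	preempt_disable();		/* facility state is per-CPU */
	enable_kernel_fp();		/* set MSR_FP for kernel use */

	/* ... kernel FP instructions ... */

	disable_kernel_fp();		/* msr_check_and_clear(MSR_FP) */
	preempt_enable();
}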
Diffstat (limited to 'arch/powerpc/include/asm/switch_to.h')
-rw-r--r-- | arch/powerpc/include/asm/switch_to.h | 24
1 file changed, 19 insertions, 5 deletions
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 438502f59550..9414dcb180d6 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -4,6 +4,8 @@
 #ifndef _ASM_POWERPC_SWITCH_TO_H
 #define _ASM_POWERPC_SWITCH_TO_H
 
+#include <asm/reg.h>
+
 struct thread_struct;
 struct task_struct;
 struct pt_regs;
@@ -26,15 +28,15 @@ extern void enable_kernel_spe(void);
 extern void load_up_spe(struct task_struct *);
 extern void switch_booke_debug_regs(struct debug_reg *new_debug);
 
-static inline void disable_kernel_fp(void) { }
-static inline void disable_kernel_altivec(void) { }
-static inline void disable_kernel_spe(void) { }
-static inline void disable_kernel_vsx(void) { }
-
 #ifdef CONFIG_PPC_FPU
 extern void flush_fp_to_thread(struct task_struct *);
 extern void giveup_fpu(struct task_struct *);
 extern void __giveup_fpu(struct task_struct *);
+static inline void disable_kernel_fp(void)
+{
+	msr_check_and_clear(MSR_FP);
+}
+
 #else
 static inline void flush_fp_to_thread(struct task_struct *t) { }
 static inline void giveup_fpu(struct task_struct *t) { }
@@ -45,6 +47,10 @@ static inline void __giveup_fpu(struct task_struct *t) { }
 extern void flush_altivec_to_thread(struct task_struct *);
 extern void giveup_altivec(struct task_struct *);
 extern void __giveup_altivec(struct task_struct *);
+static inline void disable_kernel_altivec(void)
+{
+	msr_check_and_clear(MSR_VEC);
+}
 #else
 static inline void flush_altivec_to_thread(struct task_struct *t) { }
 static inline void giveup_altivec(struct task_struct *t) { }
@@ -53,6 +59,10 @@ static inline void __giveup_altivec(struct task_struct *t) { }
 
 #ifdef CONFIG_VSX
 extern void flush_vsx_to_thread(struct task_struct *);
+static inline void disable_kernel_vsx(void)
+{
+	msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
+}
 #else
 static inline void flush_vsx_to_thread(struct task_struct *t)
 {
@@ -63,6 +73,10 @@ static inline void flush_vsx_to_thread(struct task_struct *t)
 extern void flush_spe_to_thread(struct task_struct *);
 extern void giveup_spe(struct task_struct *);
 extern void __giveup_spe(struct task_struct *);
+static inline void disable_kernel_spe(void)
+{
+	msr_check_and_clear(MSR_SPE);
+}
 #else
 static inline void flush_spe_to_thread(struct task_struct *t) { }
 static inline void giveup_spe(struct task_struct *t) { }
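
All four new inlines funnel into msr_check_and_clear(), introduced earlier in this series in arch/powerpc/kernel/process.c. Below is a simplified sketch of its behaviour, with a hypothetical name (the real helper also handles details such as the FP/VSX coupling): read the MSR, and only perform the comparatively expensive, context-synchronizing MSR write when one of the requested bits is actually set. This coupling is also why disable_kernel_vsx() clears MSR_FP|MSR_VEC|MSR_VSX: VSX instructions require the FP and VEC facilities as well, so all three bits travel together.

#include <asm/reg.h>	/* mfmsr(), mtmsr(), MSR_* bit definitions */

/*
 * Simplified sketch (hypothetical name) of the helper that the new
 * disable_kernel_*() inlines call. The real msr_check_and_clear() in
 * arch/powerpc/kernel/process.c also handles the FP/VSX coupling.
 */
static void msr_check_and_clear_sketch(unsigned long bits)
{
	unsigned long oldmsr = mfmsr();
	unsigned long newmsr = oldmsr & ~bits;

	/* Skip the MSR write entirely when no requested bit is set */
	if (oldmsr != newmsr)
		mtmsr(newmsr);
}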