diff options
author | Ingo Molnar <mingo@kernel.org> | 2015-04-23 11:55:18 +0200 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2015-05-19 15:47:22 +0200 |
commit | 276983f8085db4a5f4e2cdcda6bce29a1da97eb0 (patch) | |
tree | be37a667d5c54b511a68ef298b8a82824a883dd2 /arch/x86/include/asm/fpu-internal.h | |
parent | x86/fpu: Print out whether we are doing lazy/eager FPU context switches (diff) | |
download | linux-276983f8085db4a5f4e2cdcda6bce29a1da97eb0.tar.xz linux-276983f8085db4a5f4e2cdcda6bce29a1da97eb0.zip |
x86/fpu: Eliminate the __thread_has_fpu() wrapper
Start migrating FPU methods towards using 'struct fpu *fpu'
directly. __thread_has_fpu() is just a trivial wrapper around
fpu->has_fpu; eliminate it.
Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/include/asm/fpu-internal.h')
-rw-r--r-- | arch/x86/include/asm/fpu-internal.h | 16 |
1 files changed, 4 insertions, 12 deletions
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h index e180fb96dd0d..c005d1fc1247 100644 --- a/arch/x86/include/asm/fpu-internal.h +++ b/arch/x86/include/asm/fpu-internal.h @@ -323,16 +323,6 @@ static inline int restore_fpu_checking(struct task_struct *tsk) return fpu_restore_checking(&tsk->thread.fpu); } -/* - * Software FPU state helpers. Careful: these need to - * be preemption protection *and* they need to be - * properly paired with the CR0.TS changes! - */ -static inline int __thread_has_fpu(struct task_struct *tsk) -{ - return tsk->thread.fpu.has_fpu; -} - /* Must be paired with an 'stts' after! */ static inline void __thread_clear_has_fpu(struct task_struct *tsk) { @@ -370,13 +360,14 @@ static inline void __thread_fpu_begin(struct task_struct *tsk) static inline void drop_fpu(struct task_struct *tsk) { + struct fpu *fpu = &tsk->thread.fpu; /* * Forget coprocessor state.. */ preempt_disable(); tsk->thread.fpu.counter = 0; - if (__thread_has_fpu(tsk)) { + if (fpu->has_fpu) { /* Ignore delayed exceptions from user space */ asm volatile("1: fwait\n" "2:\n" @@ -424,6 +415,7 @@ typedef struct { int preload; } fpu_switch_t; static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu) { + struct fpu *old_fpu = &old->thread.fpu; fpu_switch_t fpu; /* @@ -433,7 +425,7 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta fpu.preload = tsk_used_math(new) && (use_eager_fpu() || new->thread.fpu.counter > 5); - if (__thread_has_fpu(old)) { + if (old_fpu->has_fpu) { if (!fpu_save_init(&old->thread.fpu)) task_disable_lazy_fpu_restore(old); else |