author		Sebastian Andrzej Siewior <bigeasy@linutronix.de>	2019-04-03 18:41:50 +0200
committer	Borislav Petkov <bp@suse.de>	2019-04-12 15:02:41 +0200
commit		926b21f37b072ae4c117052de45a975c6d468fec (patch)
tree		510d711c87092667752e4b5c67e2650325866111 /arch/x86/kernel/fpu
parent		x86/fpu: Inline copy_user_to_fpregs_zeroing() (diff)
x86/fpu: Restore from kernel memory on the 64-bit path too
The 64-bit case (both 64-bit and 32-bit frames) loads the new state from
user memory. However, doing this is not desired if the FPU state is
going to be restored on return to userland: it would be required to
disable preemption in order to avoid a context switch which would set
TIF_NEED_FPU_LOAD. If this happens before the restore operation then the
loaded registers would become volatile.

Furthermore, disabling preemption while accessing user memory requires
disabling the pagefault handler. An error during FXRSTOR would then mean
that either a page fault occurred (and it would have to be retried with
the page fault handler enabled) or a #GP occurred because the xstate is
bogus (after all, the signal handler can modify it).

In order to avoid that mess, copy the FPU state from userland, validate
it and then load it. The copy_kernel_…() helpers are basically just like
the old helpers, except that they operate on kernel memory and the fault
handler just sets the error value and the caller handles it.

copy_user_to_fpregs_zeroing() and its helpers remain and will be used
later for a fastpath optimisation.

 [ bp: Clarify commit message. ]

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Dave Hansen <dave.hansen@intel.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Aubrey Li <aubrey.li@intel.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jann Horn <jannh@google.com>
Cc: "Jason A. Donenfeld" <Jason@zx2c4.com>
Cc: kvm ML <kvm@vger.kernel.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: x86-ml <x86@kernel.org>
Link: https://lkml.kernel.org/r/20190403164156.19645-22-bigeasy@linutronix.de
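To make the ordering concrete, below is a minimal user-space sketch of the copy/validate/load pattern described above. It is illustration only, not kernel code: the names xstate_frame, frame_copy_validate_load, validate_frame and load_registers as well as the 0x3/0x7 feature masks are invented here, and memcpy()/aligned_alloc() merely stand in for __copy_from_user() and the kzalloc() + PTR_ALIGN(tmp, 64) pair used in the patch.

/*
 * Hypothetical user-space analogue of the new signal-restore flow:
 * copy the (untrusted) frame into an aligned buffer, validate it,
 * and only then load it.  Nothing below is kernel API.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct xstate_frame {
	uint64_t xfeatures;	/* stand-in for the xstate header */
	uint8_t  data[504];	/* stand-in for the register image */
};

/* Reject unknown feature bits, loosely modelled on validate_xstate_header(). */
static int validate_frame(const struct xstate_frame *f, uint64_t allowed_mask)
{
	return (f->xfeatures & ~allowed_mask) ? -EINVAL : 0;
}

/* Pretend register load that reports failure instead of faulting. */
static int load_registers(const struct xstate_frame *f)
{
	printf("loading state with feature mask %#llx\n",
	       (unsigned long long)f->xfeatures);
	return 0;
}

static int frame_copy_validate_load(const void *user_buf, size_t size,
				    uint64_t allowed_mask)
{
	struct xstate_frame *state;
	int ret;

	if (size != sizeof(*state))
		return -EINVAL;

	/* 64-byte alignment mirrors PTR_ALIGN(tmp, 64) in the patch. */
	state = aligned_alloc(64, sizeof(*state));
	if (!state)
		return -ENOMEM;

	/* memcpy() stands in for __copy_from_user(). */
	memcpy(state, user_buf, size);

	ret = validate_frame(state, allowed_mask);
	if (!ret)
		ret = load_registers(state);	/* only a bad frame can fail now */

	free(state);
	return ret;
}

int main(void)
{
	struct xstate_frame user_frame = { .xfeatures = 0x3 };	/* FP | SSE */
	int ret = frame_copy_validate_load(&user_frame, sizeof(user_frame), 0x7);

	printf("restore returned %d\n", ret);
	return ret ? 1 : 0;
}

The point of the ordering is the one the commit message makes: once the state sits in kernel memory and has been validated, a failing load can only mean malformed register state rather than a page fault, so the caller can handle any error uniformly (in the patch, by clearing the FPU state).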
Diffstat (limited to 'arch/x86/kernel/fpu')
-rw-r--r--	arch/x86/kernel/fpu/signal.c	62
1 file changed, 49 insertions(+), 13 deletions(-)
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index c2ff43fbbd07..9ea1eaa4c9b1 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -234,7 +234,8 @@ sanitize_restored_xstate(union fpregs_state *state,
 		 */
 		xsave->i387.mxcsr &= mxcsr_feature_mask;
-		convert_to_fxsr(&state->fxsave, ia32_env);
+		if (ia32_env)
+			convert_to_fxsr(&state->fxsave, ia32_env);
 	}
 }
@@ -337,28 +338,63 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
 		kfree(tmp);
 		return err;
 	} else {
+		union fpregs_state *state;
+		void *tmp;
 		int ret;
+		tmp = kzalloc(sizeof(*state) + fpu_kernel_xstate_size + 64, GFP_KERNEL);
+		if (!tmp)
+			return -ENOMEM;
+		state = PTR_ALIGN(tmp, 64);
+
 		/*
 		 * For 64-bit frames and 32-bit fsave frames, restore the user
 		 * state to the registers directly (with exceptions handled).
 		 */
-		if (use_xsave()) {
-			if ((unsigned long)buf_fx % 64 || fx_only) {
-				u64 init_bv = xfeatures_mask & ~XFEATURE_MASK_FPSSE;
-				copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
-				ret = copy_user_to_fxregs(buf_fx);
+		if ((unsigned long)buf_fx % 64)
+			fx_only = 1;
+
+		if (use_xsave() && !fx_only) {
+			u64 init_bv = xfeatures_mask & ~xfeatures;
+
+			if (using_compacted_format()) {
+				ret = copy_user_to_xstate(&state->xsave, buf_fx);
 			} else {
-				u64 init_bv = xfeatures_mask & ~xfeatures;
-				if (unlikely(init_bv))
-					copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
-				ret = copy_user_to_xregs(buf_fx, xfeatures);
+				ret = __copy_from_user(&state->xsave, buf_fx, state_size);
+
+				if (!ret && state_size > offsetof(struct xregs_state, header))
+					ret = validate_xstate_header(&state->xsave.header);
 			}
+			if (ret)
+				goto err_out;
+
+			sanitize_restored_xstate(state, NULL, xfeatures, fx_only);
+
+			if (unlikely(init_bv))
+				copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
+			ret = copy_kernel_to_xregs_err(&state->xsave, xfeatures);
+
 		} else if (use_fxsr()) {
-			ret = copy_user_to_fxregs(buf_fx);
-		} else
-			ret = copy_user_to_fregs(buf_fx);
+			ret = __copy_from_user(&state->fxsave, buf_fx, state_size);
+			if (ret)
+				goto err_out;
+			if (use_xsave()) {
+				u64 init_bv = xfeatures_mask & ~XFEATURE_MASK_FPSSE;
+				copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
+			}
+			state->fxsave.mxcsr &= mxcsr_feature_mask;
+
+			ret = copy_kernel_to_fxregs_err(&state->fxsave);
+		} else {
+			ret = __copy_from_user(&state->fsave, buf_fx, state_size);
+			if (ret)
+				goto err_out;
+			ret = copy_kernel_to_fregs_err(&state->fsave);
+		}
+
+err_out:
+		kfree(tmp);
 		if (ret) {
 			fpu__clear(fpu);
 			return -1;