| author | Fenghua Yu <fenghua.yu@intel.com> | 2014-05-29 20:12:38 +0200 |
|---|---|---|
| committer | H. Peter Anvin <hpa@linux.intel.com> | 2014-05-29 23:32:57 +0200 |
| commit | facbf4d91ae64f84ef93a00e4037135cd9f4b2ab | |
| tree | b2f8cdf1e2ecfd04cf901421f03fd340581a3d1f /arch/x86/include/asm/xsave.h | |
| parent | x86/xsaves: Use xsaves/xrstors for context switch | |
x86/xsaves: Use xsave/xrstor for saving and restoring user space context
We use the legacy xsave/xrstor instructions to save and restore the standard
form of the xsave area in user space context. Neither xsaveopt nor xsaves is
used here, for two reasons.
First, we don't want the modified optimization implemented by xsaveopt and
xsaves, because xrstor/xrstors might be tracking the wrong user space
application.
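To illustrate that first point, here is a small stand-alone C sketch of the
condition under which the modified optimization may skip writing a state
component. The struct and function names are hypothetical, not kernel code;
the sketch only shows why a save into a different application's buffer cannot
rely on the optimization.

```c
#include <stdbool.h>
#include <stdint.h>

/*
 * Hypothetical model of the hardware's modified-optimization tracking:
 * xsaveopt/xsaves may skip writing a state component it believes is
 * unchanged since the last xrstor, a decision tied to the buffer that
 * xrstor last used.  None of these names exist in the kernel.
 */
struct modified_opt_tracking {
	uint64_t last_xrstor_buf;	/* linear address used by the last xrstor */
	uint64_t unmodified_mask;	/* components unchanged since that xrstor */
};

/* May component 'i' be skipped when saving to 'dst'? */
static bool may_skip_component(const struct modified_opt_tracking *t,
			       uint64_t dst, int i)
{
	/*
	 * Skipping is only safe when saving back to the very buffer that
	 * was last restored.  A signal frame belonging to a different user
	 * application is a different buffer, so the optimization could
	 * leave stale data there; hence plain xsave is used for user
	 * space context.
	 */
	return dst == t->last_xrstor_buf && (t->unmodified_mask & (1ULL << i));
}
```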
Second, we don't use the compacted format of the xsave area, for backward
compatibility: legacy user space applications understand only the standard
format of the xsave area, not the compacted one.
Using the standard form of the xsave area may allocate more memory for the
user context than the compacted form would, but it preserves compatibility
with legacy applications. Furthermore, even with the resulting holes, the
relevant cache lines don't get touched, so the performance impact is limited.
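For reference, the two layouts are distinguished by bit 63 of XCOMP_BV in the
64-byte xsave area header that follows the 512-byte legacy region. The sketch
below is an illustrative, stand-alone view with hypothetical struct names, not
the kernel's definitions.

```c
#include <stdbool.h>
#include <stdint.h>

/* Bit 63 of XCOMP_BV is set by xsaves when it writes the compacted format. */
#define XCOMP_BV_COMPACTED_FORMAT	(1ULL << 63)

/*
 * Hypothetical view of the xsave area header at offset 512; the kernel has
 * its own definition of this layout.
 */
struct xsave_header_sketch {
	uint64_t xstate_bv;	/* which state components hold valid data */
	uint64_t xcomp_bv;	/* compaction bitmap; bit 63 = compacted   */
	uint64_t reserved[6];
};

/*
 * Legacy applications parse the xsave area assuming the standard format,
 * i.e. fixed, architecturally defined offsets for each component, which
 * holds only while the compacted-format bit is clear.
 */
static bool is_standard_format(const struct xsave_header_sketch *hdr)
{
	return !(hdr->xcomp_bv & XCOMP_BV_COMPACTED_FORMAT);
}
```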
Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
Link: http://lkml.kernel.org/r/1401387164-43416-11-git-send-email-fenghua.yu@intel.com
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'arch/x86/include/asm/xsave.h')
-rw-r--r-- | arch/x86/include/asm/xsave.h | 33 |
1 file changed, 18 insertions, 15 deletions
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index 8b75824e41dd..0d1523146545 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -145,6 +145,16 @@ static inline int fpu_xrstor_checking(struct xsave_struct *fx)
         return xrstor_state(fx, -1);
 }
 
+/*
+ * Save xstate to user space xsave area.
+ *
+ * We don't use modified optimization because xrstor/xrstors might track
+ * a different application.
+ *
+ * We don't use compacted format xsave area for
+ * backward compatibility for old applications which don't understand
+ * compacted format of xsave area.
+ */
 static inline int xsave_user(struct xsave_struct __user *buf)
 {
         int err;
@@ -158,35 +168,28 @@ static inline int xsave_user(struct xsave_struct __user *buf)
                 return -EFAULT;
 
         __asm__ __volatile__(ASM_STAC "\n"
-                             "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
+                             "1:"XSAVE"\n"
                              "2: " ASM_CLAC "\n"
-                             ".section .fixup,\"ax\"\n"
-                             "3: movl $-1,%[err]\n"
-                             "   jmp 2b\n"
-                             ".previous\n"
-                             _ASM_EXTABLE(1b,3b)
-                             : [err] "=r" (err)
+                             xstate_fault
                              : "D" (buf), "a" (-1), "d" (-1), "0" (0)
                              : "memory");
         return err;
 }
 
+/*
+ * Restore xstate from user space xsave area.
+ */
 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
 {
-        int err;
+        int err = 0;
         struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
         u32 lmask = mask;
         u32 hmask = mask >> 32;
 
         __asm__ __volatile__(ASM_STAC "\n"
-                             "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
+                             "1:"XRSTOR"\n"
                              "2: " ASM_CLAC "\n"
-                             ".section .fixup,\"ax\"\n"
-                             "3: movl $-1,%[err]\n"
-                             "   jmp 2b\n"
-                             ".previous\n"
-                             _ASM_EXTABLE(1b,3b)
-                             : [err] "=r" (err)
+                             xstate_fault
                              : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
                              : "memory");   /* memory required? */
         return err;
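The XSAVE, XRSTOR and xstate_fault names that the new code relies on were
introduced earlier in this patch series. The sketch below shows roughly what
they expand to, inferred from the lines this patch removes rather than quoted
from the actual definitions.

```c
/*
 * Sketch only: inferred from the deleted lines above, not copied from the
 * definitions added earlier in the series.
 */
#define XSAVE		".byte " REX_PREFIX "0x0f,0xae,0x27"	/* xsave to (%rdi)    */
#define XRSTOR		".byte " REX_PREFIX "0x0f,0xae,0x2f"	/* xrstor from (%rdi) */

/*
 * Shared exception-table fixup: on a fault in the instruction at label 1,
 * jump to label 3, set err to -1 and resume at label 2.  It also supplies
 * the [err] output operand, which is why the callers drop their own
 * ': [err] "=r" (err)' line.
 */
#define xstate_fault	".section .fixup,\"ax\"\n"	\
			"3: movl $-1, %[err]\n"		\
			"   jmp 2b\n"			\
			".previous\n"			\
			_ASM_EXTABLE(1b, 3b)		\
			: [err] "=r" (err)
```

Folding the fixup boilerplate into one macro is what lets this patch replace
the hand-coded opcode bytes and per-function exception tables with the
single-line XSAVE/XRSTOR and xstate_fault uses seen in the diff.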