author:    H. Peter Anvin <hpa@linux.intel.com>  2012-09-27 03:02:28 +0200
committer: H. Peter Anvin <hpa@linux.intel.com>  2012-09-27 18:52:38 +0200
commit:    b2cc2a074de75671bbed5e2dda67a9252ef353ea
tree:      8e01fcacb8bad82c4351fa82b351ecc185fe8056 /arch/x86/kernel/cpu
parent:    x86, suspend: On wakeup always initialize cr4 and EFER
x86, smep, smap: Make the switching functions one-way
There is no fundamental reason why we should switch SMEP and SMAP on during early cpu initialization just to switch them off again. Now with %eflags and %cr4 forced to be initialized to a clean state, we only need the one-way enable. Also, make the functions inline to make them (somewhat) harder to abuse.

This does mean that SMEP and SMAP do not get initialized anywhere near as early. Even using early_param() instead of __setup() doesn't give us control early enough to do this during the early cpu initialization phase. This seems reasonable to me, because SMEP and SMAP should not matter until we have userspace to protect ourselves from, but it does potentially make it possible for a bug involving a "leak of permissions to userspace" to go uncaught.

Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
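For readers unfamiliar with the pattern, here is a minimal sketch of what the one-way enable amounts to: a read-modify-write of %cr4 that only ever sets bits. This is an illustration only, with hypothetical sketch_* names; the kernel's real set_in_cr4() additionally records the bit in mmu_cr4_features so that secondary CPUs and the resume path re-enable it.

/*
 * Illustrative sketch only (hypothetical sketch_* names): the one-way
 * enable ORs the feature bit into %cr4 and never clears it again.
 * The kernel's real set_in_cr4() also updates mmu_cr4_features.
 */
static inline unsigned long sketch_read_cr4(void)
{
	unsigned long val;

	asm volatile("mov %%cr4, %0" : "=r" (val));
	return val;
}

static inline void sketch_write_cr4(unsigned long val)
{
	asm volatile("mov %0, %%cr4" : : "r" (val));
}

static inline void sketch_set_in_cr4(unsigned long mask)
{
	sketch_write_cr4(sketch_read_cr4() | mask);	/* set, never clear */
}

Because nothing ever clears the bit after this point, re-enabling a feature the user disabled is impossible by construction; making the helpers __always_inline also removes them as callable symbols, which is the "(somewhat) harder to abuse" point above.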
Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r--  arch/x86/kernel/cpu/common.c | 49 ++++++++++++++++++-------------------------------
1 file changed, 18 insertions(+), 31 deletions(-)
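Context for the "nosmep"/"nosmap" handlers in the patch below: __setup() registers a command-line handler that is only invoked during the parse_args() pass in start_kernel(), well after early_identify_cpu() has run from setup_arch(); early_param() handlers run earlier (from parse_early_param()), but as the commit message notes, even that is not early enough. A rough sketch of the registration mechanism follows, simplified from memory of include/linux/init.h of that era (the real macros also place the string in __initconst and add alignment attributes), so treat the details as approximate:

struct obs_kernel_param {
	const char *str;		/* parameter name, e.g. "nosmap" */
	int (*setup_func)(char *);	/* handler, e.g. setup_disable_smap */
	int early;			/* 1: run from parse_early_param() */
};

/* Simplified sketch of the registration macros. */
#define __setup(str, fn)						\
	static struct obs_kernel_param __setup_##fn			\
		__attribute__((used, section(".init.setup")))		\
		= { str, fn, 0 }

#define early_param(str, fn)						\
	static struct obs_kernel_param __early_##fn			\
		__attribute__((used, section(".init.setup")))		\
		= { str, fn, 1 }

At boot, parse_early_param() roughly walks this table for entries with early set; everything else waits for parse_args() in start_kernel(). With this patch, booting with "nosmep" or "nosmap" therefore clears the feature bit via setup_clear_cpu_cap() in time for identify_cpu() to skip the CR4 write in setup_smep()/setup_smap(), whereas it could never have taken effect by early_identify_cpu() time.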
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 44aec5d4dfaf..fefd9b7e93e1 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -259,48 +259,36 @@ static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
}
#endif
-static int disable_smep __cpuinitdata;
static __init int setup_disable_smep(char *arg)
{
- disable_smep = 1;
+ setup_clear_cpu_cap(X86_FEATURE_SMEP);
return 1;
}
__setup("nosmep", setup_disable_smep);
-static __cpuinit void setup_smep(struct cpuinfo_x86 *c)
+static __always_inline void setup_smep(struct cpuinfo_x86 *c)
{
- if (cpu_has(c, X86_FEATURE_SMEP)) {
- if (unlikely(disable_smep)) {
- setup_clear_cpu_cap(X86_FEATURE_SMEP);
- clear_in_cr4(X86_CR4_SMEP);
- } else
- set_in_cr4(X86_CR4_SMEP);
- }
+ if (cpu_has(c, X86_FEATURE_SMEP))
+ set_in_cr4(X86_CR4_SMEP);
}
-static int disable_smap __cpuinitdata;
static __init int setup_disable_smap(char *arg)
{
- disable_smap = 1;
+ setup_clear_cpu_cap(X86_FEATURE_SMAP);
return 1;
}
__setup("nosmap", setup_disable_smap);
-static __cpuinit void setup_smap(struct cpuinfo_x86 *c)
+static __always_inline void setup_smap(struct cpuinfo_x86 *c)
{
- if (cpu_has(c, X86_FEATURE_SMAP)) {
- if (unlikely(disable_smap)) {
- setup_clear_cpu_cap(X86_FEATURE_SMAP);
- clear_in_cr4(X86_CR4_SMAP);
- } else {
- set_in_cr4(X86_CR4_SMAP);
- /*
- * Don't use clac() here since alternatives
- * haven't run yet...
- */
- asm volatile(__stringify(__ASM_CLAC) ::: "memory");
- }
- }
+ unsigned long eflags;
+
+ /* This should have been cleared long ago */
+ raw_local_save_flags(eflags);
+ BUG_ON(eflags & X86_EFLAGS_AC);
+
+ if (cpu_has(c, X86_FEATURE_SMAP))
+ set_in_cr4(X86_CR4_SMAP);
}
/*
@@ -737,9 +725,6 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
c->cpu_index = 0;
filter_cpuid_features(c, false);
- setup_smep(c);
- setup_smap(c);
-
if (this_cpu->c_bsp_init)
this_cpu->c_bsp_init(c);
}
@@ -824,8 +809,6 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
c->phys_proc_id = c->initial_apicid;
}
- setup_smep(c);
-
get_model_name(c); /* Default name */
detect_nopl(c);
@@ -890,6 +873,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
/* Disable the PN if appropriate */
squash_the_stupid_serial_number(c);
+ /* Set up SMEP/SMAP */
+ setup_smep(c);
+ setup_smap(c);
+
/*
* The vendor-specific functions might have changed features.
* Now we do "generic changes."