author     H. Peter Anvin <hpa@linux.intel.com>  2012-09-21 21:43:14 +0200
committer  H. Peter Anvin <hpa@linux.intel.com>  2012-09-21 21:45:27 +0200
commit     40d3cd6695014bf3c44e2ca66b610b18acaf923d (patch)
tree       535ecc7a30ea16221efbd16fcd1717e8ed4b6183 /arch/x86/mm/fault.c
parent     x86, smap: Turn on Supervisor Mode Access Prevention (diff)
download   linux-40d3cd6695014bf3c44e2ca66b610b18acaf923d.tar.xz
           linux-40d3cd6695014bf3c44e2ca66b610b18acaf923d.zip
x86, smap: A page fault due to SMAP is an oops
If we get a page fault due to SMAP, trigger an oops rather than spinning
forever.

Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Link: http://lkml.kernel.org/r/1348256595-29119-11-git-send-email-hpa@linux.intel.com
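For readers skimming the diff below, the check this patch adds can be read as
follows. This is an annotated restatement, not kernel source: the identifiers
PF_USER, X86_EFLAGS_AC and user_mode_vm() come from the patch itself, and the
comments are editorial.

static inline bool smap_violation(int error_code, struct pt_regs *regs)
{
	/* Faults taken from user mode are never SMAP violations:
	 * SMAP only restricts supervisor-mode accesses to user pages. */
	if (error_code & PF_USER)
		return false;

	/* Kernel code that deliberately touches user memory runs with
	 * EFLAGS.AC set; a kernel-mode fault with AC set is therefore
	 * an allowed access, not a violation. */
	if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
		return false;

	/* Otherwise: a supervisor access to a user page with AC clear.
	 * The caller reports it via bad_area_nosemaphore(), which for a
	 * kernel-mode fault ends in an oops instead of retrying forever. */
	return true;
}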
Diffstat (limited to 'arch/x86/mm/fault.c')
-rw-r--r--  arch/x86/mm/fault.c  18
1 file changed, 18 insertions, 0 deletions
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 76dcd9d8e0bc..f2fb75d46b96 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -995,6 +995,17 @@ static int fault_in_kernel_space(unsigned long address)
 	return address >= TASK_SIZE_MAX;
 }
 
+static inline bool smap_violation(int error_code, struct pt_regs *regs)
+{
+	if (error_code & PF_USER)
+		return false;
+
+	if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
+		return false;
+
+	return true;
+}
+
 /*
  * This routine handles page faults.  It determines the address,
  * and the problem, and then passes it off to one of the appropriate
@@ -1088,6 +1099,13 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	if (unlikely(error_code & PF_RSVD))
 		pgtable_bad(regs, error_code, address);
 
+	if (static_cpu_has(X86_FEATURE_SMAP)) {
+		if (unlikely(smap_violation(error_code, regs))) {
+			bad_area_nosemaphore(regs, error_code, address);
+			return;
+		}
+	}
+
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
 	/*
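Background on the X86_EFLAGS_AC test above: earlier patches in this series add
the stac()/clac() helpers (arch/x86/include/asm/smap.h), which set and clear
EFLAGS.AC around intentional user-memory accesses. The sketch below is
illustrative only; in the real kernel the STAC/CLAC instructions are emitted
inside the uaccess primitives themselves, and example_user_access() is a
hypothetical name.

#include <asm/smap.h>	/* stac()/clac(), added earlier in this series */

/*
 * Illustrative only: a deliberate supervisor-mode access to user memory
 * under SMAP is bracketed so that EFLAGS.AC is set while the access runs.
 * If such an access faults, regs->flags still has AC set, so the
 * smap_violation() check above reports "no violation" and the fault is
 * handled normally.
 */
static void example_user_access(void)
{
	stac();		/* EFLAGS.AC = 1: allow supervisor access to user pages */
	/* ... access user memory here, e.g. via the uaccess helpers ... */
	clac();		/* EFLAGS.AC = 0: re-arm SMAP protection */
}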