author    Dave Hansen <dave.hansen@linux.intel.com>  2015-06-07 20:37:04 +0200
committer Ingo Molnar <mingo@kernel.org>  2015-06-09 12:24:33 +0200
commit    a1149fc83a1f97612e72ec24a0bdbabff7b85e77 (patch)
tree      6bc1db6d8c3cbf20c3b3fe30de585a41a7180c2e /arch/x86/mm
parent    x86: Make is_64bit_mm() widely available (diff)
x86/mpx: Add temporary variable to reduce masking
When we allocate a bounds table, we call mmap(), then add a "valid" bit to
the value before storing it into the bounds directory. If we fail along the
way, we go and mask that valid bit _back_ out. That seems a little silly,
and using a temporary variable makes it much clearer when we have a plain
address versus an actual table _entry_.

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Dave Hansen <dave@sr71.net>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20150607183704.3D69D5F4@viggo.jf.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
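To illustrate the pattern the patch moves to, here is a minimal user-space
sketch, not the kernel code itself: mpx_mmap(), MPX_BD_ENTRY_VALID_FLAG,
MPX_BT_SIZE_BYTES and user_atomic_cmpxchg_inatomic() are replaced by plain
libc calls and illustrative names (VALID_FLAG, TABLE_SIZE, install_entry).
It shows the raw table address kept in one variable and the directory entry
(address plus valid bit) kept in another, so the error path can unmap the
plain address without masking the flag back out.

/*
 * Sketch only; names and sizes below are assumptions, not the MPX code.
 */
#include <stdio.h>
#include <stdint.h>
#include <sys/mman.h>

#define VALID_FLAG	0x1UL
#define TABLE_SIZE	(4UL * 4096)

/* Stand-in for publishing the entry in the directory; pretend it can fail. */
static int install_entry(uintptr_t *slot, uintptr_t entry)
{
	*slot = entry;
	return 0;
}

static int allocate_table(uintptr_t *dir_slot)
{
	void *table;
	uintptr_t table_addr;
	uintptr_t new_entry;	/* table_addr | VALID_FLAG */
	int ret;

	table = mmap(NULL, TABLE_SIZE, PROT_READ | PROT_WRITE,
		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (table == MAP_FAILED)
		return -1;
	table_addr = (uintptr_t)table;

	/* The valid bit lives only in the entry, never in table_addr. */
	new_entry = table_addr | VALID_FLAG;

	ret = install_entry(dir_slot, new_entry);
	if (ret) {
		/* No masking needed: table_addr was never modified. */
		munmap((void *)table_addr, TABLE_SIZE);
		return ret;
	}
	return 0;
}

int main(void)
{
	uintptr_t slot = 0;

	if (allocate_table(&slot) == 0)
		printf("entry: %#lx (valid bit: %lu)\n",
		       (unsigned long)slot, (unsigned long)(slot & VALID_FLAG));
	return 0;
}

Because mmap() returns a page-aligned address, the low bit is guaranteed to
be free for the valid flag, which is the same property the MPX code relies
on for its directory entries.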
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/mpx.c | 7
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index c17fd27579af..4f7fb7c233cc 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -429,6 +429,7 @@ static int allocate_bt(long __user *bd_entry)
unsigned long expected_old_val = 0;
unsigned long actual_old_val = 0;
unsigned long bt_addr;
+ unsigned long bd_new_entry;
int ret = 0;
/*
@@ -441,7 +442,7 @@ static int allocate_bt(long __user *bd_entry)
/*
* Set the valid flag (kinda like _PAGE_PRESENT in a pte)
*/
- bt_addr = bt_addr | MPX_BD_ENTRY_VALID_FLAG;
+ bd_new_entry = bt_addr | MPX_BD_ENTRY_VALID_FLAG;
/*
* Go poke the address of the new bounds table in to the
@@ -455,7 +456,7 @@ static int allocate_bt(long __user *bd_entry)
* of the MPX code that have to pagefault_disable().
*/
ret = user_atomic_cmpxchg_inatomic(&actual_old_val, bd_entry,
- expected_old_val, bt_addr);
+ expected_old_val, bd_new_entry);
if (ret)
goto out_unmap;
@@ -486,7 +487,7 @@ static int allocate_bt(long __user *bd_entry)
trace_mpx_new_bounds_table(bt_addr);
return 0;
out_unmap:
- vm_munmap(bt_addr & MPX_BT_ADDR_MASK, MPX_BT_SIZE_BYTES);
+ vm_munmap(bt_addr, MPX_BT_SIZE_BYTES);
return ret;
}