author     Thomas Gleixner <tglx@linutronix.de>    2019-04-14 17:59:48 +0200
committer  Borislav Petkov <bp@suse.de>            2019-04-17 12:58:21 +0200
commit     a4af767ae59cc579569bbfe49513a0037d5989ee (patch)
tree       9002343fbb6db1bdf4b8fb9945ce2e8659649cac /arch/x86/mm
parent     x86/exceptions: Add structs for exception stacks (diff)
x86/cpu_entry_area: Prepare for IST guard pages
To allow guard pages between the IST stacks, each stack needs to be
mapped individually.
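
The structure of the change can be sketched outside the kernel: reserve one
contiguous region with no access, then make only the stack-sized slots
accessible, so the pages in between stay unmapped and a stack overflow
faults instead of silently corrupting the neighboring stack. A minimal
userspace analogy follows (not the kernel code; NR_STACKS and STACK_SZ are
illustrative stand-ins for the four IST stacks and EXCEPTION_STKSZ):

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#define NR_STACKS 4            /* stand-ins for DF, NMI, DB, MCE */
#define STACK_SZ  (4 * 4096)   /* stand-in for EXCEPTION_STKSZ */

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	size_t slot = (size_t)page + STACK_SZ;  /* guard page + stack */
	size_t total = NR_STACKS * slot + page; /* plus one top guard */

	/* Reserve the whole range inaccessible: everything is guard. */
	char *area = mmap(NULL, total, PROT_NONE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (area == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/*
	 * Map each stack individually; the pages in between stay
	 * PROT_NONE and act as guard pages.
	 */
	for (int i = 0; i < NR_STACKS; i++) {
		char *stack = area + i * slot + page;
		if (mprotect(stack, STACK_SZ, PROT_READ | PROT_WRITE)) {
			perror("mprotect");
			return 1;
		}
		stack[0] = 1;	/* inside a stack: fine */
	}
	/* Writing to area[0] (a guard page) would deliver SIGSEGV. */
	printf("%d stacks mapped, guard pages in between\n", NR_STACKS);
	munmap(area, total);
	return 0;
}

The kernel does not use mprotect() on the cpu_entry_area, but the shape of
the change below is the same: one cea_map_percpu_pages() call per stack
instead of a single call covering the whole backing store, so the layout is
free to contain holes.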
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sean Christopherson <sean.j.christopherson@intel.com>
Cc: x86-ml <x86@kernel.org>
Link: https://lkml.kernel.org/r/20190414160144.592691557@linutronix.de
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/cpu_entry_area.c  37
1 file changed, 30 insertions, 7 deletions
diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
index 6a09b84c13fe..2b1407662a6d 100644
--- a/arch/x86/mm/cpu_entry_area.c
+++ b/arch/x86/mm/cpu_entry_area.c
@@ -77,6 +77,34 @@ static void __init percpu_setup_debug_store(unsigned int cpu)
 #endif
 }
 
+#ifdef CONFIG_X86_64
+
+#define cea_map_stack(name) do {					\
+	npages = sizeof(estacks->name## _stack) / PAGE_SIZE;		\
+	cea_map_percpu_pages(cea->estacks.name## _stack,		\
+			     estacks->name## _stack, npages, PAGE_KERNEL); \
+	} while (0)
+
+static void __init percpu_setup_exception_stacks(unsigned int cpu)
+{
+	struct exception_stacks *estacks = per_cpu_ptr(&exception_stacks, cpu);
+	struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
+	unsigned int npages;
+
+	BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);
+	/*
+	 * The exceptions stack mappings in the per cpu area are protected
+	 * by guard pages so each stack must be mapped separately.
+	 */
+	cea_map_stack(DF);
+	cea_map_stack(NMI);
+	cea_map_stack(DB);
+	cea_map_stack(MCE);
+}
+#else
+static inline void percpu_setup_exception_stacks(unsigned int cpu) {}
+#endif
+
 /* Setup the fixmap mappings only once per-processor */
 static void __init setup_cpu_entry_area(unsigned int cpu)
 {
@@ -134,13 +162,8 @@ static void __init setup_cpu_entry_area(unsigned int cpu)
 	per_cpu(cpu_entry_area, cpu) = cea;
 #endif
 
-#ifdef CONFIG_X86_64
-	BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);
-	BUILD_BUG_ON(sizeof(exception_stacks) !=
-		     sizeof(((struct cpu_entry_area *)0)->estacks));
-	cea_map_percpu_pages(&cea->estacks, &per_cpu(exception_stacks, cpu),
-			     sizeof(exception_stacks) / PAGE_SIZE, PAGE_KERNEL);
-#endif
+	percpu_setup_exception_stacks(cpu);
+
 	percpu_setup_debug_store(cpu);
 }
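
Since ## pastes the macro argument onto _stack, cea_map_stack(DF) expands to
roughly this (do/while wrapper and line continuations elided):

	npages = sizeof(estacks->DF_stack) / PAGE_SIZE;
	cea_map_percpu_pages(cea->estacks.DF_stack,
			     estacks->DF_stack, npages, PAGE_KERNEL);

That is, each IST stack's pages are mapped with a separate call, leaving the
cpu_entry_area layout free to keep unmapped guard pages between them. The
BUILD_BUG_ON comparing sizeof(exception_stacks) against the cea estacks
member is dropped here, presumably because the two layouts are about to
diverge in size once guard pages are inserted on the cea side.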