commit 050e9baa9dc9fbd9ce2b27f0056990fc9e0a08a0
Author:    Linus Torvalds <torvalds@linux-foundation.org>  2018-06-14 05:21:18 +0200
Committer: Linus Torvalds <torvalds@linux-foundation.org>  2018-06-14 05:21:18 +0200
Tree:      5087d180ea4c26c6b89f10bc160c15559cc02785 /arch
Parent:    Merge tag 'kbuild-v4.18-2' of git://git.kernel.org/pub/scm/linux/kernel/git/m...
Kbuild: rename CC_STACKPROTECTOR[_STRONG] config variables
The changes to automatically test for working stack protector compiler
support in the Kconfig files removed the special STACKPROTECTOR_AUTO
option that picked the strongest stack protector that the compiler
supported.
That was all a nice cleanup - it makes no sense to have the AUTO case
now that the Kconfig phase can just determine the compiler support
directly.
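For reference, the compiler probing that makes AUTO redundant can be seen in the
arch/Kconfig hunk further down; condensed (help text omitted), the pre-rename
options after the kbuild changes look roughly like this, with $(cc-option,...)
doing the detection at Kconfig time:
  config CC_STACKPROTECTOR
          bool "Stack Protector buffer overflow detection"
          depends on HAVE_CC_STACKPROTECTOR
          depends on $(cc-option,-fstack-protector)
  config CC_STACKPROTECTOR_STRONG
          bool "Strong Stack Protector"
          depends on CC_STACKPROTECTOR
          depends on $(cc-option,-fstack-protector-strong)
          default y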
HOWEVER.
It also meant that doing "make oldconfig" would now _disable_ the strong
stackprotector if you had AUTO enabled, because in a legacy config file,
the sane stack protector configuration would look like
CONFIG_HAVE_CC_STACKPROTECTOR=y
# CONFIG_CC_STACKPROTECTOR_NONE is not set
# CONFIG_CC_STACKPROTECTOR_REGULAR is not set
# CONFIG_CC_STACKPROTECTOR_STRONG is not set
CONFIG_CC_STACKPROTECTOR_AUTO=y
and when you ran this through "make oldconfig" with the Kbuild changes,
it would ask you about the regular CONFIG_CC_STACKPROTECTOR (that had
been renamed from CONFIG_CC_STACKPROTECTOR_REGULAR to just
CONFIG_CC_STACKPROTECTOR), but it would think that the STRONG version
used to be disabled (because it was really enabled by AUTO), and would
disable it in the new config, resulting in:
CONFIG_HAVE_CC_STACKPROTECTOR=y
CONFIG_CC_HAS_STACKPROTECTOR_NONE=y
CONFIG_CC_STACKPROTECTOR=y
# CONFIG_CC_STACKPROTECTOR_STRONG is not set
CONFIG_CC_HAS_SANE_STACKPROTECTOR=y
That's dangerously subtle - people could suddenly find themselves with
the weaker stack protector setup without even realizing it.
The solution here is to rename not just the old REGULAR stack protector
option, but also the strong one. This does that by removing the CC_
prefix entirely for the user choices, because it really is not about the
compiler support (the compiler support now instead automatically impacts
_visibility_ of the options to users).
This results in "make oldconfig" actually asking the user for their
choice, so that we don't have any silent subtle security model changes.
The end result would generally look like this:
CONFIG_HAVE_CC_STACKPROTECTOR=y
CONFIG_CC_HAS_STACKPROTECTOR_NONE=y
CONFIG_STACKPROTECTOR=y
CONFIG_STACKPROTECTOR_STRONG=y
CONFIG_CC_HAS_SANE_STACKPROTECTOR=y
where the "CC_" versions really are about internal compiler
infrastructure, not the user selections.
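Condensed from the arch/Kconfig hunk below (help text omitted), the resulting
split between compiler probes and user-visible choices looks roughly like this:
  config CC_HAS_STACKPROTECTOR_NONE
          def_bool $(cc-option,-fno-stack-protector)
  config STACKPROTECTOR
          bool "Stack Protector buffer overflow detection"
          depends on HAVE_CC_STACKPROTECTOR
          depends on $(cc-option,-fstack-protector)
  config STACKPROTECTOR_STRONG
          bool "Strong Stack Protector"
          depends on STACKPROTECTOR
          depends on $(cc-option,-fstack-protector-strong)
          default y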
Acked-by: Masahiro Yamada <yamada.masahiro@socionext.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch')
25 files changed, 30 insertions, 30 deletions
diff --git a/arch/Kconfig b/arch/Kconfig
index ebbb45096191..c302b3dd0058 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -558,7 +558,7 @@ config HAVE_CC_STACKPROTECTOR
 config CC_HAS_STACKPROTECTOR_NONE
 	def_bool $(cc-option,-fno-stack-protector)
 
-config CC_STACKPROTECTOR
+config STACKPROTECTOR
 	bool "Stack Protector buffer overflow detection"
 	depends on HAVE_CC_STACKPROTECTOR
 	depends on $(cc-option,-fstack-protector)
@@ -582,9 +582,9 @@ config CC_STACKPROTECTOR
 	  about 3% of all kernel functions, which increases kernel code size
 	  by about 0.3%.
 
-config CC_STACKPROTECTOR_STRONG
+config STACKPROTECTOR_STRONG
 	bool "Strong Stack Protector"
-	depends on CC_STACKPROTECTOR
+	depends on STACKPROTECTOR
 	depends on $(cc-option,-fstack-protector-strong)
 	default y
 	help
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 27c5381518d8..974d8d7d1bcd 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -61,7 +61,7 @@
 int main(void)
 {
   DEFINE(TSK_ACTIVE_MM,		offsetof(struct task_struct, active_mm));
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
   DEFINE(TSK_STACK_CANARY,	offsetof(struct task_struct, stack_canary));
 #endif
   BLANK();
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 1752033b0070..179a9f6bd1e3 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -791,7 +791,7 @@ ENTRY(__switch_to)
 	ldr	r6, [r2, #TI_CPU_DOMAIN]
 #endif
 	switch_tls r1, r4, r5, r3, r7
-#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
 	ldr	r7, [r2, #TI_TASK]
 	ldr	r8, =__stack_chk_guard
 	.if (TSK_STACK_CANARY > IMM12_MASK)
@@ -807,7 +807,7 @@ ENTRY(__switch_to)
 	ldr	r0, =thread_notify_head
 	mov	r1, #THREAD_NOTIFY_SWITCH
 	bl	atomic_notifier_call_chain
-#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
 	str	r7, [r8]
 #endif
 	THUMB(	mov	ip, r4 )
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 1523cb18b109..225d1c58d2de 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -39,7 +39,7 @@
 #include <asm/tls.h>
 #include <asm/vdso.h>
 
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
 #include <linux/stackprotector.h>
 unsigned long __stack_chk_guard __read_mostly;
 EXPORT_SYMBOL(__stack_chk_guard);
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index f08a2ed9db0d..e10bc363f533 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -59,7 +59,7 @@
 #include <asm/processor.h>
 #include <asm/stacktrace.h>
 
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
 #include <linux/stackprotector.h>
 unsigned long __stack_chk_guard __read_mostly;
 EXPORT_SYMBOL(__stack_chk_guard);
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index c1cd41456d42..cbe4742d2fff 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -83,7 +83,7 @@ void output_task_defines(void)
 	OFFSET(TASK_FLAGS, task_struct, flags);
 	OFFSET(TASK_MM, task_struct, mm);
 	OFFSET(TASK_PID, task_struct, pid);
-#if defined(CONFIG_CC_STACKPROTECTOR)
+#if defined(CONFIG_STACKPROTECTOR)
 	OFFSET(TASK_STACK_CANARY, task_struct, stack_canary);
 #endif
 	DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct));
diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S
index e42113fe2762..896080b445c2 100644
--- a/arch/mips/kernel/octeon_switch.S
+++ b/arch/mips/kernel/octeon_switch.S
@@ -61,7 +61,7 @@
 #endif
 3:
 
-#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
 	PTR_LA	t8, __stack_chk_guard
 	LONG_L	t9, TASK_STACK_CANARY(a1)
 	LONG_S	t9, 0(t8)
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 3775a8d694fb..8d85046adcc8 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -180,7 +180,7 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
 	return 0;
 }
 
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
 #include <linux/stackprotector.h>
 unsigned long __stack_chk_guard __read_mostly;
 EXPORT_SYMBOL(__stack_chk_guard);
diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S
index 665897139f30..71b1aafae1bb 100644
--- a/arch/mips/kernel/r2300_switch.S
+++ b/arch/mips/kernel/r2300_switch.S
@@ -36,7 +36,7 @@ LEAF(resume)
 	cpu_save_nonscratch a0
 	sw	ra, THREAD_REG31(a0)
 
-#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
 	PTR_LA	t8, __stack_chk_guard
 	LONG_L	t9, TASK_STACK_CANARY(a1)
 	LONG_S	t9, 0(t8)
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index 17cf9341c1cf..58232ae6cfae 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -31,7 +31,7 @@
 	cpu_save_nonscratch a0
 	LONG_S	ra, THREAD_REG31(a0)
 
-#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
 	PTR_LA	t8, __stack_chk_guard
 	LONG_L	t9, TASK_STACK_CANARY(a1)
 	LONG_S	t9, 0(t8)
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
index 68b1a67533ce..4d1bfc848dd3 100644
--- a/arch/sh/kernel/process.c
+++ b/arch/sh/kernel/process.c
@@ -12,7 +12,7 @@
 struct kmem_cache *task_xstate_cachep = NULL;
 unsigned int xstate_size;
 
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
 unsigned long __stack_chk_guard __read_mostly;
 EXPORT_SYMBOL(__stack_chk_guard);
 #endif
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index 93522069cb15..27fddb56b3e1 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -177,7 +177,7 @@ __switch_to(struct task_struct *prev, struct task_struct *next)
 {
 	struct thread_struct *next_t = &next->thread;
 
-#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
 	__stack_chk_guard = next->stack_canary;
 #endif
 
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index bef8e2b202a8..2582881d19ce 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -239,7 +239,7 @@ ENTRY(__switch_to_asm)
 	movl	%esp, TASK_threadsp(%eax)
 	movl	TASK_threadsp(%edx), %esp
 
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
 	movl	TASK_stack_canary(%edx), %ebx
 	movl	%ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset
 #endif
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 3166b9674429..73a522d53b53 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -357,7 +357,7 @@ ENTRY(__switch_to_asm)
 	movq	%rsp, TASK_threadsp(%rdi)
 	movq	TASK_threadsp(%rsi), %rsp
 
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
 	movq	TASK_stack_canary(%rsi), %rbx
 	movq	%rbx, PER_CPU_VAR(irq_stack_union)+stack_canary_offset
 #endif
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index e28add6b791f..cfd29ee8c3da 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -412,7 +412,7 @@ extern asmlinkage void ignore_sysret(void);
 void save_fsgs_for_kvm(void);
 #endif
 #else	/* X86_64 */
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
 /*
  * Make sure stack canary segment base is cached-aligned:
  *   "For Intel Atom processors, avoid non zero segment base address
diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
index 8f09012b92e7..e293c122d0d5 100644
--- a/arch/x86/include/asm/segment.h
+++ b/arch/x86/include/asm/segment.h
@@ -146,7 +146,7 @@
 # define __KERNEL_PERCPU		0
 #endif
 
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
 # define __KERNEL_STACK_CANARY		(GDT_ENTRY_STACK_CANARY*8)
 #else
 # define __KERNEL_STACK_CANARY		0
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
index 371b3a4af000..8ec97a62c245 100644
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
@@ -34,7 +34,7 @@
 #ifndef _ASM_STACKPROTECTOR_H
 #define _ASM_STACKPROTECTOR_H 1
 
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
 
 #include <asm/tsc.h>
 #include <asm/processor.h>
@@ -105,7 +105,7 @@ static inline void load_stack_canary_segment(void)
 #endif
 }
 
-#else	/* CC_STACKPROTECTOR */
+#else	/* STACKPROTECTOR */
 
 #define GDT_STACK_CANARY_INIT
 
@@ -121,5 +121,5 @@ static inline void load_stack_canary_segment(void)
 #endif
 }
 
-#endif	/* CC_STACKPROTECTOR */
+#endif	/* STACKPROTECTOR */
 #endif	/* _ASM_STACKPROTECTOR_H */
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
index 76417a9aab73..dcb008c320fe 100644
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
@@ -32,7 +32,7 @@
 void common(void) {
 	BLANK();
 	OFFSET(TASK_threadsp, task_struct, thread.sp);
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
 	OFFSET(TASK_stack_canary, task_struct, stack_canary);
 #endif
 
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index f91ba53e06c8..a4a3be399f4b 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -50,7 +50,7 @@ void foo(void)
 	DEFINE(TSS_sysenter_sp0, offsetof(struct cpu_entry_area, tss.x86_tss.sp0) -
 	       offsetofend(struct cpu_entry_area, entry_stack_page.stack));
 
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
 	BLANK();
 	OFFSET(stack_canary_offset, stack_canary, canary);
 #endif
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index bf51e51d808d..b2dcd161f514 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -69,7 +69,7 @@ int main(void)
 	OFFSET(TSS_sp1, tss_struct, x86_tss.sp1);
 	BLANK();
 
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
 	DEFINE(stack_canary_offset, offsetof(union irq_stack_union, stack_canary));
 	BLANK();
 #endif
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 910b47ee8078..0df7151cfef4 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1599,7 +1599,7 @@ DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) =
 	(unsigned long)&init_thread_union + THREAD_SIZE;
 EXPORT_PER_CPU_SYMBOL(cpu_current_top_of_stack);
 
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
 DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
 #endif
 
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index b59e4fb40fd9..abe6df15a8fb 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -375,7 +375,7 @@ ENDPROC(startup_32_smp)
  */
 __INIT
 setup_once:
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
 	/*
 	 * Configure the stack canary. The linker can't handle this by
 	 * relocation. Manually set base address in stack canary
diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c
index 022cf918ec20..67904f55f188 100644
--- a/arch/xtensa/kernel/asm-offsets.c
+++ b/arch/xtensa/kernel/asm-offsets.c
@@ -76,7 +76,7 @@ int main(void)
 	DEFINE(TASK_PID, offsetof (struct task_struct, pid));
 	DEFINE(TASK_THREAD, offsetof (struct task_struct, thread));
 	DEFINE(TASK_THREAD_INFO, offsetof (struct task_struct, stack));
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
 	DEFINE(TASK_STACK_CANARY, offsetof(struct task_struct, stack_canary));
 #endif
 	DEFINE(TASK_STRUCT_SIZE, sizeof (struct task_struct));
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index 5caff0744f3c..9cbc380e9572 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -1971,7 +1971,7 @@ ENTRY(_switch_to)
 	s32i	a1, a2, THREAD_SP	# save stack pointer
 #endif
 
-#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
 	movi	a6, __stack_chk_guard
 	l32i	a8, a3, TASK_STACK_CANARY
 	s32i	a8, a6, 0
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index 8dd0593fb2c4..483dcfb6e681 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -58,7 +58,7 @@
 void (*pm_power_off)(void) = NULL;
 EXPORT_SYMBOL(pm_power_off);
 
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
 #include <linux/stackprotector.h>
 unsigned long __stack_chk_guard __read_mostly;
 EXPORT_SYMBOL(__stack_chk_guard);