| author | Linus Torvalds <torvalds@linux-foundation.org> | 2023-08-19 10:46:02 +0200 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2023-08-19 10:46:02 +0200 |
| commit | bf98bae3d8a18745e54fef9fd71fd129f6e9f7e5 (patch) | |
| tree | e174308d07b223d9493c120c444c02109b8880ad | |
| parent | Merge tag 'powerpc-6.5-6' of git://git.kernel.org/pub/scm/linux/kernel/git/po... (diff) | |
| parent | x86/srso: Correct the mitigation status when SMT is disabled (diff) | |
Merge tag 'x86_urgent_for_v6.5_rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Borislav Petkov:
"Extraordinary embargoed times call for extraordinary measures. That's
why this week's x86/urgent branch is larger than usual, containing all
the known fallout fixes after the SRSO mitigation got merged.
I know it is a bit late in the game, but everyone who has reported a
bug stemming from the SRSO pile has tested that branch and confirmed
that it fixes their bug.
Also, I've run it on every piece of hardware I have and it is looking
good. It has been running on this very machine while I'm typing, for
two days now without an issue. Famous last words...
- Use LEA ...%rsp instead of ADD %rsp in the Zen1/2 SRSO return
  sequence, as the latter clobbers flags, which interferes with fastop
  emulation in KVM and leads to guests freezing during boot (a small
  flags sketch follows the quoted message)
- A fix for the DIV(0) quotient data leak on Zen1 to clear the divider
  buffers at the right time (see the divider-clearing sketch after the
  quoted message)
- Disable the SRSO mitigation on unaffected configurations as it got
enabled there unnecessarily
- Change .text section name to fix CONFIG_LTO_CLANG builds
- Improve the optprobe indirect jmp check so that certain
  configurations can still use optprobes at all
- A thorough scrubbing of the untraining routines by PeterZ:
- Add proper speculation stopping traps so that objtool is happy
- Adjust objtool to handle the new thunks
- Make the return thunk pointer assignable to the different untraining
  sequences at runtime, thus avoiding the alternative at the return
  thunk. It also simplifies the code a bit (a user-space analogue of
  the pattern follows the quoted message)
- Add an entry_untrain_ret() main entry point which selects the
  respective untraining sequence
- Rename things so that they're more clear
- Fix stack validation with FRAME_POINTER=y builds
- Fix static call patching to handle the case where a JMP to the
  return thunk is the last insn on the very last module memory page
  (see the page-boundary sketch after the quoted message)
- Add more documentation about what each untraining routine does and
why"
* tag 'x86_urgent_for_v6.5_rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/srso: Correct the mitigation status when SMT is disabled
x86/static_call: Fix __static_call_fixup()
objtool/x86: Fixup frame-pointer vs rethunk
x86/srso: Explain the untraining sequences a bit more
x86/cpu/kvm: Provide UNTRAIN_RET_VM
x86/cpu: Cleanup the untrain mess
x86/cpu: Rename srso_(.*)_alias to srso_alias_\1
x86/cpu: Rename original retbleed methods
x86/cpu: Clean up SRSO return thunk mess
x86/alternative: Make custom return thunk unconditional
objtool/x86: Fix SRSO mess
x86/cpu: Fix up srso_safe_ret() and __x86_return_thunk()
x86/cpu: Fix __x86_return_thunk symbol type
x86/retpoline,kprobes: Skip optprobe check for indirect jumps with retpolines and IBT
x86/retpoline,kprobes: Fix position of thunk sections with CONFIG_LTO_CLANG
x86/srso: Disable the mitigation on unaffected configurations
x86/CPU/AMD: Fix the DIV(0) initial fix attempt
x86/retpoline: Don't clobber RFLAGS during srso_safe_ret()
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | Documentation/admin-guide/hw-vuln/srso.rst | 4 |
| -rw-r--r-- | arch/x86/include/asm/entry-common.h | 1 |
| -rw-r--r-- | arch/x86/include/asm/nospec-branch.h | 49 |
| -rw-r--r-- | arch/x86/kernel/alternative.c | 4 |
| -rw-r--r-- | arch/x86/kernel/cpu/amd.c | 1 |
| -rw-r--r-- | arch/x86/kernel/cpu/bugs.c | 27 |
| -rw-r--r-- | arch/x86/kernel/kprobes/opt.c | 40 |
| -rw-r--r-- | arch/x86/kernel/static_call.c | 13 |
| -rw-r--r-- | arch/x86/kernel/traps.c | 2 |
| -rw-r--r-- | arch/x86/kernel/vmlinux.lds.S | 20 |
| -rw-r--r-- | arch/x86/kvm/svm/svm.c | 2 |
| -rw-r--r-- | arch/x86/kvm/svm/vmenter.S | 7 |
| -rw-r--r-- | arch/x86/lib/retpoline.S | 137 |
| -rw-r--r-- | tools/objtool/arch/x86/decode.c | 11 |
| -rw-r--r-- | tools/objtool/check.c | 45 |
| -rw-r--r-- | tools/objtool/include/objtool/arch.h | 1 |
| -rw-r--r-- | tools/objtool/include/objtool/elf.h | 1 |
| -rw-r--r-- | tools/perf/util/thread-stack.c | 4 |
18 files changed, 236 insertions, 133 deletions
diff --git a/Documentation/admin-guide/hw-vuln/srso.rst b/Documentation/admin-guide/hw-vuln/srso.rst index af59a9395662..b6cfb51cb0b4 100644 --- a/Documentation/admin-guide/hw-vuln/srso.rst +++ b/Documentation/admin-guide/hw-vuln/srso.rst @@ -141,8 +141,8 @@ sequence. To ensure the safety of this mitigation, the kernel must ensure that the safe return sequence is itself free from attacker interference. In Zen3 and Zen4, this is accomplished by creating a BTB alias between the -untraining function srso_untrain_ret_alias() and the safe return -function srso_safe_ret_alias() which results in evicting a potentially +untraining function srso_alias_untrain_ret() and the safe return +function srso_alias_safe_ret() which results in evicting a potentially poisoned BTB entry and using that safe one for all function returns. In older Zen1 and Zen2, this is accomplished using a reinterpretation diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h index 117903881fe4..ce8f50192ae3 100644 --- a/arch/x86/include/asm/entry-common.h +++ b/arch/x86/include/asm/entry-common.h @@ -92,6 +92,7 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs, static __always_inline void arch_exit_to_user_mode(void) { mds_user_clear_cpu_buffers(); + amd_clear_divider(); } #define arch_exit_to_user_mode arch_exit_to_user_mode diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 3faf044569a5..c55cc243592e 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -272,9 +272,9 @@ .endm #ifdef CONFIG_CPU_UNRET_ENTRY -#define CALL_ZEN_UNTRAIN_RET "call zen_untrain_ret" +#define CALL_UNTRAIN_RET "call entry_untrain_ret" #else -#define CALL_ZEN_UNTRAIN_RET "" +#define CALL_UNTRAIN_RET "" #endif /* @@ -282,7 +282,7 @@ * return thunk isn't mapped into the userspace tables (then again, AMD * typically has NO_MELTDOWN). * - * While zen_untrain_ret() doesn't clobber anything but requires stack, + * While retbleed_untrain_ret() doesn't clobber anything but requires stack, * entry_ibpb() will clobber AX, CX, DX. 
* * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point @@ -293,14 +293,20 @@ defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO) VALIDATE_UNRET_END ALTERNATIVE_3 "", \ - CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET, \ + CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \ "call entry_ibpb", X86_FEATURE_ENTRY_IBPB, \ __stringify(RESET_CALL_DEPTH), X86_FEATURE_CALL_DEPTH #endif +.endm -#ifdef CONFIG_CPU_SRSO - ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \ - "call srso_untrain_ret_alias", X86_FEATURE_SRSO_ALIAS +.macro UNTRAIN_RET_VM +#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \ + defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO) + VALIDATE_UNRET_END + ALTERNATIVE_3 "", \ + CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \ + "call entry_ibpb", X86_FEATURE_IBPB_ON_VMEXIT, \ + __stringify(RESET_CALL_DEPTH), X86_FEATURE_CALL_DEPTH #endif .endm @@ -309,15 +315,10 @@ defined(CONFIG_CALL_DEPTH_TRACKING) VALIDATE_UNRET_END ALTERNATIVE_3 "", \ - CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET, \ + CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \ "call entry_ibpb", X86_FEATURE_ENTRY_IBPB, \ __stringify(RESET_CALL_DEPTH_FROM_CALL), X86_FEATURE_CALL_DEPTH #endif - -#ifdef CONFIG_CPU_SRSO - ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \ - "call srso_untrain_ret_alias", X86_FEATURE_SRSO_ALIAS -#endif .endm @@ -341,17 +342,24 @@ extern retpoline_thunk_t __x86_indirect_thunk_array[]; extern retpoline_thunk_t __x86_indirect_call_thunk_array[]; extern retpoline_thunk_t __x86_indirect_jump_thunk_array[]; +#ifdef CONFIG_RETHUNK extern void __x86_return_thunk(void); -extern void zen_untrain_ret(void); +#else +static inline void __x86_return_thunk(void) {} +#endif + +extern void retbleed_return_thunk(void); +extern void srso_return_thunk(void); +extern void srso_alias_return_thunk(void); + +extern void retbleed_untrain_ret(void); extern void srso_untrain_ret(void); -extern void srso_untrain_ret_alias(void); +extern void srso_alias_untrain_ret(void); + +extern void entry_untrain_ret(void); extern void entry_ibpb(void); -#ifdef CONFIG_CALL_THUNKS extern void (*x86_return_thunk)(void); -#else -#define x86_return_thunk (&__x86_return_thunk) -#endif #ifdef CONFIG_CALL_DEPTH_TRACKING extern void __x86_return_skl(void); @@ -478,9 +486,6 @@ enum ssb_mitigation { SPEC_STORE_BYPASS_SECCOMP, }; -extern char __indirect_thunk_start[]; -extern char __indirect_thunk_end[]; - static __always_inline void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature) { diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 2dcf3a06af09..099d58d02a26 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -687,10 +687,6 @@ void __init_or_module noinline apply_retpolines(s32 *start, s32 *end) #ifdef CONFIG_RETHUNK -#ifdef CONFIG_CALL_THUNKS -void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk; -#endif - /* * Rewrite the compiler generated return thunk tail-calls. 
* diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 70f9d56f9305..7eca6a8abbb1 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -1329,3 +1329,4 @@ void noinstr amd_clear_divider(void) asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0) :: "a" (0), "d" (0), "r" (1)); } +EXPORT_SYMBOL_GPL(amd_clear_divider); diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index d02f73c5339d..f081d26616ac 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -63,6 +63,8 @@ EXPORT_SYMBOL_GPL(x86_pred_cmd); static DEFINE_MUTEX(spec_ctrl_mutex); +void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk; + /* Update SPEC_CTRL MSR and its cached copy unconditionally */ static void update_spec_ctrl(u64 val) { @@ -165,6 +167,11 @@ void __init cpu_select_mitigations(void) md_clear_select_mitigation(); srbds_select_mitigation(); l1d_flush_select_mitigation(); + + /* + * srso_select_mitigation() depends and must run after + * retbleed_select_mitigation(). + */ srso_select_mitigation(); gds_select_mitigation(); } @@ -1035,6 +1042,9 @@ do_cmd_auto: setup_force_cpu_cap(X86_FEATURE_RETHUNK); setup_force_cpu_cap(X86_FEATURE_UNRET); + if (IS_ENABLED(CONFIG_RETHUNK)) + x86_return_thunk = retbleed_return_thunk; + if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) pr_err(RETBLEED_UNTRAIN_MSG); @@ -1044,6 +1054,7 @@ do_cmd_auto: case RETBLEED_MITIGATION_IBPB: setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB); + setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT); mitigate_smt = true; break; @@ -2417,9 +2428,10 @@ static void __init srso_select_mitigation(void) * Zen1/2 with SMT off aren't vulnerable after the right * IBPB microcode has been applied. */ - if ((boot_cpu_data.x86 < 0x19) && - (!cpu_smt_possible() || (cpu_smt_control == CPU_SMT_DISABLED))) + if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) { setup_force_cpu_cap(X86_FEATURE_SRSO_NO); + return; + } } if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) { @@ -2448,11 +2460,15 @@ static void __init srso_select_mitigation(void) * like ftrace, static_call, etc. */ setup_force_cpu_cap(X86_FEATURE_RETHUNK); + setup_force_cpu_cap(X86_FEATURE_UNRET); - if (boot_cpu_data.x86 == 0x19) + if (boot_cpu_data.x86 == 0x19) { setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS); - else + x86_return_thunk = srso_alias_return_thunk; + } else { setup_force_cpu_cap(X86_FEATURE_SRSO); + x86_return_thunk = srso_return_thunk; + } srso_mitigation = SRSO_MITIGATION_SAFE_RET; } else { pr_err("WARNING: kernel not compiled with CPU_SRSO.\n"); @@ -2696,6 +2712,9 @@ static ssize_t retbleed_show_state(char *buf) static ssize_t srso_show_state(char *buf) { + if (boot_cpu_has(X86_FEATURE_SRSO_NO)) + return sysfs_emit(buf, "Mitigation: SMT disabled\n"); + return sysfs_emit(buf, "%s%s\n", srso_strings[srso_mitigation], (cpu_has_ibpb_brtype_microcode() ? 
"" : ", no microcode")); diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index 57b0037d0a99..517821b48391 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c @@ -226,7 +226,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real) } /* Check whether insn is indirect jump */ -static int __insn_is_indirect_jump(struct insn *insn) +static int insn_is_indirect_jump(struct insn *insn) { return ((insn->opcode.bytes[0] == 0xff && (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */ @@ -260,26 +260,6 @@ static int insn_jump_into_range(struct insn *insn, unsigned long start, int len) return (start <= target && target <= start + len); } -static int insn_is_indirect_jump(struct insn *insn) -{ - int ret = __insn_is_indirect_jump(insn); - -#ifdef CONFIG_RETPOLINE - /* - * Jump to x86_indirect_thunk_* is treated as an indirect jump. - * Note that even with CONFIG_RETPOLINE=y, the kernel compiled with - * older gcc may use indirect jump. So we add this check instead of - * replace indirect-jump check. - */ - if (!ret) - ret = insn_jump_into_range(insn, - (unsigned long)__indirect_thunk_start, - (unsigned long)__indirect_thunk_end - - (unsigned long)__indirect_thunk_start); -#endif - return ret; -} - /* Decode whole function to ensure any instructions don't jump into target */ static int can_optimize(unsigned long paddr) { @@ -334,9 +314,21 @@ static int can_optimize(unsigned long paddr) /* Recover address */ insn.kaddr = (void *)addr; insn.next_byte = (void *)(addr + insn.length); - /* Check any instructions don't jump into target */ - if (insn_is_indirect_jump(&insn) || - insn_jump_into_range(&insn, paddr + INT3_INSN_SIZE, + /* + * Check any instructions don't jump into target, indirectly or + * directly. + * + * The indirect case is present to handle a code with jump + * tables. When the kernel uses retpolines, the check should in + * theory additionally look for jumps to indirect thunks. + * However, the kernel built with retpolines or IBT has jump + * tables disabled so the check can be skipped altogether. + */ + if (!IS_ENABLED(CONFIG_RETPOLINE) && + !IS_ENABLED(CONFIG_X86_KERNEL_IBT) && + insn_is_indirect_jump(&insn)) + return 0; + if (insn_jump_into_range(&insn, paddr + INT3_INSN_SIZE, DISP32_SIZE)) return 0; addr += insn.length; diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c index b70670a98597..77a9316da435 100644 --- a/arch/x86/kernel/static_call.c +++ b/arch/x86/kernel/static_call.c @@ -186,6 +186,19 @@ EXPORT_SYMBOL_GPL(arch_static_call_transform); */ bool __static_call_fixup(void *tramp, u8 op, void *dest) { + unsigned long addr = (unsigned long)tramp; + /* + * Not all .return_sites are a static_call trampoline (most are not). + * Check if the 3 bytes after the return are still kernel text, if not, + * then this definitely is not a trampoline and we need not worry + * further. + * + * This avoids the memcmp() below tripping over pagefaults etc.. + */ + if (((addr >> PAGE_SHIFT) != ((addr + 7) >> PAGE_SHIFT)) && + !kernel_text_address(addr + 7)) + return false; + if (memcmp(tramp+5, tramp_ud, 3)) { /* Not a trampoline site, not our problem. 
*/ return false; diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 1885326a8f65..4a817d20ce3b 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -206,8 +206,6 @@ DEFINE_IDTENTRY(exc_divide_error) { do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE, FPE_INTDIV, error_get_trap_addr(regs)); - - amd_clear_divider(); } DEFINE_IDTENTRY(exc_overflow) diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index ef06211bae4c..83d41c2601d7 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -133,27 +133,25 @@ SECTIONS KPROBES_TEXT SOFTIRQENTRY_TEXT #ifdef CONFIG_RETPOLINE - __indirect_thunk_start = .; - *(.text.__x86.indirect_thunk) - *(.text.__x86.return_thunk) - __indirect_thunk_end = .; + *(.text..__x86.indirect_thunk) + *(.text..__x86.return_thunk) #endif STATIC_CALL_TEXT ALIGN_ENTRY_TEXT_BEGIN #ifdef CONFIG_CPU_SRSO - *(.text.__x86.rethunk_untrain) + *(.text..__x86.rethunk_untrain) #endif ENTRY_TEXT #ifdef CONFIG_CPU_SRSO /* - * See the comment above srso_untrain_ret_alias()'s + * See the comment above srso_alias_untrain_ret()'s * definition. */ - . = srso_untrain_ret_alias | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20); - *(.text.__x86.rethunk_safe) + . = srso_alias_untrain_ret | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20); + *(.text..__x86.rethunk_safe) #endif ALIGN_ENTRY_TEXT_END *(.gnu.warning) @@ -523,7 +521,7 @@ INIT_PER_CPU(irq_stack_backing_store); #endif #ifdef CONFIG_RETHUNK -. = ASSERT((__ret & 0x3f) == 0, "__ret not cacheline-aligned"); +. = ASSERT((retbleed_return_thunk & 0x3f) == 0, "retbleed_return_thunk not cacheline-aligned"); . = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned"); #endif @@ -538,8 +536,8 @@ INIT_PER_CPU(irq_stack_backing_store); * Instead do: (A | B) - (A & B) in order to compute the XOR * of the two function addresses: */ -. = ASSERT(((ABSOLUTE(srso_untrain_ret_alias) | srso_safe_ret_alias) - - (ABSOLUTE(srso_untrain_ret_alias) & srso_safe_ret_alias)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)), +. = ASSERT(((ABSOLUTE(srso_alias_untrain_ret) | srso_alias_safe_ret) - + (ABSOLUTE(srso_alias_untrain_ret) & srso_alias_safe_ret)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)), "SRSO function pair won't alias"); #endif diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 03e852dedcc1..d4bfdc607fe7 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -4006,6 +4006,8 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_in guest_state_enter_irqoff(); + amd_clear_divider(); + if (sev_es_guest(vcpu->kvm)) __svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted); else diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S index 265452fc9ebe..ef2ebabb059c 100644 --- a/arch/x86/kvm/svm/vmenter.S +++ b/arch/x86/kvm/svm/vmenter.S @@ -222,10 +222,7 @@ SYM_FUNC_START(__svm_vcpu_run) * because interrupt handlers won't sanitize 'ret' if the return is * from the kernel. */ - UNTRAIN_RET - - /* SRSO */ - ALTERNATIVE "", "call entry_ibpb", X86_FEATURE_IBPB_ON_VMEXIT + UNTRAIN_RET_VM /* * Clear all general purpose registers except RSP and RAX to prevent @@ -362,7 +359,7 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run) * because interrupt handlers won't sanitize RET if the return is * from the kernel. */ - UNTRAIN_RET + UNTRAIN_RET_VM /* "Pop" @spec_ctrl_intercepted. 
*/ pop %_ASM_BX diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S index 2cff585f22f2..cd86aeb5fdd3 100644 --- a/arch/x86/lib/retpoline.S +++ b/arch/x86/lib/retpoline.S @@ -13,7 +13,7 @@ #include <asm/frame.h> #include <asm/nops.h> - .section .text.__x86.indirect_thunk + .section .text..__x86.indirect_thunk .macro POLINE reg @@ -133,75 +133,106 @@ SYM_CODE_END(__x86_indirect_jump_thunk_array) #ifdef CONFIG_RETHUNK /* - * srso_untrain_ret_alias() and srso_safe_ret_alias() are placed at + * srso_alias_untrain_ret() and srso_alias_safe_ret() are placed at * special addresses: * - * - srso_untrain_ret_alias() is 2M aligned - * - srso_safe_ret_alias() is also in the same 2M page but bits 2, 8, 14 + * - srso_alias_untrain_ret() is 2M aligned + * - srso_alias_safe_ret() is also in the same 2M page but bits 2, 8, 14 * and 20 in its virtual address are set (while those bits in the - * srso_untrain_ret_alias() function are cleared). + * srso_alias_untrain_ret() function are cleared). * * This guarantees that those two addresses will alias in the branch * target buffer of Zen3/4 generations, leading to any potential * poisoned entries at that BTB slot to get evicted. * - * As a result, srso_safe_ret_alias() becomes a safe return. + * As a result, srso_alias_safe_ret() becomes a safe return. */ #ifdef CONFIG_CPU_SRSO - .section .text.__x86.rethunk_untrain + .section .text..__x86.rethunk_untrain -SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE) +SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) + UNWIND_HINT_FUNC ANNOTATE_NOENDBR ASM_NOP2 lfence - jmp __x86_return_thunk -SYM_FUNC_END(srso_untrain_ret_alias) -__EXPORT_THUNK(srso_untrain_ret_alias) - - .section .text.__x86.rethunk_safe + jmp srso_alias_return_thunk +SYM_FUNC_END(srso_alias_untrain_ret) +__EXPORT_THUNK(srso_alias_untrain_ret) + + .section .text..__x86.rethunk_safe +#else +/* dummy definition for alternatives */ +SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) + ANNOTATE_UNRET_SAFE + ret + int3 +SYM_FUNC_END(srso_alias_untrain_ret) #endif -/* Needs a definition for the __x86_return_thunk alternative below. */ -SYM_START(srso_safe_ret_alias, SYM_L_GLOBAL, SYM_A_NONE) -#ifdef CONFIG_CPU_SRSO - add $8, %_ASM_SP +SYM_START(srso_alias_safe_ret, SYM_L_GLOBAL, SYM_A_NONE) + lea 8(%_ASM_SP), %_ASM_SP UNWIND_HINT_FUNC -#endif ANNOTATE_UNRET_SAFE ret int3 -SYM_FUNC_END(srso_safe_ret_alias) +SYM_FUNC_END(srso_alias_safe_ret) - .section .text.__x86.return_thunk + .section .text..__x86.return_thunk + +SYM_CODE_START(srso_alias_return_thunk) + UNWIND_HINT_FUNC + ANNOTATE_NOENDBR + call srso_alias_safe_ret + ud2 +SYM_CODE_END(srso_alias_return_thunk) + +/* + * Some generic notes on the untraining sequences: + * + * They are interchangeable when it comes to flushing potentially wrong + * RET predictions from the BTB. + * + * The SRSO Zen1/2 (MOVABS) untraining sequence is longer than the + * Retbleed sequence because the return sequence done there + * (srso_safe_ret()) is longer and the return sequence must fully nest + * (end before) the untraining sequence. Therefore, the untraining + * sequence must fully overlap the return sequence. + * + * Regarding alignment - the instructions which need to be untrained, + * must all start at a cacheline boundary for Zen1/2 generations. That + * is, instruction sequences starting at srso_safe_ret() and + * the respective instruction sequences at retbleed_return_thunk() + * must start at a cacheline boundary. 
+ */ /* * Safety details here pertain to the AMD Zen{1,2} microarchitecture: - * 1) The RET at __x86_return_thunk must be on a 64 byte boundary, for + * 1) The RET at retbleed_return_thunk must be on a 64 byte boundary, for * alignment within the BTB. - * 2) The instruction at zen_untrain_ret must contain, and not + * 2) The instruction at retbleed_untrain_ret must contain, and not * end with, the 0xc3 byte of the RET. * 3) STIBP must be enabled, or SMT disabled, to prevent the sibling thread * from re-poisioning the BTB prediction. */ .align 64 - .skip 64 - (__ret - zen_untrain_ret), 0xcc -SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) + .skip 64 - (retbleed_return_thunk - retbleed_untrain_ret), 0xcc +SYM_START(retbleed_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) ANNOTATE_NOENDBR /* - * As executed from zen_untrain_ret, this is: + * As executed from retbleed_untrain_ret, this is: * * TEST $0xcc, %bl * LFENCE - * JMP __x86_return_thunk + * JMP retbleed_return_thunk * * Executing the TEST instruction has a side effect of evicting any BTB * prediction (potentially attacker controlled) attached to the RET, as - * __x86_return_thunk + 1 isn't an instruction boundary at the moment. + * retbleed_return_thunk + 1 isn't an instruction boundary at the moment. */ .byte 0xf6 /* - * As executed from __x86_return_thunk, this is a plain RET. + * As executed from retbleed_return_thunk, this is a plain RET. * * As part of the TEST above, RET is the ModRM byte, and INT3 the imm8. * @@ -213,13 +244,13 @@ SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) * With SMT enabled and STIBP active, a sibling thread cannot poison * RET's prediction to a type of its choice, but can evict the * prediction due to competitive sharing. If the prediction is - * evicted, __x86_return_thunk will suffer Straight Line Speculation + * evicted, retbleed_return_thunk will suffer Straight Line Speculation * which will be contained safely by the INT3. */ -SYM_INNER_LABEL(__ret, SYM_L_GLOBAL) +SYM_INNER_LABEL(retbleed_return_thunk, SYM_L_GLOBAL) ret int3 -SYM_CODE_END(__ret) +SYM_CODE_END(retbleed_return_thunk) /* * Ensure the TEST decoding / BTB invalidation is complete. @@ -230,16 +261,16 @@ SYM_CODE_END(__ret) * Jump back and execute the RET in the middle of the TEST instruction. * INT3 is for SLS protection. */ - jmp __ret + jmp retbleed_return_thunk int3 -SYM_FUNC_END(zen_untrain_ret) -__EXPORT_THUNK(zen_untrain_ret) +SYM_FUNC_END(retbleed_untrain_ret) +__EXPORT_THUNK(retbleed_untrain_ret) /* - * SRSO untraining sequence for Zen1/2, similar to zen_untrain_ret() + * SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret() * above. On kernel entry, srso_untrain_ret() is executed which is a * - * movabs $0xccccccc308c48348,%rax + * movabs $0xccccc30824648d48,%rax * * and when the return thunk executes the inner label srso_safe_ret() * later, it is a stack manipulation and a RET which is mispredicted and @@ -251,22 +282,44 @@ SYM_START(srso_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) ANNOTATE_NOENDBR .byte 0x48, 0xb8 +/* + * This forces the function return instruction to speculate into a trap + * (UD2 in srso_return_thunk() below). This RET will then mispredict + * and execution will continue at the return site read from the top of + * the stack. 
+ */ SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL) - add $8, %_ASM_SP + lea 8(%_ASM_SP), %_ASM_SP ret int3 int3 - int3 + /* end of movabs */ lfence call srso_safe_ret - int3 + ud2 SYM_CODE_END(srso_safe_ret) SYM_FUNC_END(srso_untrain_ret) __EXPORT_THUNK(srso_untrain_ret) -SYM_FUNC_START(__x86_return_thunk) - ALTERNATIVE_2 "jmp __ret", "call srso_safe_ret", X86_FEATURE_SRSO, \ - "call srso_safe_ret_alias", X86_FEATURE_SRSO_ALIAS +SYM_CODE_START(srso_return_thunk) + UNWIND_HINT_FUNC + ANNOTATE_NOENDBR + call srso_safe_ret + ud2 +SYM_CODE_END(srso_return_thunk) + +SYM_FUNC_START(entry_untrain_ret) + ALTERNATIVE_2 "jmp retbleed_untrain_ret", \ + "jmp srso_untrain_ret", X86_FEATURE_SRSO, \ + "jmp srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS +SYM_FUNC_END(entry_untrain_ret) +__EXPORT_THUNK(entry_untrain_ret) + +SYM_CODE_START(__x86_return_thunk) + UNWIND_HINT_FUNC + ANNOTATE_NOENDBR + ANNOTATE_UNRET_SAFE + ret int3 SYM_CODE_END(__x86_return_thunk) EXPORT_SYMBOL(__x86_return_thunk) diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c index 2d51fa8da9e8..c0f25d00181e 100644 --- a/tools/objtool/arch/x86/decode.c +++ b/tools/objtool/arch/x86/decode.c @@ -824,8 +824,11 @@ bool arch_is_retpoline(struct symbol *sym) bool arch_is_rethunk(struct symbol *sym) { - return !strcmp(sym->name, "__x86_return_thunk") || - !strcmp(sym->name, "srso_untrain_ret") || - !strcmp(sym->name, "srso_safe_ret") || - !strcmp(sym->name, "__ret"); + return !strcmp(sym->name, "__x86_return_thunk"); +} + +bool arch_is_embedded_insn(struct symbol *sym) +{ + return !strcmp(sym->name, "retbleed_return_thunk") || + !strcmp(sym->name, "srso_safe_ret"); } diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 8936a05f0e5a..1384090530db 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -389,7 +389,7 @@ static int decode_instructions(struct objtool_file *file) if (!strcmp(sec->name, ".noinstr.text") || !strcmp(sec->name, ".entry.text") || !strcmp(sec->name, ".cpuidle.text") || - !strncmp(sec->name, ".text.__x86.", 12)) + !strncmp(sec->name, ".text..__x86.", 13)) sec->noinstr = true; /* @@ -455,7 +455,7 @@ static int decode_instructions(struct objtool_file *file) return -1; } - if (func->return_thunk || func->alias != func) + if (func->embedded_insn || func->alias != func) continue; if (!find_insn(file, sec, func->offset)) { @@ -1288,16 +1288,33 @@ static int add_ignore_alternatives(struct objtool_file *file) return 0; } +/* + * Symbols that replace INSN_CALL_DYNAMIC, every (tail) call to such a symbol + * will be added to the .retpoline_sites section. + */ __weak bool arch_is_retpoline(struct symbol *sym) { return false; } +/* + * Symbols that replace INSN_RETURN, every (tail) call to such a symbol + * will be added to the .return_sites section. + */ __weak bool arch_is_rethunk(struct symbol *sym) { return false; } +/* + * Symbols that are embedded inside other instructions, because sometimes crazy + * code exists. These are mostly ignored for validation purposes. + */ +__weak bool arch_is_embedded_insn(struct symbol *sym) +{ + return false; +} + static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn) { struct reloc *reloc; @@ -1576,14 +1593,14 @@ static int add_jump_destinations(struct objtool_file *file) struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off); /* - * This is a special case for zen_untrain_ret(). + * This is a special case for retbleed_untrain_ret(). 
* It jumps to __x86_return_thunk(), but objtool * can't find the thunk's starting RET * instruction, because the RET is also in the * middle of another instruction. Objtool only * knows about the outer instruction. */ - if (sym && sym->return_thunk) { + if (sym && sym->embedded_insn) { add_return_call(file, insn, false); continue; } @@ -2502,6 +2519,9 @@ static int classify_symbols(struct objtool_file *file) if (arch_is_rethunk(func)) func->return_thunk = true; + if (arch_is_embedded_insn(func)) + func->embedded_insn = true; + if (arch_ftrace_match(func->name)) func->fentry = true; @@ -2630,12 +2650,17 @@ static int decode_sections(struct objtool_file *file) return 0; } -static bool is_fentry_call(struct instruction *insn) +static bool is_special_call(struct instruction *insn) { - if (insn->type == INSN_CALL && - insn_call_dest(insn) && - insn_call_dest(insn)->fentry) - return true; + if (insn->type == INSN_CALL) { + struct symbol *dest = insn_call_dest(insn); + + if (!dest) + return false; + + if (dest->fentry || dest->embedded_insn) + return true; + } return false; } @@ -3636,7 +3661,7 @@ static int validate_branch(struct objtool_file *file, struct symbol *func, if (ret) return ret; - if (opts.stackval && func && !is_fentry_call(insn) && + if (opts.stackval && func && !is_special_call(insn) && !has_valid_stack_frame(&state)) { WARN_INSN(insn, "call without frame pointer save/setup"); return 1; diff --git a/tools/objtool/include/objtool/arch.h b/tools/objtool/include/objtool/arch.h index 2b6d2ce4f9a5..0b303eba660e 100644 --- a/tools/objtool/include/objtool/arch.h +++ b/tools/objtool/include/objtool/arch.h @@ -90,6 +90,7 @@ int arch_decode_hint_reg(u8 sp_reg, int *base); bool arch_is_retpoline(struct symbol *sym); bool arch_is_rethunk(struct symbol *sym); +bool arch_is_embedded_insn(struct symbol *sym); int arch_rewrite_retpolines(struct objtool_file *file); diff --git a/tools/objtool/include/objtool/elf.h b/tools/objtool/include/objtool/elf.h index c532d70864dc..9f71e988eca4 100644 --- a/tools/objtool/include/objtool/elf.h +++ b/tools/objtool/include/objtool/elf.h @@ -66,6 +66,7 @@ struct symbol { u8 fentry : 1; u8 profiling_func : 1; u8 warned : 1; + u8 embedded_insn : 1; struct list_head pv_target; struct reloc *relocs; }; diff --git a/tools/perf/util/thread-stack.c b/tools/perf/util/thread-stack.c index 374d142e7390..c6a0a27b12c2 100644 --- a/tools/perf/util/thread-stack.c +++ b/tools/perf/util/thread-stack.c @@ -1038,9 +1038,7 @@ static int thread_stack__trace_end(struct thread_stack *ts, static bool is_x86_retpoline(const char *name) { - const char *p = strstr(name, "__x86_indirect_thunk_"); - - return p == name || !strcmp(name, "__indirect_thunk_start"); + return strstr(name, "__x86_indirect_thunk_") == name; } /* |
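One detail worth spelling out from the vmlinux.lds.S hunk above: a linker script cannot XOR two symbol addresses, so it checks that srso_alias_untrain_ret() and srso_alias_safe_ret() differ in exactly bits 2, 8, 14 and 20 via (A | B) - (A & B). A small check of that identity with made-up addresses (the real ones are assigned by the linker):

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t mask = (1ULL << 2) | (1ULL << 8) |
			      (1ULL << 14) | (1ULL << 20);
	uint64_t untrain = 0xffffffff82000000ULL;  /* hypothetical, 2M-aligned address */
	uint64_t safe    = untrain | mask;         /* same 2M page, alias bits set */

	/* (A | B) - (A & B) is the XOR the linker script cannot express. */
	assert(((untrain | safe) - (untrain & safe)) == (untrain ^ safe));
	assert((untrain ^ safe) == mask);

	printf("alias pair differs exactly in bits 2, 8, 14 and 20\n");
	return 0;
}
```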