author		Ard Biesheuvel <ardb@kernel.org>	2022-11-22 17:10:13 +0100
committer	Borislav Petkov <bp@suse.de>		2022-11-24 08:57:41 +0100
commit		b5d854cd4b6a314edd6c15dabc4233b84a0f8e5e (patch)
tree		dde32200057b6e4a7fb77e7eddeb682e8b6be078 /arch
parent		x86/boot/compressed: Move startup32_load_idt() out of head_64.S (diff)
x86/boot/compressed: Move startup32_check_sev_cbit() into .text
Move startup32_check_sev_cbit() into the .text section and turn it into
an ordinary function using the ordinary 32-bit calling convention,
instead of saving/restoring the registers that are known to be live at
the only call site. This improves maintainability, and makes it possible
to move this function out of head_64.S and into a separate compilation
unit that is specific to memory encryption.
Note that this requires the call site to be moved before the mixed mode
check, as %eax will be live otherwise.
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lore.kernel.org/r/20221122161017.2426828-14-ardb@kernel.org
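
The position-independence trick the patch relies on: rather than using rva() offsets against the %ebp base that startup_32 established, the function now computes its own base address with a call/pop sequence, so it no longer cares where it was loaded or who called it. A minimal standalone sketch of the pattern, with illustrative names (pic_base_sketch and my_symbol are not from the patch):

	.text
	.code32
pic_base_sketch:
	pushl	%ebp			/* %ebp is callee-saved in the 32-bit convention */

	call	0f			/* call pushes the runtime address of 0: */
0:	popl	%ebp			/* %ebp = runtime address of label 0 */

	/* runtime address of my_symbol = PIC base + link-time distance */
	leal	(my_symbol - 0b)(%ebp), %eax

	popl	%ebp
	ret

This is the same idea behind the __x86.get_pc_thunk helpers that compilers emit for 32-bit PIC code, and it is what allows the function to live in .text and eventually move to a separate compilation unit.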
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/boot/compressed/head_64.S	35	+++++++++++++++++-------------------
1 file changed, 19 insertions(+), 16 deletions(-)
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 16cccc2d9418..30ba541ddf30 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -251,6 +251,11 @@ SYM_FUNC_START(startup_32)
 	movl	$__BOOT_TSS, %eax
 	ltr	%ax
 
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+	/* Check if the C-bit position is correct when SEV is active */
+	call	startup32_check_sev_cbit
+#endif
+
 	/*
 	 * Setup for the jump to 64bit mode
 	 *
@@ -268,8 +273,6 @@ SYM_FUNC_START(startup_32)
 	leal	rva(startup_64_mixed_mode)(%ebp), %eax
 1:
 #endif
-	/* Check if the C-bit position is correct when SEV is active */
-	call	startup32_check_sev_cbit
 
 	pushl	$__KERNEL_CS
 	pushl	%eax
@@ -724,16 +727,17 @@ SYM_DATA_END_LABEL(boot_idt, SYM_L_GLOBAL, boot_idt_end)
  * succeed. An incorrect C-bit position will map all memory unencrypted, so that
  * the compare will use the encrypted random data and fail.
  */
-	__HEAD
-SYM_FUNC_START(startup32_check_sev_cbit)
 #ifdef CONFIG_AMD_MEM_ENCRYPT
-	pushl	%eax
+	.text
+SYM_FUNC_START(startup32_check_sev_cbit)
 	pushl	%ebx
-	pushl	%ecx
-	pushl	%edx
+	pushl	%ebp
+
+	call	0f
+0:	popl	%ebp
 
 	/* Check for non-zero sev_status */
-	movl	rva(sev_status)(%ebp), %eax
+	movl	(sev_status - 0b)(%ebp), %eax
 	testl	%eax, %eax
 	jz	4f
 
@@ -748,17 +752,18 @@ SYM_FUNC_START(startup32_check_sev_cbit)
 	jnc	2b
 
 	/* Store to memory and keep it in the registers */
-	movl	%eax, rva(sev_check_data)(%ebp)
-	movl	%ebx, rva(sev_check_data+4)(%ebp)
+	leal	(sev_check_data - 0b)(%ebp), %ebp
+	movl	%eax, 0(%ebp)
+	movl	%ebx, 4(%ebp)
 
 	/* Enable paging to see if encryption is active */
 	movl	%cr0, %edx			 /* Backup %cr0 in %edx */
 	movl	$(X86_CR0_PG | X86_CR0_PE), %ecx /* Enable Paging and Protected mode */
 	movl	%ecx, %cr0
 
-	cmpl	%eax, rva(sev_check_data)(%ebp)
+	cmpl	%eax, 0(%ebp)
 	jne	3f
-	cmpl	%ebx, rva(sev_check_data+4)(%ebp)
+	cmpl	%ebx, 4(%ebp)
 	jne	3f
 
 	movl	%edx, %cr0	/* Restore previous %cr0 */
@@ -770,13 +775,11 @@ SYM_FUNC_START(startup32_check_sev_cbit)
 	jmp	3b
 
 4:
-	popl	%edx
-	popl	%ecx
+	popl	%ebp
 	popl	%ebx
-	popl	%eax
-#endif
 	RET
 SYM_FUNC_END(startup32_check_sev_cbit)
+#endif
 
 /*
  * Stack and heap for uncompression
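
For readers following the logic rather than the hunks, a commented outline of the check as it stands after this patch (an illustrative sketch, not the patch verbatim: the label names and the scratch buffer are mine, the real function first bails out when sev_status is zero, and it assumes startup_32 has already loaded identity-mapped page tables with the guessed C-bit position into %cr3):

	.text
	.code32
cbit_check_outline:
	pushl	%ebx			/* callee-saved registers only */
	pushl	%ebp

	call	0f			/* establish a position-independent base */
0:	popl	%ebp

	/* Generate 64 bits of random data; RDRAND sets CF on success */
1:	rdrand	%eax
	jnc	1b
2:	rdrand	%ebx
	jnc	2b

	/* Store it to memory while paging is still disabled */
	leal	(scratch - 0b)(%ebp), %ebp
	movl	%eax, 0(%ebp)
	movl	%ebx, 4(%ebp)

	/* Enable paging so the C-bit in the page tables takes effect */
	movl	%cr0, %edx		/* back up %cr0 */
	movl	$0x80000001, %ecx	/* X86_CR0_PG | X86_CR0_PE */
	movl	%ecx, %cr0

	/*
	 * The store above went out encrypted. With the correct C-bit
	 * position the mapping decrypts the readback and the compares
	 * succeed; an incorrect C-bit position maps the memory
	 * unencrypted, so the compares see ciphertext and fail.
	 */
	cmpl	%eax, 0(%ebp)
	jne	3f
	cmpl	%ebx, 4(%ebp)
	jne	3f

	movl	%edx, %cr0		/* restore %cr0 */
	popl	%ebp
	popl	%ebx
	ret

3:	hlt				/* check failed: halt the machine */
	jmp	3b

	.data
scratch:	.quad	0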