author		Uros Bizjak <ubizjak@gmail.com>	2023-11-03 11:48:22 +0100
committer	Ingo Molnar <mingo@kernel.org>	2023-11-30 20:09:49 +0100
commit		0978d64f9406122c369d5f46e1eb855646f6c32c (patch)
tree		569639ca1a93adaf5fd067d931e580f28ea13457 /arch/x86/kernel/acpi
parent		x86/callthunks: Fix and unify call thunks assembly snippets (diff)
download	linux-0978d64f9406122c369d5f46e1eb855646f6c32c.tar.xz
		linux-0978d64f9406122c369d5f46e1eb855646f6c32c.zip
x86/acpi: Use %rip-relative addressing in wakeup_64.S
This is a "nice-to-have" change with minor code generation benefits:

 - An instruction with a %rip-relative address operand is one byte
   shorter than its absolute-address counterpart,
 - it is also compatible with position-independent executable (-fpie)
   builds,
 - it is also consistent with what the compiler emits by default when
   a symbol is accessed.

No functional changes intended.

Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Rafael J. Wysocki <rafael@kernel.org>
Link: https://lore.kernel.org/r/20231103104900.409470-1-ubizjak@gmail.com
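
For illustration only (this snippet is not part of the patch): assuming
GNU as picks the usual ModRM/SIB encoding for the absolute form, the
saved_magic load from the first hunk assembles to:

	movq	saved_magic, %rax		# 48 8b 04 25 <disp32>  (8 bytes, absolute)
	movq	saved_magic(%rip), %rax		# 48 8b 05 <disp32>     (7 bytes, %rip-relative)

The saved byte is the SIB byte that the no-base, no-index absolute form
requires; the %rip-relative form needs no SIB byte at all.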
Diffstat (limited to 'arch/x86/kernel/acpi')
-rw-r--r--	arch/x86/kernel/acpi/wakeup_64.S | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index d5d8a352eafa..94ff83f3d3fe 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -17,7 +17,7 @@
  * Hooray, we are in Long 64-bit mode (but still running in low memory)
  */
 SYM_FUNC_START(wakeup_long64)
-	movq	saved_magic, %rax
+	movq	saved_magic(%rip), %rax
 	movq	$0x123456789abcdef0, %rdx
 	cmpq	%rdx, %rax
 	je	2f
@@ -33,14 +33,14 @@ SYM_FUNC_START(wakeup_long64)
 	movw	%ax, %es
 	movw	%ax, %fs
 	movw	%ax, %gs
-	movq	saved_rsp, %rsp
+	movq	saved_rsp(%rip), %rsp
 
-	movq	saved_rbx, %rbx
-	movq	saved_rdi, %rdi
-	movq	saved_rsi, %rsi
-	movq	saved_rbp, %rbp
+	movq	saved_rbx(%rip), %rbx
+	movq	saved_rdi(%rip), %rdi
+	movq	saved_rsi(%rip), %rsi
+	movq	saved_rbp(%rip), %rbp
 
-	movq	saved_rip, %rax
+	movq	saved_rip(%rip), %rax
 	ANNOTATE_RETPOLINE_SAFE
 	jmp	*%rax
 SYM_FUNC_END(wakeup_long64)
@@ -72,11 +72,11 @@ SYM_FUNC_START(do_suspend_lowlevel)
 
 	movq	$.Lresume_point, saved_rip(%rip)
 
-	movq	%rsp, saved_rsp
-	movq	%rbp, saved_rbp
-	movq	%rbx, saved_rbx
-	movq	%rdi, saved_rdi
-	movq	%rsi, saved_rsi
+	movq	%rsp, saved_rsp(%rip)
+	movq	%rbp, saved_rbp(%rip)
+	movq	%rbx, saved_rbx(%rip)
+	movq	%rdi, saved_rdi(%rip)
+	movq	%rsi, saved_rsi(%rip)
 
 	addq	$8, %rsp
 	movl	$3, %edi
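
A hedged sketch of the -fpie angle (not from the patch; byte encodings
assume GNU as, with the disp32 shown as zeroes pending relocation): the
absolute store needs an R_X86_64_32S relocation, which only resolves
while the symbol sits at a sign-extended 32-bit address, whereas the
%rip-relative store gets a position-independent R_X86_64_PC32:

	movq	%rsp, saved_rsp			# 48 89 24 25 00 00 00 00	R_X86_64_32S	saved_rsp
	movq	%rsp, saved_rsp(%rip)		# 48 89 25 00 00 00 00		R_X86_64_PC32	saved_rsp-0x4

Assembling both forms and running objdump -dr on the object file shows
these relocation types directly.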