author		Ard Biesheuvel <ard.biesheuvel@linaro.org>	2018-09-19 08:51:41 +0200
committer	Thomas Gleixner <tglx@linutronix.de>	2018-09-27 17:56:48 +0200
commit		b34006c4258c9c86597b6b7123d6a9a3513d6cd7
tree		a853b8946da55ea47b86f862ae0a80fd5f098565 /arch/x86/include/asm/jump_label.h
parent		x86/jump_label: Switch to jump_entry accessors
x86/jump_table: Use relative references
Similar to the arm64 case, 64-bit x86 can benefit from using relative
references rather than absolute ones when emitting struct jump_entry
instances. Not only does this reduce the memory footprint of the entries
themselves by 33%, it also removes the need to carry relocation metadata
for them on relocatable (i.e., KASLR) builds, which saves a fair chunk
of .init space as well (although the savings are not as dramatic as on
arm64).
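
As context for the 33% figure, here is a minimal userspace sketch (not
kernel code; both struct names are invented for the comparison)
contrasting the x86-specific absolute layout removed by this patch with
the generic self-relative layout the series switches to:

	/*
	 * Userspace sketch only: the old absolute layout versus the
	 * new relative one, on a 64-bit target.
	 */
	#include <stdint.h>
	#include <stdio.h>

	struct jump_entry_abs {		/* old: three absolute 64-bit values */
		uint64_t code;
		uint64_t target;
		uint64_t key;
	};

	struct jump_entry_rel {		/* new: self-relative offsets */
		int32_t  code;		/* patch site    - &code   */
		int32_t  target;	/* branch target - &target */
		int64_t  key;		/* key address   - &key    */
	};

	int main(void)
	{
		/* prints 24 vs 16 bytes: one third less per entry */
		printf("absolute: %zu bytes\n", sizeof(struct jump_entry_abs));
		printf("relative: %zu bytes\n", sizeof(struct jump_entry_rel));
		return 0;
	}

Because every field stores an offset from its own address, the entries
are also invariant under relocation, which is why a KASLR kernel no
longer needs to carry relocation records for the __jump_table section.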
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-s390@vger.kernel.org
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Jessica Yu <jeyu@kernel.org>
Link: https://lkml.kernel.org/r/20180919065144.25010-7-ard.biesheuvel@linaro.org
Diffstat (limited to 'arch/x86/include/asm/jump_label.h')
-rw-r--r--	arch/x86/include/asm/jump_label.h	24	++++++++----------------
1 file changed, 8 insertions(+), 16 deletions(-)
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index 8c0de4282659..21efc9d07ed9 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -37,7 +37,8 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran
 		".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
 		".pushsection __jump_table, \"aw\" \n\t"
 		_ASM_ALIGN "\n\t"
-		_ASM_PTR "1b, %l[l_yes], %c0 + %c1 \n\t"
+		".long 1b - ., %l[l_yes] - . \n\t"
+		_ASM_PTR "%c0 + %c1 - .\n\t"
 		".popsection \n\t"
 		: : "i" (key), "i" (branch) : : l_yes);
 
@@ -53,7 +54,8 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool
 		"2:\n\t"
 		".pushsection __jump_table, \"aw\" \n\t"
 		_ASM_ALIGN "\n\t"
-		_ASM_PTR "1b, %l[l_yes], %c0 + %c1 \n\t"
+		".long 1b - ., %l[l_yes] - . \n\t"
+		_ASM_PTR "%c0 + %c1 - .\n\t"
 		".popsection \n\t"
 		: : "i" (key), "i" (branch) : : l_yes);
 
@@ -62,18 +64,6 @@ l_yes:
 	return true;
 }
 
-#ifdef CONFIG_X86_64
-typedef u64 jump_label_t;
-#else
-typedef u32 jump_label_t;
-#endif
-
-struct jump_entry {
-	jump_label_t code;
-	jump_label_t target;
-	jump_label_t key;
-};
-
 #else	/* __ASSEMBLY__ */
 
 .macro STATIC_JUMP_IF_TRUE target, key, def
@@ -88,7 +78,8 @@ struct jump_entry {
 	.endif
 	.pushsection __jump_table, "aw"
 	_ASM_ALIGN
-	_ASM_PTR .Lstatic_jump_\@, \target, \key
+	.long .Lstatic_jump_\@ - ., \target - .
+	_ASM_PTR \key - .
 	.popsection
 .endm
 
@@ -104,7 +95,8 @@ struct jump_entry {
 	.endif
 	.pushsection __jump_table, "aw"
 	_ASM_ALIGN
-	_ASM_PTR .Lstatic_jump_\@, \target, \key + 1
+	.long .Lstatic_jump_\@ - ., \target - .
+	_ASM_PTR \key + 1 - .
 	.popsection
 .endm
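
To see how such self-relative entries are decoded again, here is a
standalone userspace sketch of the consumer side. It mirrors the
generic jump_entry accessors introduced earlier in this series, but the
harness and stand-in variables below are illustrative only. Note how
the branch default rides in the low bit of the key offset, matching the
"%c0 + %c1 - ." and "\key + 1 - ." terms in the diff above:

	/*
	 * Userspace sketch only: each accessor adds the address of the
	 * field itself back to the stored offset to recover the
	 * absolute address. The low bit of the key offset is free for
	 * the branch flag because static keys are word aligned.
	 */
	#include <stdint.h>
	#include <stdio.h>

	struct jump_entry {
		int32_t  code;		/* patch site    - &code   */
		int32_t  target;	/* branch target - &target */
		intptr_t key;		/* key address   - &key; pointer-sized,
					 * since the key may be farther than
					 * +/-2 GiB away (module vs core kernel) */
	};

	static uintptr_t jump_entry_code(const struct jump_entry *entry)
	{
		return (uintptr_t)&entry->code + entry->code;
	}

	static uintptr_t jump_entry_target(const struct jump_entry *entry)
	{
		return (uintptr_t)&entry->target + entry->target;
	}

	static uintptr_t jump_entry_key(const struct jump_entry *entry)
	{
		/* mask off the branch bit before rebasing */
		return (uintptr_t)&entry->key + (entry->key & ~(intptr_t)1);
	}

	static int jump_entry_branch(const struct jump_entry *entry)
	{
		return entry->key & 1;
	}

	int main(void)
	{
		char code_site, jump_target;	/* stand-ins for 1: and l_yes */
		static long key_obj;		/* stand-in for the static_key */
		struct jump_entry e;

		/* what ".long 1b - ., %l[l_yes] - ." and
		 * _ASM_PTR "%c0 + %c1 - ." compute at assembly time,
		 * done here at run time instead */
		e.code   = (int32_t)((intptr_t)&code_site - (intptr_t)&e.code);
		e.target = (int32_t)((intptr_t)&jump_target - (intptr_t)&e.target);
		e.key    = ((intptr_t)&key_obj - (intptr_t)&e.key) + 1; /* branch=true */

		printf("code   ok: %d\n", jump_entry_code(&e) == (uintptr_t)&code_site);
		printf("target ok: %d\n", jump_entry_target(&e) == (uintptr_t)&jump_target);
		printf("key    ok: %d\n", jump_entry_key(&e) == (uintptr_t)&key_obj);
		printf("branch:    %d\n", jump_entry_branch(&e));
		return 0;
	}

code and target can stay 32 bits because a patch site and its branch
target always sit in the same image, within +/-2 GiB of each other; the
key, however, may be referenced from a module while living in the core
kernel, which is why it keeps a pointer-sized slot.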