author | Will Deacon <will.deacon@arm.com> | 2015-05-01 14:48:17 +0200
---|---|---
committer | Will Deacon <will.deacon@arm.com> | 2015-05-05 13:21:52 +0200
commit | b9a95e85bbc56f168f078885f414f305b4589c4b (patch) |
tree | cc6acb0ea753833355d3db50ab2b114997fb542f | /arch/arm64/kernel
parent | Linux 4.1-rc2 (diff) |
download | linux-b9a95e85bbc56f168f078885f414f305b4589c4b.tar.xz | linux-b9a95e85bbc56f168f078885f414f305b4589c4b.zip
Revert "arm64: alternative: Allow immediate branch as alternative instruction"
This reverts most of commit fef7f2b2010381c795ae43743ad31931cc58f5ad.
It turns out that there are a couple of problems with the way we're
fixing up branch instructions used as part of alternative instruction
sequences:
(1) If the branch target is also in the alternative sequence, we'll
generate a branch into the .altinstructions section which actually
gets freed.
(2) The calls to aarch64_insn_{read,write} bring an awful lot more
code into the patching path (e.g. taking locks, poking the fixmap,
invalidating the TLB) which isn't actually needed for the early
patching run under stop_machine, but makes the use of alternative
sequences extremely fragile (as we can't patch code that could be
used by the patching code).
Given that no code actually requires alternative patching of immediate
branches, let's remove this support for now and revisit it when we've
got a user. We leave the updated size check, since we really do require
the sequences to be the same length.
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
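Background on why branch instructions needed the special handling that is being reverted: a b/bl stores its target as a PC-relative imm26 field, so the same instruction bits copied to a different address point at a different absolute target. The sketch below is a standalone userspace illustration of the decode performed by the removed get_branch_offset() helper; it is not kernel code, and the example encoding and addresses are invented for demonstration.

```c
#include <stdint.h>
#include <stdio.h>

/*
 * Mirror of the removed get_branch_offset(): take a b/bl encoding and
 * return its signed byte offset. Shifting the imm26 field to the top of
 * the word and arithmetic-shifting back down sign-extends it and
 * multiplies by 4 in one step, the same (imm << 6) >> 4 trick used in
 * the reverted code.
 */
static int32_t branch_byte_offset(uint32_t insn)
{
        return (int32_t)(insn << 6) >> 4;
}

int main(void)
{
        uint32_t b_insn  = 0x14000004;  /* hypothetical "b .+16" (imm26 = 4) */
        uint64_t alt_pc  = 0x1000;      /* where the alternative was assembled */
        uint64_t orig_pc = 0x2000;      /* where it would be patched in */

        /* Same bits, different PC: the branch lands 16 bytes past each PC. */
        printf("target if executed in place: 0x%llx\n",
               (unsigned long long)(alt_pc + branch_byte_offset(b_insn)));
        printf("target after a plain memcpy: 0x%llx\n",
               (unsigned long long)(orig_pc + branch_byte_offset(b_insn)));
        return 0;
}
```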
Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r-- | arch/arm64/kernel/alternative.c | 53 |
1 file changed, 1 insertion, 52 deletions
```diff
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index 21033bba9390..28f8365edc4c 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -24,7 +24,6 @@
 #include <asm/cacheflush.h>
 #include <asm/alternative.h>
 #include <asm/cpufeature.h>
-#include <asm/insn.h>
 #include <linux/stop_machine.h>
 
 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
@@ -34,48 +33,6 @@ struct alt_region {
 	struct alt_instr *end;
 };
 
-/*
- * Decode the imm field of a b/bl instruction, and return the byte
- * offset as a signed value (so it can be used when computing a new
- * branch target).
- */
-static s32 get_branch_offset(u32 insn)
-{
-	s32 imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
-
-	/* sign-extend the immediate before turning it into a byte offset */
-	return (imm << 6) >> 4;
-}
-
-static u32 get_alt_insn(u8 *insnptr, u8 *altinsnptr)
-{
-	u32 insn;
-
-	aarch64_insn_read(altinsnptr, &insn);
-
-	/* Stop the world on instructions we don't support... */
-	BUG_ON(aarch64_insn_is_cbz(insn));
-	BUG_ON(aarch64_insn_is_cbnz(insn));
-	BUG_ON(aarch64_insn_is_bcond(insn));
-	/* ... and there is probably more. */
-
-	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
-		enum aarch64_insn_branch_type type;
-		unsigned long target;
-
-		if (aarch64_insn_is_b(insn))
-			type = AARCH64_INSN_BRANCH_NOLINK;
-		else
-			type = AARCH64_INSN_BRANCH_LINK;
-
-		target = (unsigned long)altinsnptr + get_branch_offset(insn);
-		insn = aarch64_insn_gen_branch_imm((unsigned long)insnptr,
-						   target, type);
-	}
-
-	return insn;
-}
-
 static int __apply_alternatives(void *alt_region)
 {
 	struct alt_instr *alt;
@@ -83,9 +40,6 @@ static int __apply_alternatives(void *alt_region)
 	u8 *origptr, *replptr;
 
 	for (alt = region->begin; alt < region->end; alt++) {
-		u32 insn;
-		int i;
-
 		if (!cpus_have_cap(alt->cpufeature))
 			continue;
 
@@ -95,12 +49,7 @@
 
 		origptr = (u8 *)&alt->orig_offset + alt->orig_offset;
 		replptr = (u8 *)&alt->alt_offset + alt->alt_offset;
-
-		for (i = 0; i < alt->alt_len; i += sizeof(insn)) {
-			insn = get_alt_insn(origptr + i, replptr + i);
-			aarch64_insn_write(origptr + i, insn);
-		}
-
+		memcpy(origptr, replptr, alt->alt_len);
 		flush_icache_range((uintptr_t)origptr,
 				   (uintptr_t)(origptr + alt->alt_len));
 	}
```
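One detail worth noting in the code that remains: origptr and replptr are computed as "(u8 *)&alt->orig_offset + alt->orig_offset", i.e. each alt_instr field holds the distance from its own address to the code it describes, which keeps the table position-independent. Below is a minimal userspace sketch of that idiom, using a made-up struct rather than the kernel's real alt_instr:

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical record: the field stores "target minus address of the
 * field itself", the same self-relative scheme __apply_alternatives()
 * resolves for orig_offset/alt_offset. */
struct rel_ref {
        int32_t offset;
};

static void *resolve(struct rel_ref *ref)
{
        return (uint8_t *)&ref->offset + ref->offset;
}

static char payload[] = "replacement bytes";
static struct rel_ref ref;

int main(void)
{
        /* In the kernel the linker emits this difference at build time;
         * here we fill it in by hand for the demonstration. */
        ref.offset = (int32_t)((intptr_t)payload - (intptr_t)&ref.offset);

        printf("resolved: %s\n", (char *)resolve(&ref));
        return 0;
}
```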