From a0266c214fab21371a499e6ab1c9385cc6589189 Mon Sep 17 00:00:00 2001
From: Wang Nan
Date: Mon, 5 Jan 2015 19:29:25 +0800
Subject: ARM: kprobes: disallow probing stack consuming instructions

This patch prohibits probing instructions whose stack requirements
cannot be determined statically. Some test cases no longer work after
this modification, so this patch also removes them.

Signed-off-by: Wang Nan
Reviewed-by: Jon Medhurst
Signed-off-by: Jon Medhurst
---
 arch/arm/include/asm/kprobes.h | 1 -
 1 file changed, 1 deletion(-)

(limited to 'arch/arm/include/asm/kprobes.h')

diff --git a/arch/arm/include/asm/kprobes.h b/arch/arm/include/asm/kprobes.h
index 49fa0dfaad33..56f9ac68fbd1 100644
--- a/arch/arm/include/asm/kprobes.h
+++ b/arch/arm/include/asm/kprobes.h
@@ -22,7 +22,6 @@
 #define __ARCH_WANT_KPROBES_INSN_SLOT
 #define MAX_INSN_SIZE			2
 
-#define MAX_STACK_SIZE			64	/* 32 would probably be OK */
 #define flush_insn_slot(p)		do { } while (0)
 #define kretprobe_blacklist_size	0
 
--
cgit v1.2.3


From 0dc016dbd820260b8ea74337980735b8c88d4ef2 Mon Sep 17 00:00:00 2001
From: Wang Nan
Date: Fri, 9 Jan 2015 14:37:36 +0800
Subject: ARM: kprobes: enable OPTPROBES for ARM 32

This patch introduces kprobe optimization (kprobeopt) for ARM 32.

Limitations:
- Currently only kernels compiled with the ARM ISA are supported.
- The offset between the probe point and the optinsn slot must not be
  larger than 32MiB. Masami Hiramatsu suggested replacing 2 words,
  but that would make things more complex; a further patch can add
  that optimization.

Kprobe opt on ARM is simpler than kprobe opt on x86 because an ARM
instruction is always 4 bytes long and 4-byte aligned.

This patch replaces the probed instruction with a 'b' instruction that
branches to trampoline code, which then calls optimized_callback().
optimized_callback() calls opt_pre_handler() to execute the kprobe
handler. It also emulates/simulates the replaced instruction.

When unregistering a kprobe, the deferred unoptimizer may leave the
branch instruction in place until the optimizer is called. Unlike
x86_64, which copies only the probed insn to the area after
optprobe_template_end and re-executes it, this patch calls singlestep
to emulate/simulate the insn directly. A further patch can optimize
this behavior.
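For illustration, the replacement 'b' instruction could be encoded as
in the sketch below (not part of the patch; encode_arm_branch() is a
hypothetical name, the in-tree helper this series actually uses is
arm_gen_branch() from asm/insn.h):

    /* Encode an ARM-state 'b <target>' placed at address pc (cond = AL). */
    static unsigned long encode_arm_branch(unsigned long pc, unsigned long target)
    {
        /* In ARM state, reading PC yields the instruction address + 8. */
        long offset = (long)target - (long)(pc + 8);

        if (offset < -0x02000000 || offset > 0x01fffffc || (offset & 3))
            return 0;   /* outside the +/-32MiB range, or misaligned */

        /* cond = AL (0xe), opcode 'b' (0xa), imm24 = offset >> 2 */
        return 0xea000000 | (((unsigned long)offset >> 2) & 0x00ffffff);
    }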
Signed-off-by: Wang Nan
Acked-by: Masami Hiramatsu
Cc: Will Deacon
Reviewed-by: Jon Medhurst (Tixy)
Signed-off-by: Jon Medhurst
---
 arch/arm/Kconfig                  |   1 +
 arch/arm/include/asm/insn.h       |  29 ++++
 arch/arm/include/asm/kprobes.h    |  29 ++++
 arch/arm/kernel/Makefile          |   2 +-
 arch/arm/kernel/ftrace.c          |   3 +-
 arch/arm/kernel/insn.h            |  29 ----
 arch/arm/kernel/jump_label.c      |   3 +-
 arch/arm/probes/kprobes/Makefile  |   1 +
 arch/arm/probes/kprobes/core.c    |  26 ++-
 arch/arm/probes/kprobes/core.h    |   2 +
 arch/arm/probes/kprobes/opt-arm.c | 322 ++++++++++++++++++++++++++++++++++++++
 11 files changed, 406 insertions(+), 41 deletions(-)
 create mode 100644 arch/arm/include/asm/insn.h
 delete mode 100644 arch/arm/kernel/insn.h
 create mode 100644 arch/arm/probes/kprobes/opt-arm.c

(limited to 'arch/arm/include/asm/kprobes.h')

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 97d07ed60a0b..3d5dc2df835b 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -60,6 +60,7 @@ config ARM
 	select HAVE_MEMBLOCK
 	select HAVE_MOD_ARCH_SPECIFIC if ARM_UNWIND
 	select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
+	select HAVE_OPTPROBES if !THUMB2_KERNEL
 	select HAVE_PERF_EVENTS
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
diff --git a/arch/arm/include/asm/insn.h b/arch/arm/include/asm/insn.h
new file mode 100644
index 000000000000..e96065da4dae
--- /dev/null
+++ b/arch/arm/include/asm/insn.h
@@ -0,0 +1,29 @@
+#ifndef __ASM_ARM_INSN_H
+#define __ASM_ARM_INSN_H
+
+static inline unsigned long
+arm_gen_nop(void)
+{
+#ifdef CONFIG_THUMB2_KERNEL
+	return 0xf3af8000; /* nop.w */
+#else
+	return 0xe1a00000; /* mov r0, r0 */
+#endif
+}
+
+unsigned long
+__arm_gen_branch(unsigned long pc, unsigned long addr, bool link);
+
+static inline unsigned long
+arm_gen_branch(unsigned long pc, unsigned long addr)
+{
+	return __arm_gen_branch(pc, addr, false);
+}
+
+static inline unsigned long
+arm_gen_branch_link(unsigned long pc, unsigned long addr)
+{
+	return __arm_gen_branch(pc, addr, true);
+}
+
+#endif
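To make the intent of these helpers concrete, a caller might combine
them with patch_text() from asm/patch.h roughly as follows (a minimal
sketch; patch_callsite() and its arguments are made up for
illustration):

    #include <asm/insn.h>
    #include <asm/patch.h>

    static void patch_callsite(unsigned long callsite, unsigned long target)
    {
        /* Generate a 'bl target' to be placed at callsite. */
        unsigned long insn = arm_gen_branch_link(callsite, target);

        if (insn == 0)              /* target out of branch range */
            insn = arm_gen_nop();   /* fall back to a NOP */

        patch_text((void *)callsite, insn);
    }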
diff --git a/arch/arm/include/asm/kprobes.h b/arch/arm/include/asm/kprobes.h
index 56f9ac68fbd1..50ff3bc7928e 100644
--- a/arch/arm/include/asm/kprobes.h
+++ b/arch/arm/include/asm/kprobes.h
@@ -50,5 +50,34 @@ int kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr);
 int kprobe_exceptions_notify(struct notifier_block *self,
 			     unsigned long val, void *data);
 
+/* optinsn template addresses */
+extern __visible kprobe_opcode_t optprobe_template_entry;
+extern __visible kprobe_opcode_t optprobe_template_val;
+extern __visible kprobe_opcode_t optprobe_template_call;
+extern __visible kprobe_opcode_t optprobe_template_end;
+extern __visible kprobe_opcode_t optprobe_template_sub_sp;
+extern __visible kprobe_opcode_t optprobe_template_add_sp;
+
+#define MAX_OPTIMIZED_LENGTH	4
+#define MAX_OPTINSN_SIZE				\
+	((unsigned long)&optprobe_template_end -	\
+	 (unsigned long)&optprobe_template_entry)
+#define RELATIVEJUMP_SIZE	4
+
+struct arch_optimized_insn {
+	/*
+	 * copy of the original instructions.
+	 * Different from x86, ARM kprobe_opcode_t is u32.
+	 */
+#define MAX_COPIED_INSN	DIV_ROUND_UP(RELATIVEJUMP_SIZE, sizeof(kprobe_opcode_t))
+	kprobe_opcode_t copied_insn[MAX_COPIED_INSN];
+	/* detour code buffer */
+	kprobe_opcode_t *insn;
+	/*
+	 * We always copy one instruction on ARM,
+	 * so size will always be 4, and unlike x86, there is no
+	 * need for a size field.
+	 */
+};
+
 #endif /* _ARM_KPROBES_H */
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 9c51a433e025..902397dd1000 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -52,7 +52,7 @@ obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o insn.o
 obj-$(CONFIG_JUMP_LABEL)	+= jump_label.o insn.o patch.o
 obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o
 # Main staffs in KPROBES are in arch/arm/probes/ .
-obj-$(CONFIG_KPROBES)		+= patch.o
+obj-$(CONFIG_KPROBES)		+= patch.o insn.o
 obj-$(CONFIG_OABI_COMPAT)	+= sys_oabi-compat.o
 obj-$(CONFIG_ARM_THUMBEE)	+= thumbee.o
 obj-$(CONFIG_KGDB)		+= kgdb.o patch.o
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index b8c75e45a950..709ee1d6d4df 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -20,8 +20,7 @@
 #include <asm/cacheflush.h>
 #include <asm/opcodes.h>
 #include <asm/ftrace.h>
-
-#include "insn.h"
+#include <asm/insn.h>
 
 #ifdef CONFIG_THUMB2_KERNEL
 #define	NOP		0xf85deb04	/* pop.w {lr} */
diff --git a/arch/arm/kernel/insn.h b/arch/arm/kernel/insn.h
deleted file mode 100644
index e96065da4dae..000000000000
--- a/arch/arm/kernel/insn.h
+++ /dev/null
@@ -1,29 +0,0 @@
-#ifndef __ASM_ARM_INSN_H
-#define __ASM_ARM_INSN_H
-
-static inline unsigned long
-arm_gen_nop(void)
-{
-#ifdef CONFIG_THUMB2_KERNEL
-	return 0xf3af8000; /* nop.w */
-#else
-	return 0xe1a00000; /* mov r0, r0 */
-#endif
-}
-
-unsigned long
-__arm_gen_branch(unsigned long pc, unsigned long addr, bool link);
-
-static inline unsigned long
-arm_gen_branch(unsigned long pc, unsigned long addr)
-{
-	return __arm_gen_branch(pc, addr, false);
-}
-
-static inline unsigned long
-arm_gen_branch_link(unsigned long pc, unsigned long addr)
-{
-	return __arm_gen_branch(pc, addr, true);
-}
-
-#endif
diff --git a/arch/arm/kernel/jump_label.c b/arch/arm/kernel/jump_label.c
index d8da075959bf..e39cbf488cfe 100644
--- a/arch/arm/kernel/jump_label.c
+++ b/arch/arm/kernel/jump_label.c
@@ -1,8 +1,7 @@
 #include <linux/kernel.h>
 #include <linux/jump_label.h>
 #include <asm/patch.h>
-
-#include "insn.h"
+#include <asm/insn.h>
 
 #ifdef HAVE_JUMP_LABEL
diff --git a/arch/arm/probes/kprobes/Makefile b/arch/arm/probes/kprobes/Makefile
index bc8d504c3d78..76a36bf102b7 100644
--- a/arch/arm/probes/kprobes/Makefile
+++ b/arch/arm/probes/kprobes/Makefile
@@ -7,5 +7,6 @@ obj-$(CONFIG_KPROBES) += actions-thumb.o checkers-thumb.o
 test-kprobes-objs	+= test-thumb.o
 else
 obj-$(CONFIG_KPROBES)		+= actions-arm.o checkers-arm.o
+obj-$(CONFIG_OPTPROBES)		+= opt-arm.o
 test-kprobes-objs	+= test-arm.o
 endif
diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c
index 3a58db4cc1c6..a4ec240ee7ba 100644
--- a/arch/arm/probes/kprobes/core.c
+++ b/arch/arm/probes/kprobes/core.c
@@ -163,19 +163,31 @@ void __kprobes arch_arm_kprobe(struct kprobe *p)
  * memory. It is also needed to atomically set the two half-words of a 32-bit
  * Thumb breakpoint.
  */
-int __kprobes __arch_disarm_kprobe(void *p)
-{
-	struct kprobe *kp = p;
-	void *addr = (void *)((uintptr_t)kp->addr & ~1);
-
-	__patch_text(addr, kp->opcode);
+struct patch {
+	void *addr;
+	unsigned int insn;
+};
 
+static int __kprobes_remove_breakpoint(void *data)
+{
+	struct patch *p = data;
+
+	__patch_text(p->addr, p->insn);
 	return 0;
 }
 
+void __kprobes kprobes_remove_breakpoint(void *addr, unsigned int insn)
+{
+	struct patch p = {
+		.addr = addr,
+		.insn = insn,
+	};
+	stop_machine(__kprobes_remove_breakpoint, &p, cpu_online_mask);
+}
+
 void __kprobes arch_disarm_kprobe(struct kprobe *p)
 {
-	stop_machine(__arch_disarm_kprobe, p, cpu_online_mask);
+	kprobes_remove_breakpoint((void *)((uintptr_t)p->addr & ~1),
+				  p->opcode);
 }
 
 void __kprobes arch_remove_kprobe(struct kprobe *p)
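The point of this refactoring is to expose the stop_machine()-protected
patching path for reuse by the optprobe code added below. A minimal
sketch of a caller (restore_insn() is a made-up name; the real user is
arch_optimize_kprobes() in opt-arm.c):

    #include "core.h"   /* for kprobes_remove_breakpoint() */

    static void restore_insn(struct kprobe *kp)
    {
        /*
         * Atomically replace the breakpoint with the saved opcode;
         * bit 0 of the address is cleared in case of a Thumb probe.
         */
        kprobes_remove_breakpoint((void *)((uintptr_t)kp->addr & ~1),
                                  kp->opcode);
    }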
diff --git a/arch/arm/probes/kprobes/core.h b/arch/arm/probes/kprobes/core.h
index f88c79fe632a..b3036c587a76 100644
--- a/arch/arm/probes/kprobes/core.h
+++ b/arch/arm/probes/kprobes/core.h
@@ -30,6 +30,8 @@
 #define KPROBE_THUMB16_BREAKPOINT_INSTRUCTION	0xde18
 #define KPROBE_THUMB32_BREAKPOINT_INSTRUCTION	0xf7f0a018
 
+extern void kprobes_remove_breakpoint(void *addr, unsigned int insn);
+
 enum probes_insn __kprobes
 kprobe_decode_ldmstm(kprobe_opcode_t insn, struct arch_probes_insn *asi,
 		     const struct decode_header *h);
diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c
new file mode 100644
index 000000000000..13d5232118df
--- /dev/null
+++ b/arch/arm/probes/kprobes/opt-arm.c
@@ -0,0 +1,322 @@
+/*
+ * Kernel Probes Jump Optimization (Optprobes)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2002, 2004
+ * Copyright (C) Hitachi Ltd., 2012
+ * Copyright (C) Huawei Inc., 2014
+ */
+
+#include <linux/kprobes.h>
+#include <linux/jump_label.h>
+#include <asm/kprobes.h>
+#include <asm/cacheflush.h>
+/* for arm_gen_branch */
+#include <asm/insn.h>
+/* for patch_text */
+#include <asm/patch.h>
+
+#include "core.h"
+
+/*
+ * NOTE: the first sub and add instruction will be modified according
+ * to the stack cost of the instruction.
+ */
+asm (
+	".global optprobe_template_entry\n"
+	"optprobe_template_entry:\n"
+	".global optprobe_template_sub_sp\n"
+	"optprobe_template_sub_sp:"
+	"	sub	sp, sp, #0xff\n"
+	"	stmia	sp, {r0 - r14} \n"
+	".global optprobe_template_add_sp\n"
+	"optprobe_template_add_sp:"
+	"	add	r3, sp, #0xff\n"
+	"	str	r3, [sp, #52]\n"
+	"	mrs	r4, cpsr\n"
+	"	str	r4, [sp, #64]\n"
+	"	mov	r1, sp\n"
+	"	ldr	r0, 1f\n"
+	"	ldr	r2, 2f\n"
+	/*
+	 * AEABI requires an 8-byte-aligned stack. If
+	 * SP % 8 != 0 (SP % 4 == 0 should be ensured),
+	 * allocate more bytes here.
+	 */
+	"	and	r4, sp, #4\n"
+	"	sub	sp, sp, r4\n"
+#if __LINUX_ARM_ARCH__ >= 5
+	"	blx	r2\n"
+#else
+	"	mov	lr, pc\n"
+	"	mov	pc, r2\n"
+#endif
+	"	add	sp, sp, r4\n"
+	"	ldr	r1, [sp, #64]\n"
+	"	tst	r1, #"__stringify(PSR_T_BIT)"\n"
+	"	ldrne	r2, [sp, #60]\n"
+	"	orrne	r2, #1\n"
+	"	strne	r2, [sp, #60] @ set bit0 of PC for thumb\n"
+	"	msr	cpsr_cxsf, r1\n"
+	"	ldmia	sp, {r0 - r15}\n"
+	".global optprobe_template_val\n"
+	"optprobe_template_val:\n"
+	"1:	.long 0\n"
+	".global optprobe_template_call\n"
+	"optprobe_template_call:\n"
+	"2:	.long 0\n"
+	".global optprobe_template_end\n"
+	"optprobe_template_end:\n");
+
+#define TMPL_VAL_IDX \
+	((unsigned long *)&optprobe_template_val - (unsigned long *)&optprobe_template_entry)
+#define TMPL_CALL_IDX \
+	((unsigned long *)&optprobe_template_call - (unsigned long *)&optprobe_template_entry)
+#define TMPL_END_IDX \
+	((unsigned long *)&optprobe_template_end - (unsigned long *)&optprobe_template_entry)
+#define TMPL_ADD_SP \
+	((unsigned long *)&optprobe_template_add_sp - (unsigned long *)&optprobe_template_entry)
+#define TMPL_SUB_SP \
+	((unsigned long *)&optprobe_template_sub_sp - (unsigned long *)&optprobe_template_entry)
+
+/*
+ * ARM can always optimize an instruction when using the ARM ISA, except
+ * for instructions like 'str r0, [sp, r1]', which store to the stack and
+ * whose stack space consumption cannot be determined statically.
+ */
+int arch_prepared_optinsn(struct arch_optimized_insn *optinsn)
+{
+	return optinsn->insn != NULL;
+}
+
+/*
+ * In the ARM ISA, kprobe opt always replaces one instruction (4 bytes
+ * long and 4-byte aligned). It is impossible to encounter another
+ * kprobe in the address range, so always return 0.
+ */
+int arch_check_optimized_kprobe(struct optimized_kprobe *op)
+{
+	return 0;
+}
+
+/* Caller must ensure addr & 3 == 0 */
+static int can_optimize(struct kprobe *kp)
+{
+	if (kp->ainsn.stack_space < 0)
+		return 0;
+	/*
+	 * 255 is the biggest immediate that can be used in
+	 * 'sub r0, r0, #imm'. Numbers larger than 255 need
+	 * special encoding.
+	 */
+	if (kp->ainsn.stack_space > 255 - sizeof(struct pt_regs))
+		return 0;
+	return 1;
+}
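For concreteness: 255 is the largest immediate an ARM data-processing
instruction can encode without rotation, and on 32-bit ARM struct
pt_regs is 18 words (72 bytes), so this check effectively limits the
probed instruction to 255 - 72 = 183 bytes of stack. A compile-time
restatement of that assumption (illustrative only, not part of the
patch):

    /* 'sub sp, sp, #imm' must stay encodable with room for pt_regs. */
    BUILD_BUG_ON(sizeof(struct pt_regs) > 255);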
+/* Free optimized instruction slot */
+static void
+__arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
+{
+	if (op->optinsn.insn) {
+		free_optinsn_slot(op->optinsn.insn, dirty);
+		op->optinsn.insn = NULL;
+	}
+}
+
+extern void kprobe_handler(struct pt_regs *regs);
+
+static void
+optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
+{
+	unsigned long flags;
+	struct kprobe *p = &op->kp;
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+	/* Save skipped registers */
+	regs->ARM_pc = (unsigned long)op->kp.addr;
+	regs->ARM_ORIG_r0 = ~0UL;
+
+	local_irq_save(flags);
+
+	if (kprobe_running()) {
+		kprobes_inc_nmissed_count(&op->kp);
+	} else {
+		__this_cpu_write(current_kprobe, &op->kp);
+		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+		opt_pre_handler(&op->kp, regs);
+		__this_cpu_write(current_kprobe, NULL);
+	}
+
+	/* In each case, we must singlestep the replaced instruction. */
+	op->kp.ainsn.insn_singlestep(p->opcode, &p->ainsn, regs);
+
+	local_irq_restore(flags);
+}
+
+int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *orig)
+{
+	kprobe_opcode_t *code;
+	unsigned long rel_chk;
+	unsigned long val;
+	unsigned long stack_protect = sizeof(struct pt_regs);
+
+	if (!can_optimize(orig))
+		return -EILSEQ;
+
+	code = get_optinsn_slot();
+	if (!code)
+		return -ENOMEM;
+
+	/*
+	 * Verify if the address gap is in the 32MiB range, because this
+	 * uses a relative jump.
+	 *
+	 * kprobe opt uses a 'b' instruction to branch to optinsn.insn.
+	 * According to the ARM manual, the branch instruction is:
+	 *
+	 *   31  28 27           24 23             0
+	 *  +------+---+---+---+---+----------------+
+	 *  | cond | 1 | 0 | 1 | 0 |      imm24     |
+	 *  +------+---+---+---+---+----------------+
+	 *
+	 * imm24 is a signed 24-bit integer. The real branch offset is
+	 * computed by: imm32 = SignExtend(imm24:'00', 32);
+	 *
+	 * So the maximum forward branch should be:
+	 *   (0x007fffff << 2) = 0x01fffffc =  0x1fffffc
+	 * The maximum backward branch should be:
+	 *   (0xff800000 << 2) = 0xfe000000 = -0x2000000
+	 *
+	 * We can simply check (rel & 0xfe000003):
+	 *   if rel is positive, (rel & 0xfe000000) should be 0;
+	 *   if rel is negative, (rel & 0xfe000000) should be 0xfe000000;
+	 *   the last '3' is used for alignment checking.
+	 */
+	rel_chk = (unsigned long)((long)code -
+			(long)orig->addr + 8) & 0xfe000003;
+
+	if ((rel_chk != 0) && (rel_chk != 0xfe000000)) {
+		/*
+		 * Different from x86, we free the code buffer directly
+		 * instead of calling __arch_remove_optimized_kprobe()
+		 * because we have not filled in any field of op yet.
+		 */
+		free_optinsn_slot(code, 0);
+		return -ERANGE;
+	}
+
+	/* Copy arch-dep-instance from template. */
+	memcpy(code, &optprobe_template_entry,
+	       TMPL_END_IDX * sizeof(kprobe_opcode_t));
+
+	/* Adjust buffer according to instruction. */
+	BUG_ON(orig->ainsn.stack_space < 0);
+
+	stack_protect += orig->ainsn.stack_space;
+
+	/* Should have been filtered by can_optimize(). */
+	BUG_ON(stack_protect > 255);
+
+	/* Create a 'sub sp, sp, #' */
+	code[TMPL_SUB_SP] = __opcode_to_mem_arm(0xe24dd000 | stack_protect);
+	/* Create an 'add r3, sp, #' */
+	code[TMPL_ADD_SP] = __opcode_to_mem_arm(0xe28d3000 | stack_protect);
+
+	/* Set probe information */
+	val = (unsigned long)op;
+	code[TMPL_VAL_IDX] = val;
+
+	/* Set probe function call */
+	val = (unsigned long)optimized_callback;
+	code[TMPL_CALL_IDX] = val;
+
+	flush_icache_range((unsigned long)code,
+			   (unsigned long)(&code[TMPL_END_IDX]));
+
+	/* Setting op->optinsn.insn means prepared. */
+	op->optinsn.insn = code;
+	return 0;
+}
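Written out as plain arithmetic, the mask trick above is equivalent to
a signed range check plus an alignment check. A readable restatement
(a sketch only; branch_reachable() is a hypothetical name):

    /*
     * pc_read is the value the CPU reads from PC, i.e. the branch
     * instruction's address + 8 in ARM state.
     */
    static bool branch_reachable(unsigned long pc_read, unsigned long target)
    {
        long rel = (long)target - (long)pc_read;

        return rel >= -0x02000000 && rel <= 0x01fffffc && !(rel & 3);
    }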
+void __kprobes arch_optimize_kprobes(struct list_head *oplist)
+{
+	struct optimized_kprobe *op, *tmp;
+
+	list_for_each_entry_safe(op, tmp, oplist, list) {
+		unsigned long insn;
+
+		WARN_ON(kprobe_disabled(&op->kp));
+
+		/*
+		 * Back up the instructions which will be replaced
+		 * by the jump address.
+		 */
+		memcpy(op->optinsn.copied_insn, op->kp.addr,
+		       RELATIVEJUMP_SIZE);
+
+		insn = arm_gen_branch((unsigned long)op->kp.addr,
+				      (unsigned long)op->optinsn.insn);
+		BUG_ON(insn == 0);
+
+		/*
+		 * Make it a conditional branch if the replaced insn
+		 * is conditional.
+		 */
+		insn = (__mem_to_opcode_arm(
+			op->optinsn.copied_insn[0]) & 0xf0000000) |
+			(insn & 0x0fffffff);
+
+		/*
+		 * Similar to __arch_disarm_kprobe, operations which
+		 * remove breakpoints must be wrapped in stop_machine
+		 * to avoid races.
+		 */
+		kprobes_remove_breakpoint(op->kp.addr, insn);
+
+		list_del_init(&op->list);
+	}
+}
+
+void arch_unoptimize_kprobe(struct optimized_kprobe *op)
+{
+	arch_arm_kprobe(&op->kp);
+}
+
+/*
+ * Recover original instructions and breakpoints from relative jumps.
+ * Caller must hold kprobe_mutex.
+ */
+void arch_unoptimize_kprobes(struct list_head *oplist,
+			     struct list_head *done_list)
+{
+	struct optimized_kprobe *op, *tmp;
+
+	list_for_each_entry_safe(op, tmp, oplist, list) {
+		arch_unoptimize_kprobe(op);
+		list_move(&op->list, done_list);
+	}
+}
+
+int arch_within_optimized_kprobe(struct optimized_kprobe *op,
+				 unsigned long addr)
+{
+	return ((unsigned long)op->kp.addr <= addr &&
+		(unsigned long)op->kp.addr + RELATIVEJUMP_SIZE > addr);
+}
+
+void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
+{
+	__arch_remove_optimized_kprobe(op, 1);
+}
--
cgit v1.2.3


From bfc9657d752c47d59dc0bab85ebdc19cf60100dd Mon Sep 17 00:00:00 2001
From: Wang Nan
Date: Mon, 5 Jan 2015 19:34:47 +0800
Subject: ARM: optprobes: execute instruction during restore if possible.

This patch removes software emulation/simulation for most probed
instructions. If the instruction doesn't use PC-relative addressing,
it is executed directly by the restore code in the code template,
which becomes:

  ldmia sp, {r0 - r14}   @ restore all registers except PC
  <probed instruction>   @ execute the probed instruction directly
  b next_insn            @ branch to the instruction after the probe
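From the user's point of view, all of this remains transparent behind
the ordinary kprobes API. A minimal sketch of a module that would
exercise the optimized path (the probed symbol is arbitrary, any
probeable ARM-state function would do):

    #include <linux/module.h>
    #include <linux/kprobes.h>

    static int pre(struct kprobe *p, struct pt_regs *regs)
    {
        pr_info("hit %pS\n", (void *)instruction_pointer(regs));
        return 0;
    }

    static struct kprobe kp = {
        .symbol_name = "do_sys_open",   /* hypothetical probe target */
        .pre_handler = pre,
    };

    static int __init demo_init(void)
    {
        /*
         * If the probed insn qualifies, the kprobes core later
         * converts the breakpoint into an optimized (branch) probe.
         */
        return register_kprobe(&kp);
    }

    static void __exit demo_exit(void)
    {
        unregister_kprobe(&kp);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");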
Signed-off-by: Wang Nan
Reviewed-by: Masami Hiramatsu
Signed-off-by: Jon Medhurst
---
 arch/arm/include/asm/kprobes.h    |  3 +++
 arch/arm/include/asm/probes.h     |  1 +
 arch/arm/probes/kprobes/opt-arm.c | 52 +++++++++++++++++++++++++++++++++++++--
 3 files changed, 54 insertions(+), 2 deletions(-)

(limited to 'arch/arm/include/asm/kprobes.h')

diff --git a/arch/arm/include/asm/kprobes.h b/arch/arm/include/asm/kprobes.h
index 50ff3bc7928e..3ea9be559726 100644
--- a/arch/arm/include/asm/kprobes.h
+++ b/arch/arm/include/asm/kprobes.h
@@ -57,6 +57,9 @@ extern __visible kprobe_opcode_t optprobe_template_call;
 extern __visible kprobe_opcode_t optprobe_template_end;
 extern __visible kprobe_opcode_t optprobe_template_sub_sp;
 extern __visible kprobe_opcode_t optprobe_template_add_sp;
+extern __visible kprobe_opcode_t optprobe_template_restore_begin;
+extern __visible kprobe_opcode_t optprobe_template_restore_orig_insn;
+extern __visible kprobe_opcode_t optprobe_template_restore_end;
 
 #define MAX_OPTIMIZED_LENGTH	4
 #define MAX_OPTINSN_SIZE				\
diff --git a/arch/arm/include/asm/probes.h b/arch/arm/include/asm/probes.h
index b668e60f759c..1e5b9bb92270 100644
--- a/arch/arm/include/asm/probes.h
+++ b/arch/arm/include/asm/probes.h
@@ -42,6 +42,7 @@ struct arch_probes_insn {
 	probes_insn_fn_t	*insn_fn;
 	int			stack_space;
 	unsigned long		register_usage_flags;
+	bool			kprobe_direct_exec;
 };
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c
index 13d5232118df..bcdecc25461b 100644
--- a/arch/arm/probes/kprobes/opt-arm.c
+++ b/arch/arm/probes/kprobes/opt-arm.c
@@ -31,6 +31,14 @@
 
 #include "core.h"
 
+/*
+ * See register_usage_flags. If the probed instruction doesn't use PC,
+ * we can copy it into the template and have it executed directly,
+ * without simulation or emulation.
+ */
+#define ARM_REG_PC	15
+#define can_kprobe_direct_exec(m)	(!test_bit(ARM_REG_PC, &(m)))
+
 /*
  * NOTE: the first sub and add instruction will be modified according
  * to the stack cost of the instruction.
@@ -71,7 +79,15 @@ asm (
 	"	orrne	r2, #1\n"
 	"	strne	r2, [sp, #60] @ set bit0 of PC for thumb\n"
 	"	msr	cpsr_cxsf, r1\n"
+	".global optprobe_template_restore_begin\n"
+	"optprobe_template_restore_begin:\n"
 	"	ldmia	sp, {r0 - r15}\n"
+	".global optprobe_template_restore_orig_insn\n"
+	"optprobe_template_restore_orig_insn:\n"
+	"	nop\n"
+	".global optprobe_template_restore_end\n"
+	"optprobe_template_restore_end:\n"
+	"	nop\n"
 	".global optprobe_template_val\n"
 	"optprobe_template_val:\n"
 	"1:	.long 0\n"
@@ -91,6 +107,12 @@ asm (
 	((unsigned long *)&optprobe_template_add_sp - (unsigned long *)&optprobe_template_entry)
 #define TMPL_SUB_SP \
 	((unsigned long *)&optprobe_template_sub_sp - (unsigned long *)&optprobe_template_entry)
+#define TMPL_RESTORE_BEGIN \
+	((unsigned long *)&optprobe_template_restore_begin - (unsigned long *)&optprobe_template_entry)
+#define TMPL_RESTORE_ORIGN_INSN \
+	((unsigned long *)&optprobe_template_restore_orig_insn - (unsigned long *)&optprobe_template_entry)
+#define TMPL_RESTORE_END \
+	((unsigned long *)&optprobe_template_restore_end - (unsigned long *)&optprobe_template_entry)
 
 /*
  * ARM can always optimize an instruction when using the ARM ISA, except
@@ -160,8 +182,12 @@ optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
 		__this_cpu_write(current_kprobe, NULL);
 	}
 
-	/* In each case, we must singlestep the replaced instruction. */
-	op->kp.ainsn.insn_singlestep(p->opcode, &p->ainsn, regs);
+	/*
+	 * We singlestep the replaced instruction only when it can't be
+	 * executed directly during restore.
+	 */
+	if (!p->ainsn.kprobe_direct_exec)
+		op->kp.ainsn.insn_singlestep(p->opcode, &p->ainsn, regs);
 
 	local_irq_restore(flags);
 }
@@ -243,6 +269,28 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *or
 	val = (unsigned long)optimized_callback;
 	code[TMPL_CALL_IDX] = val;
 
+	/* If possible, copy the insn and have it executed during restore */
+	orig->ainsn.kprobe_direct_exec = false;
+	if (can_kprobe_direct_exec(orig->ainsn.register_usage_flags)) {
+		kprobe_opcode_t final_branch = arm_gen_branch(
+				(unsigned long)(&code[TMPL_RESTORE_END]),
+				(unsigned long)(op->kp.addr) + 4);
+		if (final_branch != 0) {
+			/*
+			 * Replace the original 'ldmia sp, {r0 - r15}' with
+			 * 'ldmia sp, {r0 - r14}' to restore all registers
+			 * except pc.
+			 */
+			code[TMPL_RESTORE_BEGIN] = __opcode_to_mem_arm(0xe89d7fff);
+
+			/* The original probed instruction */
+			code[TMPL_RESTORE_ORIGN_INSN] = __opcode_to_mem_arm(orig->opcode);
+
+			/* Jump back to the next instruction */
+			code[TMPL_RESTORE_END] = __opcode_to_mem_arm(final_branch);
+			orig->ainsn.kprobe_direct_exec = true;
+		}
+	}
+
 	flush_icache_range((unsigned long)code,
 			   (unsigned long)(&code[TMPL_END_IDX]));
 
--
cgit v1.2.3