author		Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>	2010-12-03 10:54:03 +0100
committer	Ingo Molnar <mingo@elte.hu>				2010-12-06 17:59:30 +0100
commit		61f4e13ffd85c037a942c5b7fd13f2b0c3162862 (patch)
tree		29ce3816a1fbbed40005df664652e040d88117db /kernel
parent		kprobes: Cleanup disabling and unregistering path (diff)
kprobes: Separate kprobe optimizing code from optimizer
Separate the kprobe optimizing code from the optimizer itself. This
will make it easier to introduce unoptimizing code into the
optimizer.
Signed-off-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Jason Baron <jbaron@redhat.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: 2nddept-manager@sdl.hitachi.co.jp
LKML-Reference: <20101203095403.2961.91201.stgit@ltc236.sdl.hitachi.co.jp>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/kprobes.c	43
1 file changed, 26 insertions, 17 deletions
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index ab99caf2b167..1f4f9b9d5c89 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -427,26 +427,14 @@ static void kprobe_optimizer(struct work_struct *work);
 static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
 #define OPTIMIZE_DELAY 5
 
-/* Kprobe jump optimizer */
-static __kprobes void kprobe_optimizer(struct work_struct *work)
+/*
+ * Optimize (replace a breakpoint with a jump) kprobes listed on
+ * optimizing_list.
+ */
+static __kprobes void do_optimize_kprobes(void)
 {
 	struct optimized_kprobe *op, *tmp;
 
-	/* Lock modules while optimizing kprobes */
-	mutex_lock(&module_mutex);
-	mutex_lock(&kprobe_mutex);
-	if (kprobes_all_disarmed || !kprobes_allow_optimization)
-		goto end;
-
-	/*
-	 * Wait for quiesence period to ensure all running interrupts
-	 * are done. Because optprobe may modify multiple instructions
-	 * there is a chance that Nth instruction is interrupted. In that
-	 * case, running interrupt can return to 2nd-Nth byte of jump
-	 * instruction. This wait is for avoiding it.
-	 */
-	synchronize_sched();
-
 	/*
 	 * The optimization/unoptimization refers online_cpus via
 	 * stop_machine() and cpu-hotplug modifies online_cpus.
@@ -467,6 +455,27 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
 	}
 	mutex_unlock(&text_mutex);
 	put_online_cpus();
+}
+
+/* Kprobe jump optimizer */
+static __kprobes void kprobe_optimizer(struct work_struct *work)
+{
+	/* Lock modules while optimizing kprobes */
+	mutex_lock(&module_mutex);
+	mutex_lock(&kprobe_mutex);
+	if (kprobes_all_disarmed || !kprobes_allow_optimization)
+		goto end;
+
+	/*
+	 * Wait for quiesence period to ensure all running interrupts
+	 * are done. Because optprobe may modify multiple instructions
+	 * there is a chance that Nth instruction is interrupted. In that
+	 * case, running interrupt can return to 2nd-Nth byte of jump
+	 * instruction. This wait is for avoiding it.
+	 */
+	synchronize_sched();
+
+	do_optimize_kprobes();
 end:
 	mutex_unlock(&kprobe_mutex);
 	mutex_unlock(&module_mutex);
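For illustration, here is a minimal userspace C sketch of the structure this patch creates: kprobe_optimizer() keeps the locking and quiescence policy while the actual list walk is factored into do_optimize_kprobes(), leaving an obvious spot for the unoptimizing pass mentioned in the changelog. The struct probe type, the simplified list handling, and the do_unoptimize_kprobes() slot are illustrative assumptions, not part of this commit.

/*
 * Userspace mock of the split introduced by this patch.  All locking is
 * reduced to comments; names other than do_optimize_kprobes() and
 * kprobe_optimizer() are made up for the example.
 */
#include <stdbool.h>
#include <stdio.h>

struct probe {
	const char *name;
	bool optimized;
	struct probe *next;
};

static struct probe *optimizing_list;	/* probes queued for optimization */
static bool kprobes_all_disarmed;	/* mirrors the kernel-side gate */

/* Walk optimizing_list and "optimize" every queued probe. */
static void do_optimize_kprobes(void)
{
	struct probe *p;

	for (p = optimizing_list; p; p = p->next) {
		p->optimized = true;
		printf("optimized %s\n", p->name);
	}
	optimizing_list = NULL;
}

/* Optimizer work function: policy checks here, list walk in the helper. */
static void kprobe_optimizer(void)
{
	/*
	 * In the kernel this takes module_mutex/kprobe_mutex and calls
	 * synchronize_sched() to wait out in-flight interrupts first.
	 */
	if (kprobes_all_disarmed)
		return;

	do_optimize_kprobes();
	/* A follow-up change can simply add a do_unoptimize_kprobes() call here. */
}

int main(void)
{
	struct probe p = { .name = "example_probe", .optimized = false, .next = NULL };

	optimizing_list = &p;
	kprobe_optimizer();
	return 0;
}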