author     Paul Mackerras <paulus@samba.org>   2009-05-11 07:46:10 +0200
committer  Ingo Molnar <mingo@elte.hu>         2009-05-11 12:10:53 +0200
commit     a08b159fc243dbfe415250466d24cfc5010deee5 (patch)
tree       39bb59aaf183021e6d9b02ed26dc8a4930d00f0b /kernel
parent     perf_counter: Put whole group on when enabling group leader (diff)
perf_counter: don't count scheduler ticks as context switches
The context-switch software counter gives inflated values at present because each scheduler tick and each process-wide counter enable/disable prctl gets counted as a context switch.

This happens because perf_counter_task_tick, perf_counter_task_disable and perf_counter_task_enable all call perf_counter_task_sched_out, which calls perf_swcounter_event to record a context switch event.

This fixes it by introducing a variant of perf_counter_task_sched_out with two underscores in front for internal use within the perf_counter code, and makes perf_counter_task_{tick,disable,enable} call it. This variant doesn't record a context switch event, and takes a struct perf_counter_context *.

This adds the new variant rather than changing the behaviour or interface of perf_counter_task_sched_out because that is called from other code.

[ Impact: fix inflated context-switch event counts ]

Signed-off-by: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <18951.48034.485580.498953@drongo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
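To make the before/after behaviour concrete, here is a small user-space sketch of the idea behind the fix. It is not the kernel code: the types, the signatures and the context_switch_events variable are simplified stand-ins, and only the structure mirrors the patch below — the public sched-out path records a context-switch event, while the new internal variant schedules counters out without recording one, so the tick and prctl enable/disable paths no longer inflate the count.

#include <stdio.h>

/* Toy model of the fix; kernel types and helpers are stand-ins. */
struct perf_counter_context { int nr_counters; };

static long context_switch_events;	/* what the software counter would report */

static void __perf_counter_sched_out(struct perf_counter_context *ctx)
{
	/* deactivate the task's counters (elided in this sketch) */
	(void)ctx;
}

/* Internal variant: schedules counters out, records no event. */
static void __perf_counter_task_sched_out(struct perf_counter_context *ctx)
{
	__perf_counter_sched_out(ctx);
}

/* Public entry point used on a real context switch: also counts the event
 * (perf_swcounter_event() in the kernel). */
static void perf_counter_task_sched_out(struct perf_counter_context *ctx)
{
	context_switch_events++;
	__perf_counter_sched_out(ctx);
}

int main(void)
{
	struct perf_counter_context ctx = { .nr_counters = 1 };

	perf_counter_task_sched_out(&ctx);	/* genuine context switch: counted   */
	__perf_counter_task_sched_out(&ctx);	/* scheduler tick path: not counted  */
	__perf_counter_task_sched_out(&ctx);	/* prctl enable/disable: not counted */

	printf("context switches counted: %ld\n", context_switch_events);	/* 1 */
	return 0;
}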
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/perf_counter.c | 16
1 file changed, 11 insertions(+), 5 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index a5bdc93ac477..7373b96bc36c 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -837,6 +837,14 @@ void perf_counter_task_sched_out(struct task_struct *task, int cpu)
 	cpuctx->task_ctx = NULL;
 }
 
+static void __perf_counter_task_sched_out(struct perf_counter_context *ctx)
+{
+	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
+
+	__perf_counter_sched_out(ctx, cpuctx);
+	cpuctx->task_ctx = NULL;
+}
+
 static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
 {
 	__perf_counter_sched_out(&cpuctx->ctx, cpuctx);
@@ -943,15 +951,13 @@ int perf_counter_task_disable(void)
 	struct perf_counter *counter;
 	unsigned long flags;
 	u64 perf_flags;
-	int cpu;
 
 	if (likely(!ctx->nr_counters))
 		return 0;
 
 	local_irq_save(flags);
-	cpu = smp_processor_id();
 
-	perf_counter_task_sched_out(curr, cpu);
+	__perf_counter_task_sched_out(ctx);
 
 	spin_lock(&ctx->lock);
@@ -989,7 +995,7 @@ int perf_counter_task_enable(void)
 	local_irq_save(flags);
 	cpu = smp_processor_id();
 
-	perf_counter_task_sched_out(curr, cpu);
+	__perf_counter_task_sched_out(ctx);
 
 	spin_lock(&ctx->lock);
@@ -1054,7 +1060,7 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu)
 	ctx = &curr->perf_counter_ctx;
 
 	perf_counter_cpu_sched_out(cpuctx);
-	perf_counter_task_sched_out(curr, cpu);
+	__perf_counter_task_sched_out(ctx);
 
 	rotate_ctx(&cpuctx->ctx);
 	rotate_ctx(ctx);