author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2009-04-06 11:45:11 +0200
committer Ingo Molnar <mingo@elte.hu>              2009-04-07 10:49:00 +0200
commit    a39d6f2556c4a19f58f538c6aa28bf8faca4fcb8 (patch)
tree      8c2ca4d3042bc90beb8d733584c7427476ae92f7 /kernel
parent    perf_counter: rework context time (diff)
download  linux-a39d6f2556c4a19f58f538c6aa28bf8faca4fcb8.tar.xz
          linux-a39d6f2556c4a19f58f538c6aa28bf8faca4fcb8.zip
perf_counter: rework the task clock software counter
Rework the task clock software counter to use the context time instead
of the task runtime clock; this removes the last such user.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <20090406094518.445450972@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
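The crux of the change is in task_clock_perf_counter_update() below: the
counter now samples counter->ctx->time (kept current by
update_context_time()) rather than calling back into the scheduler through
__task_delta_exec(), and the separate atomic64_read()/atomic64_set() pair
on hw.prev_count collapses into a single atomic64_xchg(). A minimal
userspace sketch of that accumulate-by-exchange pattern, with C11 atomics
standing in for the kernel's atomic64_t (all toy_* names are illustrative,
not kernel API):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Toy counter: prev_count remembers the last timestamp accounted,
 * count accumulates elapsed context time. */
struct toy_counter {
	_Atomic uint64_t prev_count;
	_Atomic uint64_t count;
};

/* Mirrors the reworked update: one atomic exchange both fetches the
 * previous timestamp and installs the new one, so an interval can
 * never be accounted twice by racing updaters. */
static void toy_update(struct toy_counter *c, uint64_t ctx_time)
{
	uint64_t prev = atomic_exchange(&c->prev_count, ctx_time);
	atomic_fetch_add(&c->count, ctx_time - prev);
}

int main(void)
{
	struct toy_counter c = { 0 };

	atomic_store(&c.prev_count, 100);	/* "enable" at time 100 */
	toy_update(&c, 250);			/* "read" at time 250 */
	toy_update(&c, 400);			/* "disable" at time 400 */
	printf("accumulated: %llu\n",		/* prints 300 */
	       (unsigned long long)atomic_load(&c.count));
	return 0;
}

With the old read-then-set pair, two racing updates could both observe the
same prev_count and each add the full delta, double-counting the overlap;
the single exchange closes that window.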
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/perf_counter.c | 42 ++++++++++++------------------------------
1 file changed, 12 insertions(+), 30 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 84d85ab4e161..56b7eb53d673 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -974,9 +974,6 @@ int perf_counter_task_disable(void)
 	curr_rq_lock_irq_save(&flags);
 	cpu = smp_processor_id();
 
-	/* force the update of the task clock: */
-	__task_delta_exec(curr, 1);
-
 	perf_counter_task_sched_out(curr, cpu);
 
 	spin_lock(&ctx->lock);
@@ -1017,9 +1014,6 @@ int perf_counter_task_enable(void)
 	curr_rq_lock_irq_save(&flags);
 	cpu = smp_processor_id();
 
-	/* force the update of the task clock: */
-	__task_delta_exec(curr, 1);
-
 	perf_counter_task_sched_out(curr, cpu);
 
 	spin_lock(&ctx->lock);
@@ -2347,38 +2341,28 @@ static const struct hw_perf_counter_ops perf_ops_cpu_clock = {
  * Software counter: task time clock
  */
 
-/*
- * Called from within the scheduler:
- */
-static u64 task_clock_perf_counter_val(struct perf_counter *counter, int update)
-{
-	struct task_struct *curr = counter->task;
-	u64 delta;
-
-	delta = __task_delta_exec(curr, update);
-
-	return curr->se.sum_exec_runtime + delta;
-}
-
-static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
+static void task_clock_perf_counter_update(struct perf_counter *counter)
 {
-	u64 prev;
+	u64 prev, now;
 	s64 delta;
 
-	prev = atomic64_read(&counter->hw.prev_count);
-
-	atomic64_set(&counter->hw.prev_count, now);
+	update_context_time(counter->ctx);
+	now = counter->ctx->time;
+	prev = atomic64_xchg(&counter->hw.prev_count, now);
 
 	delta = now - prev;
-
 	atomic64_add(delta, &counter->count);
 }
 
 static int task_clock_perf_counter_enable(struct perf_counter *counter)
 {
 	struct hw_perf_counter *hwc = &counter->hw;
+	u64 now;
+
+	update_context_time(counter->ctx);
+	now = counter->ctx->time;
 
-	atomic64_set(&hwc->prev_count, task_clock_perf_counter_val(counter, 0));
+	atomic64_set(&hwc->prev_count, now);
 	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	hwc->hrtimer.function = perf_swcounter_hrtimer;
 	if (hwc->irq_period) {
@@ -2393,14 +2377,12 @@ static int task_clock_perf_counter_enable(struct perf_counter *counter)
 static void task_clock_perf_counter_disable(struct perf_counter *counter)
 {
 	hrtimer_cancel(&counter->hw.hrtimer);
-	task_clock_perf_counter_update(counter,
-			task_clock_perf_counter_val(counter, 0));
+	task_clock_perf_counter_update(counter);
 }
 
 static void task_clock_perf_counter_read(struct perf_counter *counter)
 {
-	task_clock_perf_counter_update(counter,
-			task_clock_perf_counter_val(counter, 1));
+	task_clock_perf_counter_update(counter);
 }
 
 static const struct hw_perf_counter_ops perf_ops_task_clock = {
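With this patch, enable, read, and disable all derive from the one context
clock. A toy single-threaded model of that lifecycle (again, toy_* names
are illustrative stand-ins, not kernel API):

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for counter->ctx->time and hw.prev_count. */
struct toy_ctx   { uint64_t time; };
struct toy_clock {
	struct toy_ctx *ctx;
	uint64_t prev_count;
	uint64_t count;
};

/* enable: seed prev_count from the context clock, so time that passed
 * before enable is never charged (the old code seeded from the task
 * runtime clock instead). */
static void toy_enable(struct toy_clock *c)
{
	c->prev_count = c->ctx->time;
}

/* read and disable both funnel into one update helper, as in the patch. */
static void toy_read(struct toy_clock *c)
{
	uint64_t now = c->ctx->time;

	c->count += now - c->prev_count;
	c->prev_count = now;
}

int main(void)
{
	struct toy_ctx ctx = { .time = 1000 };
	struct toy_clock c = { .ctx = &ctx };

	toy_enable(&c);		/* prev_count = 1000 */
	ctx.time = 1600;	/* context runs 600 units */
	toy_read(&c);		/* count = 600 */
	ctx.time = 1900;
	toy_read(&c);		/* disable path: count = 900 */
	printf("count = %llu\n", (unsigned long long)c.count);
	return 0;
}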