author    Ingo Molnar <mingo@elte.hu>  2008-04-23 09:24:06 +0200
committer Ingo Molnar <mingo@elte.hu>  2008-05-05 23:56:18 +0200
commit    dfbf4a1bc319f0f9a31e39b2da1fa5c55e85af89 (patch)
tree      0b9dd19406c53a93452dd345bb05f76aa712a757 /kernel
parent    sched: fair-group: fix a Div0 error of the fair group scheduler (diff)
download  linux-dfbf4a1bc319f0f9a31e39b2da1fa5c55e85af89.tar.xz
          linux-dfbf4a1bc319f0f9a31e39b2da1fa5c55e85af89.zip
sched: fix cpu clock
David Miller pointed out that nothing in cpu_clock() sets prev_cpu_time. This caused __sync_cpu_clock() to be called all the time - against the intention of this code. The result was that in practice we hit a global spinlock every time cpu_clock() is called - which, even though cpu_clock() is only used for tracing and debugging, is suboptimal.

While at it, also:

- move the irq disabling to the outermost layer; this should make cpu_clock() warp-free when called with irqs enabled.
- use long long instead of cycles_t, for platforms where cycles_t is 32-bit.

Reported-by: David Miller <davem@davemloft.net>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
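For readers who want to see the intended fast-path/slow-path split outside of kernel context, below is a minimal, self-contained user-space C sketch of the pattern this patch restores: each CPU keeps its own offset and prev time, the global resync is only taken when the local delta exceeds a threshold, and - the actual fix - the per-CPU prev time is updated after a resync so the slow path is not re-entered on every call. All names here (demo_cpu_clock(), SYNC_THRESH, the fake counter) are illustrative stand-ins, not kernel APIs, and the irq disabling and spinlock of the real code are only noted in comments.

#include <stdio.h>

#define NR_CPUS		4
#define SYNC_THRESH	100ULL	/* resync threshold; value is arbitrary */

static unsigned long long time_offset[NR_CPUS];
static unsigned long long prev_cpu_time[NR_CPUS];
static unsigned long long prev_global_time;	/* the real code guards this with time_sync_lock */

/* stand-in for reading rq->clock: a fake counter that advances differently per cpu */
static unsigned long long raw_cpu_clock(int cpu)
{
	static unsigned long long tick;

	return tick += 10 + cpu;
}

/* global resync: never let one cpu's view fall behind the global maximum */
static unsigned long long sync_cpu_clock(unsigned long long time, int cpu)
{
	if (time < prev_global_time) {
		time_offset[cpu] += prev_global_time - time;
		time = prev_global_time;
	} else {
		prev_global_time = time;
	}
	return time;
}

static unsigned long long demo_cpu_clock(int cpu)
{
	unsigned long long prev, time, delta;

	/* the kernel disables irqs around this whole section */
	prev  = prev_cpu_time[cpu];
	time  = raw_cpu_clock(cpu) + time_offset[cpu];
	delta = time - prev;

	if (delta > SYNC_THRESH) {
		time = sync_cpu_clock(time, cpu);
		prev_cpu_time[cpu] = time;	/* the missing update this patch adds */
	}
	return time;
}

int main(void)
{
	int i;

	for (i = 0; i < 3; i++)
		printf("cpu0: %llu  cpu1: %llu\n",
		       demo_cpu_clock(0), demo_cpu_clock(1));
	return 0;
}

With the per-cpu prev time updated after a resync, the threshold comparison again does what it was meant to do: the global lock (here, sync_cpu_clock()) is hit only once per drift window instead of on every call.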
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c  24
1 file changed, 15 insertions, 9 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index f98f75f3c708..9457106b18af 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -910,11 +910,14 @@ static DEFINE_PER_CPU(unsigned long long, prev_cpu_time);
 static DEFINE_SPINLOCK(time_sync_lock);
 static unsigned long long prev_global_time;
 
-static unsigned long long __sync_cpu_clock(cycles_t time, int cpu)
+static unsigned long long __sync_cpu_clock(unsigned long long time, int cpu)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&time_sync_lock, flags);
+	/*
+	 * We want this inlined, to not get tracer function calls
+	 * in this critical section:
+	 */
+	spin_acquire(&time_sync_lock.dep_map, 0, 0, _THIS_IP_);
+	__raw_spin_lock(&time_sync_lock.raw_lock);
 
 	if (time < prev_global_time) {
 		per_cpu(time_offset, cpu) += prev_global_time - time;
@@ -923,7 +926,8 @@ static unsigned long long __sync_cpu_clock(cycles_t time, int cpu)
 		prev_global_time = time;
 	}
 
-	spin_unlock_irqrestore(&time_sync_lock, flags);
+	__raw_spin_unlock(&time_sync_lock.raw_lock);
+	spin_release(&time_sync_lock.dep_map, 1, _THIS_IP_);
 
 	return time;
 }
@@ -931,7 +935,6 @@ static unsigned long long __sync_cpu_clock(cycles_t time, int cpu)
 static unsigned long long __cpu_clock(int cpu)
 {
 	unsigned long long now;
-	unsigned long flags;
 	struct rq *rq;
 
 	/*
@@ -941,11 +944,9 @@ static unsigned long long __cpu_clock(int cpu)
 	if (unlikely(!scheduler_running))
 		return 0;
 
-	local_irq_save(flags);
 	rq = cpu_rq(cpu);
 	update_rq_clock(rq);
 	now = rq->clock;
-	local_irq_restore(flags);
 
 	return now;
 }
@@ -957,13 +958,18 @@ static unsigned long long __cpu_clock(int cpu)
 unsigned long long cpu_clock(int cpu)
 {
 	unsigned long long prev_cpu_time, time, delta_time;
+	unsigned long flags;
 
+	local_irq_save(flags);
 	prev_cpu_time = per_cpu(prev_cpu_time, cpu);
 	time = __cpu_clock(cpu) + per_cpu(time_offset, cpu);
 	delta_time = time-prev_cpu_time;
 
-	if (unlikely(delta_time > time_sync_thresh))
+	if (unlikely(delta_time > time_sync_thresh)) {
 		time = __sync_cpu_clock(time, cpu);
+		per_cpu(prev_cpu_time, cpu) = time;
+	}
+	local_irq_restore(flags);
 
 	return time;
 }