author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2008-08-19 12:33:03 +0200
committer	Ingo Molnar <mingo@elte.hu>	2008-08-28 13:42:38 +0200
commit	cc2991cf15ae92fa30b3ea9f56a8a5a337bd33c7 (patch)
tree	a286839539678bb2b36e7e059b50a2e575e93782 /kernel/sched_rt.c
parent	sched: fix sched_rt_rq_enqueue() resched idle (diff)
sched: rt-bandwidth accounting fix
It fixes an accounting bug where we would continue accumulating runtime even though bandwidth control is disabled. This would lead to very long throttle periods once bandwidth control gets turned on again.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
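To illustrate the effect described above, here is a minimal user-space sketch. It is not kernel code: rt_rq_sim, the tick loop, and all numbers are invented for illustration. It contrasts the old accounting, which keeps adding delta_exec to rt_time even while rt_runtime is RUNTIME_INF, with the fixed accounting, which skips the accumulation; when bandwidth control is re-enabled, only the old variant enters the period with a large rt_time backlog.

/*
 * Sketch only -- simplified stand-in for the scheduler's real state.
 */
#include <stdio.h>
#include <stdint.h>

#define RUNTIME_INF (~0ULL)

struct rt_rq_sim {
	uint64_t rt_time;	/* runtime accumulated in the current period (ns) */
	uint64_t rt_runtime;	/* allowed runtime per period (ns), or RUNTIME_INF */
};

/* Old behaviour: accumulate unconditionally; the RUNTIME_INF early return in
 * sched_rt_runtime_exceeded() meant rt_time was never consumed or cleared. */
static void account_old(struct rt_rq_sim *rq, uint64_t delta_exec)
{
	rq->rt_time += delta_exec;
}

/* Fixed behaviour: skip the accounting entirely while bandwidth control is off. */
static void account_fixed(struct rt_rq_sim *rq, uint64_t delta_exec)
{
	if (rq->rt_runtime != RUNTIME_INF)
		rq->rt_time += delta_exec;
}

int main(void)
{
	struct rt_rq_sim buggy = { 0, RUNTIME_INF };
	struct rt_rq_sim fixed = { 0, RUNTIME_INF };

	/* 60 seconds of 1 ms ticks while bandwidth control is disabled. */
	for (int i = 0; i < 60000; i++) {
		account_old(&buggy, 1000000ULL);
		account_fixed(&fixed, 1000000ULL);
	}

	/* Re-enable bandwidth control: e.g. 950 ms of runtime per period. */
	buggy.rt_runtime = fixed.rt_runtime = 950000000ULL;

	printf("old:   rt_time=%llu ns already exceeds rt_runtime=%llu ns -> long throttle\n",
	       (unsigned long long)buggy.rt_time,
	       (unsigned long long)buggy.rt_runtime);
	printf("fixed: rt_time=%llu ns -> period starts clean\n",
	       (unsigned long long)fixed.rt_time);
	return 0;
}

The actual fix (diff below) performs the equivalent check under rt_runtime_lock in update_curr_rt().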
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--	kernel/sched_rt.c	11
1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 07d9b3307907..552310798dad 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -440,9 +440,6 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
 {
 	u64 runtime = sched_rt_runtime(rt_rq);
 
-	if (runtime == RUNTIME_INF)
-		return 0;
-
 	if (rt_rq->rt_throttled)
 		return rt_rq_throttled(rt_rq);
 
@@ -493,9 +490,11 @@ static void update_curr_rt(struct rq *rq)
 		rt_rq = rt_rq_of_se(rt_se);
 
 		spin_lock(&rt_rq->rt_runtime_lock);
-		rt_rq->rt_time += delta_exec;
-		if (sched_rt_runtime_exceeded(rt_rq))
-			resched_task(curr);
+		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
+			rt_rq->rt_time += delta_exec;
+			if (sched_rt_runtime_exceeded(rt_rq))
+				resched_task(curr);
+		}
 		spin_unlock(&rt_rq->rt_runtime_lock);
 	}
 }