author     Luca Abeni <luca.abeni@santannapisa.it>    2017-05-18 22:13:31 +0200
committer  Ingo Molnar <mingo@kernel.org>             2017-06-08 10:31:51 +0200
commit     c52f14d384628db0217a7a9080ab800d5ffb2d72
tree       20f76c5658c194086d5ea9e8dd65dde0505447ef /kernel
parent     sched/deadline: Fix the update of the total -deadline utilization
sched/deadline: Implement GRUB accounting
According to the GRUB (Greedy Reclamation of Unused Bandwidth)
reclaiming algorithm, the runtime is not decreased as "dq = -dt",
but as "dq = -Uact dt" (where Uact is the per-runqueue active
utilization).
Hence, this commit modifies the runtime accounting rule in
update_curr_dl() to implement the GRUB rule.
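
As a rough illustration of the rule (a standalone user-space sketch, not the kernel code; the mock function name and the example values of Uact and delta are made up for this example), the kernel stores Uact in rq->dl.running_bw scaled by 2^BW_SHIFT, so the reclaimed charge is just the elapsed time scaled by that fixed-point value:

	#include <stdint.h>
	#include <stdio.h>

	#define BW_SHIFT 20

	/* Mock of the GRUB scaling: dq = -Uact * dt, with Uact in 2^-20 units. */
	static uint64_t mock_grub_reclaim(uint64_t delta_ns, uint64_t running_bw)
	{
		return (delta_ns * running_bw) >> BW_SHIFT;
	}

	int main(void)
	{
		/* Assume Uact = 0.5, i.e. running_bw = 0.5 * 2^20 = 524288. */
		uint64_t running_bw = 1 << (BW_SHIFT - 1);
		uint64_t delta_ns = 1000000;	/* 1 ms of execution */

		/* The runtime budget is charged ~0.5 ms instead of the full 1 ms. */
		printf("charged: %llu ns\n",
		       (unsigned long long)mock_grub_reclaim(delta_ns, running_bw));
		return 0;
	}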
Tested-by: Daniel Bristot de Oliveira <bristot@redhat.com>
Signed-off-by: Luca Abeni <luca.abeni@santannapisa.it>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Claudio Scordino <claudio@evidence.eu.com>
Cc: Joel Fernandes <joelaf@google.com>
Cc: Juri Lelli <juri.lelli@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mathieu Poirier <mathieu.poirier@linaro.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tommaso Cucinotta <tommaso.cucinotta@sssup.it>
Link: http://lkml.kernel.org/r/1495138417-6203-5-git-send-email-luca.abeni@santannapisa.it
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/core.c     |  4
-rw-r--r--  kernel/sched/deadline.c | 17
-rw-r--r--  kernel/sched/sched.h    |  2
3 files changed, 21 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 126339daebd7..b68a1fa05244 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2423,7 +2423,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
 unsigned long to_ratio(u64 period, u64 runtime)
 {
 	if (runtime == RUNTIME_INF)
-		return 1ULL << 20;
+		return BW_UNIT;
 
 	/*
 	 * Doing this here saves a lot of checks in all
@@ -2433,7 +2433,7 @@ unsigned long to_ratio(u64 period, u64 runtime)
 	if (period == 0)
 		return 0;
 
-	return div64_u64(runtime << 20, period);
+	return div64_u64(runtime << BW_SHIFT, period);
 }
 
 #ifdef CONFIG_SMP
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index add9cba1253c..0bee537554f6 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -918,6 +918,22 @@ int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
 extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
 
 /*
+ * This function implements the GRUB accounting rule:
+ * according to the GRUB reclaiming algorithm, the runtime is
+ * not decreased as "dq = -dt", but as "dq = -Uact dt", where
+ * Uact is the (per-runqueue) active utilization.
+ * Since rq->dl.running_bw contains Uact * 2^BW_SHIFT, the result
+ * has to be shifted right by BW_SHIFT.
+ */
+u64 grub_reclaim(u64 delta, struct rq *rq)
+{
+	delta *= rq->dl.running_bw;
+	delta >>= BW_SHIFT;
+
+	return delta;
+}
+
+/*
  * Update the current task's runtime statistics (provided it is still
  * a -deadline task and has not been removed from the dl_rq).
  */
@@ -959,6 +975,7 @@ static void update_curr_dl(struct rq *rq)
 
 	sched_rt_avg_update(rq, delta_exec);
 
+	delta_exec = grub_reclaim(delta_exec, rq);
 	dl_se->runtime -= delta_exec;
 
 throttle:
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c58f38905e0a..bb409ef40120 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1496,6 +1496,8 @@ extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime
 extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
 extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se);
 
+#define BW_SHIFT	20
+#define BW_UNIT		(1 << BW_SHIFT)
 unsigned long to_ratio(u64 period, u64 runtime);
 
 extern void init_entity_runnable_average(struct sched_entity *se);
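
For completeness, a user-space sketch of how the new BW_SHIFT/BW_UNIT constants relate to to_ratio() (a hypothetical standalone mock, not the kernel implementation; div64_u64() is replaced here by a plain 64-bit division, and the runtime/period values are made-up examples):

	#include <stdint.h>
	#include <stdio.h>

	#define BW_SHIFT	20
	#define BW_UNIT		(1 << BW_SHIFT)

	/* Mock of to_ratio(): bandwidth = runtime/period in 2^-20 fixed point. */
	static uint64_t mock_to_ratio(uint64_t period, uint64_t runtime)
	{
		if (period == 0)
			return 0;
		return (runtime << BW_SHIFT) / period;
	}

	int main(void)
	{
		/* runtime = 50 ms, period = 100 ms -> bandwidth = 0.5 * BW_UNIT */
		uint64_t bw = mock_to_ratio(100000000ULL, 50000000ULL);

		printf("bw = %llu (%.3f of BW_UNIT)\n",
		       (unsigned long long)bw, (double)bw / BW_UNIT);
		return 0;
	}

Because to_ratio() and rq->dl.running_bw use the same 2^-BW_SHIFT scale, grub_reclaim() can turn the elapsed time into a charge with a single multiply and shift, as in the diff above.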