path: root/kernel/sched/fair.c
author	Peter Zijlstra <peterz@infradead.org>	2017-05-12 14:18:10 +0200
committer	Ingo Molnar <mingo@kernel.org>	2017-09-29 19:35:17 +0200
commit	9a2dd585b2c431ec1e5d46a9d9568291c7a534cc (patch)
tree	987633c0ac6fa9ce7c9f278157d9621f6f1c0fbb /kernel/sched/fair.c
parent	sched/fair: Align PELT windows between cfs_rq and its se (diff)
download	linux-9a2dd585b2c431ec1e5d46a9d9568291c7a534cc.tar.xz
	linux-9a2dd585b2c431ec1e5d46a9d9568291c7a534cc.zip
sched/fair: Implement more accurate async detach
The problem with the overestimate is that it will subtract too big a
value from the load_sum, thereby pushing it down further than it ought
to go. Since runnable_load_avg is not subject to a similar 'force',
this results in the occasional 'runnable_load > load' situation.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
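For context, PELT relates a signal's _sum and _avg by sum = avg * divider,
where the exact divider at any instant is LOAD_AVG_MAX - 1024 +
sa->period_contrib: the current, still-incomplete 1024us segment has only
contributed period_contrib of its possible 1024. Reconstructing the sum to
subtract as r * LOAD_AVG_MAX therefore over-shoots by up to r * 1024. A
minimal user-space sketch of that arithmetic (not kernel code; the helper
name pelt_divider() is made up for illustration, LOAD_AVG_MAX is the
kernel's value):

#include <stdio.h>
#include <stdint.h>

#define LOAD_AVG_MAX	47742	/* maximum possible load avg _sum */

/* Largest _sum a signal can actually hold right now: the current
 * (incomplete) 1024us segment has only contributed period_contrib. */
static uint32_t pelt_divider(uint32_t period_contrib)
{
	return LOAD_AVG_MAX - 1024 + period_contrib;
}

int main(void)
{
	uint32_t contrib = 200;	/* partway through the current segment */
	uint64_t avg = 512;	/* a removed entity's load_avg */

	/* The old code reconstructed the sum with the over-estimate ... */
	uint64_t over  = avg * LOAD_AVG_MAX;
	/* ... this patch uses the exact divider instead. */
	uint64_t exact = avg * pelt_divider(contrib);

	/* Prints avg * (1024 - contrib): how far load_sum was pushed low. */
	printf("over-subtraction: %llu\n",
	       (unsigned long long)(over - exact));
	return 0;
}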
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--	kernel/sched/fair.c	9
1 file changed, 3 insertions(+), 6 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 954b332cd899..67c39642a512 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3574,6 +3574,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 
 	if (cfs_rq->removed.nr) {
 		unsigned long r;
+		u32 divider = LOAD_AVG_MAX - 1024 + sa->period_contrib;
 
 		raw_spin_lock(&cfs_rq->removed.lock);
 		swap(cfs_rq->removed.util_avg, removed_util);
@@ -3582,17 +3583,13 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 		cfs_rq->removed.nr = 0;
 		raw_spin_unlock(&cfs_rq->removed.lock);
 
-		/*
-		 * The LOAD_AVG_MAX for _sum is a slight over-estimate,
-		 * which is safe due to sub_positive() clipping at 0.
-		 */
 		r = removed_load;
 		sub_positive(&sa->load_avg, r);
-		sub_positive(&sa->load_sum, r * LOAD_AVG_MAX);
+		sub_positive(&sa->load_sum, r * divider);
 
 		r = removed_util;
 		sub_positive(&sa->util_avg, r);
-		sub_positive(&sa->util_sum, r * LOAD_AVG_MAX);
+		sub_positive(&sa->util_sum, r * divider);
 
 		add_tg_cfs_propagate(cfs_rq, -(long)removed_runnable_sum);
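The comment this patch removes noted that the LOAD_AVG_MAX over-estimate
was only "safe" because sub_positive() clips at 0; the subtraction could
never wrap, it could only drag load_sum too low. A simplified stand-in for
that helper (plain C, omitting the kernel's READ_ONCE/WRITE_ONCE load-store
discipline) shows the clamping behaviour:

#include <stdint.h>

/* Unsigned subtract that clamps at zero instead of wrapping around,
 * mimicking the semantics of sub_positive() in kernel/sched/fair.c. */
static inline void sub_positive_u64(uint64_t *ptr, uint64_t val)
{
	uint64_t res = *ptr - val;

	if (res > *ptr)		/* wrapped past zero: clamp */
		res = 0;
	*ptr = res;
}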