author     Peter Zijlstra <peterz@infradead.org>  2017-05-08 17:30:46 +0200
committer  Ingo Molnar <mingo@kernel.org>         2017-09-29 19:35:15 +0200
commit     0e2d2aaaae52c247c047d14999b93486bdbd3431 (patch)
tree       7bff425ce22d58f3cdd054065eec5b5bd2ea8edf /kernel/sched/sched.h
parent     sched/fair: Rewrite cfs_rq->removed_*avg (diff)
download   linux-0e2d2aaaae52c247c047d14999b93486bdbd3431.tar.xz
           linux-0e2d2aaaae52c247c047d14999b93486bdbd3431.zip
sched/fair: Rewrite PELT migration propagation
When an entity migrates in (or out) of a runqueue, we need to add (or
remove) its contribution from the entire PELT hierarchy, because even
non-runnable entities are included in the load average sums.

In order to do this we have some propagation logic that updates the
PELT tree, however the way it 'propagates' the runnable (or load)
change is (more or less):

                     tg->weight * grq->avg.load_avg
  ge->avg.load_avg = ------------------------------
                               tg->load_avg

But that is the expression for ge->weight, and per the definition of
load_avg:

  ge->avg.load_avg := ge->weight * ge->avg.runnable_avg

That destroys the runnable_avg (by setting it to 1) we wanted to
propagate.

Instead directly propagate runnable_sum.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
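To make the difference concrete, here is a minimal user-space C sketch
(not kernel code) of the two propagation schemes; the struct layout,
helper names and numbers are illustrative assumptions, only the
prop_runnable_sum field mirrors what this patch adds:

  #include <stdio.h>

  /* Hypothetical, simplified stand-ins for the kernel structures. */
  struct grp {
          long tg_weight;         /* tg->weight                        */
          long tg_load_avg;       /* load_avg summed over all CPUs     */
          long grq_load_avg;      /* this CPU's group rq load_avg      */
          long grq_runnable_sum;  /* this CPU's group rq runnable_sum  */
          long prop_runnable_sum; /* delta handed up to the parent     */
  };

  /*
   * Old scheme: tg->weight * grq->avg.load_avg / tg->load_avg.
   * This is really the expression for ge->weight, so it implicitly
   * assumes runnable_avg == 1 and loses the value we meant to carry up.
   */
  static long old_propagate(const struct grp *g)
  {
          return g->tg_weight * g->grq_load_avg / g->tg_load_avg;
  }

  /*
   * New scheme: hand the runnable_sum delta itself to the parent,
   * which can rebuild load from it using its own weight.
   */
  static void new_propagate(struct grp *g, long delta_runnable_sum)
  {
          g->grq_runnable_sum  += delta_runnable_sum;
          g->prop_runnable_sum += delta_runnable_sum;
  }

  int main(void)
  {
          struct grp g = { .tg_weight = 1024, .tg_load_avg = 4096,
                           .grq_load_avg = 2048, .grq_runnable_sum = 800 };

          printf("old: ge->avg.load_avg = %ld (== ge->weight, runnable lost)\n",
                 old_propagate(&g));

          new_propagate(&g, 100);
          printf("new: prop_runnable_sum = %ld (runnable carried up as-is)\n",
                 g.prop_runnable_sum);
          return 0;
  }

The only point of the sketch is that the old expression cancels
runnable_avg out of the result, while the new one forwards the
runnable_sum delta unchanged for the parent to consume.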
Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r--  kernel/sched/sched.h | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 2fd350a12bb7..5bcb86eb026b 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -448,18 +448,19 @@ struct cfs_rq {
 #ifndef CONFIG_64BIT
 	u64 load_last_update_time_copy;
 #endif
-#ifdef CONFIG_FAIR_GROUP_SCHED
-	unsigned long tg_load_avg_contrib;
-	unsigned long propagate_avg;
-#endif
 	struct {
 		raw_spinlock_t lock ____cacheline_aligned;
 		int nr;
 		unsigned long load_avg;
 		unsigned long util_avg;
+		unsigned long runnable_sum;
 	} removed;
 #ifdef CONFIG_FAIR_GROUP_SCHED
+	unsigned long tg_load_avg_contrib;
+	long propagate;
+	long prop_runnable_sum;
+
 	/*
 	 * h_load = weight * f(tg)
 	 *