author		Alex Shi <alex.shi@intel.com>		2013-06-20 04:18:51 +0200
committer	Ingo Molnar <mingo@kernel.org>		2013-06-27 10:07:36 +0200
commit		a003a25b227d59ded9197ced109517f037d01c27 (patch)
tree		879a7f31942addac4420006a5e7c871c36cc92b4 /kernel
parent		sched: Compute runnable load avg in cpu_load and cpu_avg_load_per_task (diff)
sched: Consider runnable load average in move_tasks()
Aside from using the runnable load average in the background, move_tasks() is also the key function in load balancing. We need to consider the runnable load average in it as well, so that the load comparison is apples to apples.

Morten caught a div u64 bug on ARM, thanks!

Thanks-to: Morten Rasmussen <morten.rasmussen@arm.com>
Signed-off-by: Alex Shi <alex.shi@intel.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1371694737-29336-8-git-send-email-alex.shi@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
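To make the change concrete, here is a minimal userspace C sketch (illustration only, not kernel code) of the arithmetic the patch switches to: a group's hierarchical load is propagated down from the root by scaling each level's load_avg_contrib by its parent's runnable_load_avg, and a task's h_load is then its own load_avg_contrib scaled the same way, so move_tasks() compares runnable-average-based quantities on both sides. The struct grp type, the function names and the sample numbers below are hypothetical; the kernel itself uses div64_ul() for the division because open-coded 64-bit division is not available to 32-bit kernel code (the ARM issue mentioned above), whereas plain C division is fine in this sketch.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-in for a task group's per-CPU queue/entity pair. */
struct grp {
        struct grp *parent;             /* NULL for the root */
        uint64_t se_contrib;            /* this group entity's load_avg_contrib on its
                                           parent's queue; for the root, the CPU's
                                           rq->avg.load_avg_contrib */
        uint64_t runnable_load_avg;     /* runnable load summed over this group's queue */
};

/* Mirrors tg_load_down(): propagate h_load from the root toward this group. */
static uint64_t group_h_load(const struct grp *g)
{
        if (!g->parent)
                return g->se_contrib;

        /* parent's h_load, scaled by this group's share of the parent's runnable
         * load; "+ 1" avoids a divide-by-zero, as in the kernel code below. */
        return group_h_load(g->parent) * g->se_contrib /
               (g->parent->runnable_load_avg + 1);
}

/* Mirrors the patched task_h_load(): contrib * h_load / (runnable_load_avg + 1). */
static uint64_t task_h_load_sketch(uint64_t task_contrib, const struct grp *cfs_rq)
{
        return task_contrib * group_h_load(cfs_rq) /
               (cfs_rq->runnable_load_avg + 1);
}

int main(void)
{
        /* Made-up numbers: a root queue and one child group on the same CPU. */
        struct grp root = { .parent = NULL,  .se_contrib = 2048, .runnable_load_avg = 2048 };
        struct grp tg   = { .parent = &root, .se_contrib = 1024, .runnable_load_avg = 1024 };

        printf("estimated task h_load: %llu\n",
               (unsigned long long)task_h_load_sketch(512, &tg));
        return 0;
}

The point of the apples-to-apples comparison is that a high-weight but mostly idle task contributes little runnable load; mixing instantaneous weights with runnable averages would make such a task look far heavier to the load balancer than it really is.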
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/fair.c	18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e6d82cae4910..7948bb825985 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4179,11 +4179,14 @@ static int tg_load_down(struct task_group *tg, void *data)
 	long cpu = (long)data;
 
 	if (!tg->parent) {
-		load = cpu_rq(cpu)->load.weight;
+		load = cpu_rq(cpu)->avg.load_avg_contrib;
 	} else {
+		unsigned long tmp_rla;
+		tmp_rla = tg->parent->cfs_rq[cpu]->runnable_load_avg + 1;
+
 		load = tg->parent->cfs_rq[cpu]->h_load;
-		load *= tg->se[cpu]->load.weight;
-		load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
+		load *= tg->se[cpu]->avg.load_avg_contrib;
+		load /= tmp_rla;
 	}
 
 	tg->cfs_rq[cpu]->h_load = load;
@@ -4209,12 +4212,9 @@ static void update_h_load(long cpu)
 static unsigned long task_h_load(struct task_struct *p)
 {
 	struct cfs_rq *cfs_rq = task_cfs_rq(p);
-	unsigned long load;
-
-	load = p->se.load.weight;
-	load = div_u64(load * cfs_rq->h_load, cfs_rq->load.weight + 1);
 
-	return load;
+	return div64_ul(p->se.avg.load_avg_contrib * cfs_rq->h_load,
+			cfs_rq->runnable_load_avg + 1);
 }
 #else
 static inline void update_blocked_averages(int cpu)
@@ -4227,7 +4227,7 @@ static inline void update_h_load(long cpu)
 
 static unsigned long task_h_load(struct task_struct *p)
 {
-	return p->se.load.weight;
+	return p->se.avg.load_avg_contrib;
 }
 #endif