From 1746babbb15594ba2d8d8196589bbbc2b5ff51c9 Mon Sep 17 00:00:00 2001
From: Byungchul Park
Date: Thu, 20 Aug 2015 20:21:58 +0900
Subject: sched/fair: Have task_move_group_fair() also detach entity load from the old runqueue

Since we attach the entity load to the new runqueue, we should also
detach the entity load from the old runqueue, otherwise load can
accumulate.

Signed-off-by: Byungchul Park
[ Rewrote the changelog. ]
Signed-off-by: Peter Zijlstra (Intel)
Cc: Linus Torvalds
Cc: Mike Galbraith
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: yuyang.du@intel.com
Link: http://lkml.kernel.org/r/1440069720-27038-4-git-send-email-byungchul.park@lge.com
Signed-off-by: Ingo Molnar
---
 kernel/sched/fair.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 959b2ea386b3..1e1fe7f796e9 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8037,8 +8037,12 @@ static void task_move_group_fair(struct task_struct *p, int queued)
 	if (!queued && (!se->sum_exec_runtime || p->state == TASK_WAKING))
 		queued = 1;
 
+	cfs_rq = cfs_rq_of(se);
 	if (!queued)
-		se->vruntime -= cfs_rq_of(se)->min_vruntime;
+		se->vruntime -= cfs_rq->min_vruntime;
+
+	/* Synchronize task with its prev cfs_rq */
+	detach_entity_load_avg(cfs_rq, se);
 	set_task_rq(p, task_cpu(p));
 	se->depth = se->parent ? se->parent->depth + 1 : 0;
 	cfs_rq = cfs_rq_of(se);
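
For context, detach_entity_load_avg() is defined elsewhere in kernel/sched/fair.c by an earlier patch in this series and is not shown in this hunk. The sketch below is a minimal, self-contained illustration of the idea the changelog describes: the entity's accumulated load/util averages are subtracted from the old runqueue's aggregates so they are not counted twice once the entity is attached to the new runqueue. The types here are simplified stand-ins, not the kernel's real struct cfs_rq / struct sched_entity, and the function name carries a sketch_ prefix to mark it as hypothetical.

	/* Illustrative sketch only, not the kernel implementation. */
	#include <stdio.h>

	struct sketch_sched_avg { long load_avg; long util_avg; };
	struct sketch_cfs_rq { struct sketch_sched_avg avg; };
	struct sketch_se { struct sketch_sched_avg avg; };

	static void sketch_detach_entity_load_avg(struct sketch_cfs_rq *cfs_rq,
						  struct sketch_se *se)
	{
		/*
		 * Remove this entity's contribution from the old runqueue's
		 * aggregate, clamping at zero. Without this step, migrating
		 * the entity and attaching it elsewhere would leave its load
		 * counted on both runqueues, and load would accumulate.
		 */
		cfs_rq->avg.load_avg -= se->avg.load_avg;
		if (cfs_rq->avg.load_avg < 0)
			cfs_rq->avg.load_avg = 0;
		cfs_rq->avg.util_avg -= se->avg.util_avg;
		if (cfs_rq->avg.util_avg < 0)
			cfs_rq->avg.util_avg = 0;
	}

	int main(void)
	{
		struct sketch_cfs_rq old_rq = { .avg = { 1024, 300 } };
		struct sketch_se se = { .avg = { 400, 100 } };

		sketch_detach_entity_load_avg(&old_rq, &se);
		/* old_rq now carries 624/200; se's 400/100 can be attached to the new runqueue. */
		printf("load=%ld util=%ld\n", old_rq.avg.load_avg, old_rq.avg.util_avg);
		return 0;
	}

In the patched task_move_group_fair() above, this detach happens against the task's previous cfs_rq before set_task_rq() repoints the entity at its new group, mirroring the attach done on the destination side.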