author     Vincent Guittot <vincent.guittot@linaro.org>  2015-07-15 02:04:38 +0200
committer  Ingo Molnar <mingo@kernel.org>                2015-08-03 12:24:28 +0200
commit     6c1d47c0827304949e0eb9479f4d587f226fac8b (patch)
tree       e56d3c53cc6af71b01507c7cc3335e53c88264d5 /kernel/sched
parent     sched/fair: Rewrite runnable load and utilization average tracking (diff)
sched/fair: Implement update_blocked_averages() for CONFIG_FAIR_GROUP_SCHED=n
The load and the utilization of idle CPUs must be updated periodically in
order to decay the blocked part. If CONFIG_FAIR_GROUP_SCHED is not set, the
load and utilization of idle CPUs are not decayed and stay at the values
set before the CPU became idle.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Yuyang Du <yuyang.du@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: arjan@linux.intel.com
Cc: bsegall@google.com
Cc: dietmar.eggemann@arm.com
Cc: fengguang.wu@intel.com
Cc: len.brown@intel.com
Cc: morten.rasmussen@arm.com
Cc: pjt@google.com
Cc: rafael.j.wysocki@intel.com
Cc: umgwanakikbuti@gmail.com
Link: http://lkml.kernel.org/r/1436918682-4971-4-git-send-email-yuyang.du@intel.com
[ Fixed up the SOB chain. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
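For intuition, here is a minimal, self-contained sketch (illustration only,
not kernel code) of the geometric decay that update_blocked_averages() must
keep applying while a CPU is idle. In PELT the decay factor y is chosen so
that y^32 = 0.5, i.e. a blocked contribution halves roughly every 32 periods
of ~1024 us each; the kernel also decays partial runs of periods via a
fixed-point lookup table, which this sketch deliberately skips:

/*
 * Illustration only (not kernel code): PELT-style geometric decay of the
 * blocked load of an idle CPU. y^32 = 1/2, so a contribution halves every
 * 32 periods (one period ~= 1024 us). Partial periods are ignored here;
 * only the exact halving at 32-period granularity is modeled.
 */
#include <stdio.h>

static unsigned long decay_load_sketch(unsigned long val, unsigned int periods)
{
	/* y^32 = 1/2: every full run of 32 periods is one right shift */
	return val >> (periods / 32);
}

int main(void)
{
	unsigned long blocked = 1024;	/* load left behind by a task that just blocked */
	unsigned int p;

	/* without periodic updates, an idle CPU would report 1024 forever */
	for (p = 0; p <= 160; p += 32)
		printf("after ~%3u ms idle: blocked load = %lu\n",
		       p, decay_load_sketch(blocked, p));

	return 0;
}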
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/fair.c | 8 ++++++++
1 file changed, 8 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 01ffa9509c23..e4b80c63633a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5846,6 +5846,14 @@ static unsigned long task_h_load(struct task_struct *p)
 #else
 static inline void update_blocked_averages(int cpu)
 {
+	struct rq *rq = cpu_rq(cpu);
+	struct cfs_rq *cfs_rq = &rq->cfs;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&rq->lock, flags);
+	update_rq_clock(rq);
+	update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }

 static unsigned long task_h_load(struct task_struct *p)
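A note on the locking pattern in the new stub: the runqueue clock may only
be advanced while rq->lock is held, and the irqsave variant is needed
because runqueue locks are also taken from hard-interrupt context (the
scheduler tick), so local interrupts must stay disabled for the duration.
With the clock fresh, cfs_rq_clock_task() supplies the timestamp that
update_cfs_rq_load_avg() uses to perform the actual decay of the blocked
load and utilization sums.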