author     Vincent Guittot <vincent.guittot@linaro.org>  2014-08-26 13:06:50 +0200
committer  Ingo Molnar <mingo@kernel.org>  2014-09-19 12:35:28 +0200
commit     bd61c98f9b3f142cd63f9e15acfe203bec9e5f5a (patch)
tree       22f81a29271fae693da87fc6df170828775c1be4 /kernel
parent     ARM: topology: Use the new cpu_capacity interface (diff)
sched: Test the CPU's capacity in wake_affine()
Currently the task always wakes affine on this_cpu if the latter is idle.
Before waking up the task on this_cpu, we check that this_cpu's capacity
is not significantly reduced because of RT tasks or irq activity.

Use cases in which the number of irqs and/or the time spent under irq is
significant will benefit from this: the task that is woken up by an irq
or softirq will no longer run on the same CPU as that irq (and softirq)
but on an idle one.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: preeti@linux.vnet.ibm.com
Cc: riel@redhat.com
Cc: Morten.Rasmussen@arm.com
Cc: efault@gmx.de
Cc: nicolas.pitre@linaro.org
Cc: daniel.lezcano@linaro.org
Cc: dietmar.eggemann@arm.com
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/1409051215-16788-8-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
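The decision this patch reshapes is easiest to see outside the kernel. Below is a minimal user-space sketch of the new comparison, assuming illustrative capacity values (1024 = full capacity) and an imbalance_pct of 125; the wake_affine_balanced() helper is hypothetical and the cgroup effective_load() terms are dropped for brevity. It shows how a this_cpu whose capacity is eroded by irq time now fails the test even when its load is zero, where the old code unconditionally treated the idle case as balanced:

/*
 * Minimal sketch of the capacity-weighted wake_affine() test.
 * capacity_of() shrinks as RT tasks and irq time eat into a CPU, so a
 * degraded this_cpu lowers prev_eff_load and the affine wakeup is
 * refused. Capacities, imbalance_pct and the helper name are
 * illustrative only; effective_load() terms are omitted.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool wake_affine_balanced(int64_t this_load, int64_t prev_load,
				 unsigned long this_capacity,
				 unsigned long prev_capacity,
				 unsigned int imbalance_pct)
{
	int64_t this_eff_load, prev_eff_load;

	/* Each side is scaled by the *other* CPU's remaining capacity. */
	this_eff_load = 100;
	this_eff_load *= prev_capacity;

	/* prev_cpu gets an imbalance bonus that favours waking affine. */
	prev_eff_load = 100 + (imbalance_pct - 100) / 2;
	prev_eff_load *= this_capacity;

	/* Loads are factored in only when this_cpu is not idle. */
	if (this_load > 0) {
		this_eff_load *= this_load;
		prev_eff_load *= prev_load;
	}

	return this_eff_load <= prev_eff_load;
}

int main(void)
{
	/* Idle this_cpu at full capacity: wakeup stays affine (prints 1). */
	printf("%d\n", wake_affine_balanced(0, 0, 1024, 1024, 125));

	/* Same loads, but irq time cut this_cpu's capacity to 448: refused. */
	printf("%d\n", wake_affine_balanced(0, 0, 448, 1024, 125));
	return 0;
}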
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/fair.c	19
1 file changed, 10 insertions, 9 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index be530e40ceb9..74fa2c210b6d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4284,6 +4284,7 @@ static int wake_wide(struct task_struct *p)
static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
{
	s64 this_load, load;
+	s64 this_eff_load, prev_eff_load;
	int idx, this_cpu, prev_cpu;
	struct task_group *tg;
	unsigned long weight;
@@ -4327,21 +4328,21 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
	 * Otherwise check if either cpus are near enough in load to allow this
	 * task to be woken on this_cpu.
	 */
-	if (this_load > 0) {
-		s64 this_eff_load, prev_eff_load;
+	this_eff_load = 100;
+	this_eff_load *= capacity_of(prev_cpu);
+
+	prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
+	prev_eff_load *= capacity_of(this_cpu);

-		this_eff_load = 100;
-		this_eff_load *= capacity_of(prev_cpu);
+	if (this_load > 0) {
		this_eff_load *= this_load +
			effective_load(tg, this_cpu, weight, weight);

-		prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
-		prev_eff_load *= capacity_of(this_cpu);
		prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
+	}
+
+	balanced = this_eff_load <= prev_eff_load;

-		balanced = this_eff_load <= prev_eff_load;
-	} else
-		balanced = true;
	schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);

	if (!balanced)
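For a feel of the numbers: with this_load == 0 the test reduces to 100 * capacity_of(prev_cpu) <= (100 + (sd->imbalance_pct - 100) / 2) * capacity_of(this_cpu). Assuming the common imbalance_pct of 125, the right-hand factor is 112, so an idle this_cpu passes only while it retains at least 100/112, roughly 89%, of prev_cpu's capacity; below that, capacity lost to RT tasks or irqs steers the wakeup elsewhere, whereas the old code accepted this case unconditionally.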