summaryrefslogtreecommitdiffstats
path: root/kernel/sched/fair.c
diff options
context:
space:
mode:
authorRik van Riel <riel@redhat.com>2013-10-07 12:29:32 +0200
committerIngo Molnar <mingo@kernel.org>2013-10-09 14:48:08 +0200
commitca28aa53dd95868c9e38917b9881c09dacfacf1a (patch)
tree82d068c0c711dcba8a66d3760d6679586ae07638 /kernel/sched/fair.c
parentsched/numa: Decide whether to favour task or group weights based on swap cand... (diff)
downloadlinux-ca28aa53dd95868c9e38917b9881c09dacfacf1a.tar.xz
linux-ca28aa53dd95868c9e38917b9881c09dacfacf1a.zip
sched/numa: Fix task or group comparison
This patch separately considers task and group affinities when searching for swap candidates during NUMA placement. If tasks are part of the same group, or no group at all, the task weights are considered. Some hysteresis is added to prevent tasks within one group from getting bounced between NUMA nodes due to tiny differences. If tasks are part of different groups, the code compares group weights, in order to favor grouping task groups together. The patch also changes the group weight multiplier to be the same as the task weight multiplier, since the two are no longer added up like before. Signed-off-by: Rik van Riel <riel@redhat.com> Signed-off-by: Mel Gorman <mgorman@suse.de> Cc: Andrea Arcangeli <aarcange@redhat.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com> Signed-off-by: Peter Zijlstra <peterz@infradead.org> Link: http://lkml.kernel.org/r/1381141781-10992-55-git-send-email-mgorman@suse.de Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--kernel/sched/fair.c32
1 file changed, 25 insertions, 7 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6f454616fa86..423316cdee07 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -962,7 +962,7 @@ static inline unsigned long group_weight(struct task_struct *p, int nid)
if (!total_faults)
return 0;
- return 1200 * group_faults(p, nid) / total_faults;
+ return 1000 * group_faults(p, nid) / total_faults;
}
static unsigned long weighted_cpuload(const int cpu);
@@ -1068,16 +1068,34 @@ static void task_numa_compare(struct task_numa_env *env,
/*
* If dst and source tasks are in the same NUMA group, or not
- * in any group then look only at task weights otherwise give
- * priority to the group weights.
+ * in any group then look only at task weights.
*/
- if (!cur->numa_group || !env->p->numa_group ||
- cur->numa_group == env->p->numa_group) {
+ if (cur->numa_group == env->p->numa_group) {
imp = taskimp + task_weight(cur, env->src_nid) -
task_weight(cur, env->dst_nid);
+ /*
+ * Add some hysteresis to prevent swapping the
+ * tasks within a group over tiny differences.
+ */
+ if (cur->numa_group)
+ imp -= imp/16;
} else {
- imp = groupimp + group_weight(cur, env->src_nid) -
- group_weight(cur, env->dst_nid);
+ /*
+ * Compare the group weights. If a task is all by
+ * itself (not part of a group), use the task weight
+ * instead.
+ */
+ if (env->p->numa_group)
+ imp = groupimp;
+ else
+ imp = taskimp;
+
+ if (cur->numa_group)
+ imp += group_weight(cur, env->src_nid) -
+ group_weight(cur, env->dst_nid);
+ else
+ imp += task_weight(cur, env->src_nid) -
+ task_weight(cur, env->dst_nid);
}
}