author    Peter Zijlstra <a.p.zijlstra@chello.nl>    2008-06-27 13:41:22 +0200
committer Ingo Molnar <mingo@elte.hu>                2008-06-27 14:31:35 +0200
commit    a25b5aca8740ea99d5e18dfc71235a52b685dcf7 (patch)
tree      834bcfd9a51f840cc10693348ecffc99ce69550d
parent    sched: dont micro manage share losses (diff)
sched: no need to aggregate task_weight
We only need to know the task_weight of the busiest rq - nothing to do
if there are no tasks there.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--   kernel/sched.c        16
-rw-r--r--   kernel/sched_fair.c    2
2 files changed, 2 insertions, 16 deletions
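Before the diff itself, a minimal userspace sketch of the reasoning (plain C,
not kernel code; the CPU count, the per-CPU weights and the choice of busiest
CPU are made-up illustration): whether a group has anything to pull from the
busiest runqueue depends only on that runqueue's own task_weight, so the
span-wide sum that the old code maintained per group is wasted work.

#include <stdio.h>

#define NR_CPUS 4

/* hypothetical task_weight of one task group on each CPU */
static unsigned long task_weight[NR_CPUS] = { 0, 2048, 0, 1024 };

/* old approach: sum the group's task_weight over the whole domain span */
static unsigned long aggregate_task_weight(void)
{
	unsigned long sum = 0;
	int i;

	for (i = 0; i < NR_CPUS; i++)
		sum += task_weight[i];
	return sum;
}

int main(void)
{
	int busiest_cpu = 2;	/* pretend load balancing picked CPU 2 */

	printf("span-wide task_weight = %lu\n", aggregate_task_weight());

	/*
	 * New approach: consult the busiest rq only.  The span-wide sum is
	 * non-zero, but this group has no tasks on CPU 2, so there is
	 * nothing to pull from it and the group can be skipped.
	 */
	if (!task_weight[busiest_cpu])
		printf("cpu %d: group has no tasks here, skip it\n",
		       busiest_cpu);

	return 0;
}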
diff --git a/kernel/sched.c b/kernel/sched.c
index 28229c5d4983..716cfc8e099e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -433,12 +433,6 @@ struct cfs_rq {
 		 * The sum of all runqueue weights within this span.
 		 */
 		unsigned long rq_weight;
-
-		/*
-		 * Weight contributed by tasks; this is the part we can
-		 * influence by moving tasks around.
-		 */
-		unsigned long task_weight;
 	} aggregate;
 #endif
 #endif
@@ -1473,10 +1467,6 @@ static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
  * rq_weight:
  *    Direct sum of all the cpu's their rq weight, e.g. A would get 3 while
  *    B would get 2.
- *
- * task_weight:
- *    Part of the rq_weight contributed by tasks; all groups except B would
- *    get 1, B gets 2.
  */
 
 static inline struct aggregate_struct *
@@ -1524,16 +1514,12 @@ static void
 aggregate_group_weight(struct task_group *tg, int cpu, struct sched_domain *sd)
 {
 	unsigned long rq_weight = 0;
-	unsigned long task_weight = 0;
 	int i;
 
-	for_each_cpu_mask(i, sd->span) {
+	for_each_cpu_mask(i, sd->span)
 		rq_weight += tg->cfs_rq[i]->load.weight;
-		task_weight += tg->cfs_rq[i]->task_weight;
-	}
 
 	aggregate(tg, cpu)->rq_weight = rq_weight;
-	aggregate(tg, cpu)->task_weight = task_weight;
 }
 
 /*
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index b10c0d61a2a9..03b9fbd9d648 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1427,7 +1427,7 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		/*
 		 * empty group
 		 */
-		if (!aggregate(tg, this_cpu)->task_weight)
+		if (!tg->cfs_rq[busiest_cpu]->task_weight)
 			continue;
 
 		rem_load = rem_load_move * aggregate(tg, this_cpu)->rq_weight;
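For reference, aggregate_group_weight() as it reads with the sched.c hunk
above applied (reconstructed from the diff; the surrounding kernel
definitions of struct task_group, struct sched_domain, aggregate() and
for_each_cpu_mask() are assumed, so this is not compilable on its own):

static void
aggregate_group_weight(struct task_group *tg, int cpu, struct sched_domain *sd)
{
	unsigned long rq_weight = 0;
	int i;

	/* only the runqueue weights are still summed over the span */
	for_each_cpu_mask(i, sd->span)
		rq_weight += tg->cfs_rq[i]->load.weight;

	aggregate(tg, cpu)->rq_weight = rq_weight;
}

Only rq_weight is aggregated over the span now; a group's task_weight stays
per-CPU in its cfs_rq, and load_balance_fair() reads it directly for the
busiest CPU.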