author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-09-14 19:37:39 +0200
committer	Ingo Molnar <mingo@elte.hu>	2009-09-15 16:51:29 +0200
commit		0763a660a84220cc3900fd32abdd7ad109e2278d (patch)
tree		1b745884fb79627e05c026ee5bfb115b1f722db8 /kernel/sched_fair.c
parent		sched: Feature to disable APERF/MPERF cpu_power (diff)
sched: Rename select_task_rq() argument
In order to be able to rename the sync argument, we need to rename
the current flag argument.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
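
[Editor's note] For context, a minimal stand-alone sketch, not kernel code: the SD_* values and the pick_cpu() helper below are simplified, hypothetical stand-ins. It only illustrates the convention the new name is meant to convey, that the argument is an SD_BALANCE_* bitmask selecting which balancing path (wake, fork, exec) the CPU choice is made for, rather than a boolean "flag".

/*
 * Illustration only; values and helper are invented for this sketch.
 */
#include <stdio.h>

#define SD_BALANCE_EXEC  0x04	/* illustrative flag values */
#define SD_BALANCE_FORK  0x08
#define SD_BALANCE_WAKE  0x10

static int pick_cpu(int prev_cpu, int this_cpu, int sd_flag)
{
	/* Wake-ups prefer the CPU the task last ran on (cache affinity). */
	if (sd_flag & SD_BALANCE_WAKE)
		return prev_cpu;

	/* fork()/exec() balancing starts the search from the current CPU. */
	return this_cpu;
}

int main(void)
{
	printf("wake -> CPU %d\n", pick_cpu(2, 0, SD_BALANCE_WAKE));
	printf("exec -> CPU %d\n", pick_cpu(2, 0, SD_BALANCE_EXEC));
	return 0;
}

The same idea drives the sched-domain walk in the diff below: only domains whose tmp->flags contain the requested sd_flag are considered.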
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	14
1 file changed, 7 insertions, 7 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 19593568031a..b554e63c521a 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1331,7 +1331,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
  *
  * preempt must be disabled.
  */
-static int select_task_rq_fair(struct task_struct *p, int flag, int sync)
+static int select_task_rq_fair(struct task_struct *p, int sd_flag, int sync)
 {
 	struct sched_domain *tmp, *sd = NULL;
 	int cpu = smp_processor_id();
@@ -1339,7 +1339,7 @@ static int select_task_rq_fair(struct task_struct *p, int flag, int sync)
 	int new_cpu = cpu;
 	int want_affine = 0;
 
-	if (flag & SD_BALANCE_WAKE) {
+	if (sd_flag & SD_BALANCE_WAKE) {
 		if (sched_feat(AFFINE_WAKEUPS))
 			want_affine = 1;
 		new_cpu = prev_cpu;
@@ -1368,7 +1368,7 @@ static int select_task_rq_fair(struct task_struct *p, int flag, int sync)
 			break;
 		}
 
-		switch (flag) {
+		switch (sd_flag) {
 		case SD_BALANCE_WAKE:
 			if (!sched_feat(LB_WAKEUP_UPDATE))
 				break;
@@ -1392,7 +1392,7 @@ static int select_task_rq_fair(struct task_struct *p, int flag, int sync)
 			want_affine = 0;
 		}
 
-		if (!(tmp->flags & flag))
+		if (!(tmp->flags & sd_flag))
 			continue;
 
 		sd = tmp;
@@ -1402,12 +1402,12 @@ static int select_task_rq_fair(struct task_struct *p, int flag, int sync)
 		struct sched_group *group;
 		int weight;
 
-		if (!(sd->flags & flag)) {
+		if (!(sd->flags & sd_flag)) {
 			sd = sd->child;
 			continue;
 		}
 
-		group = find_idlest_group(sd, p, cpu, flag);
+		group = find_idlest_group(sd, p, cpu, sd_flag);
 		if (!group) {
 			sd = sd->child;
 			continue;
@@ -1427,7 +1427,7 @@ static int select_task_rq_fair(struct task_struct *p, int flag, int sync)
 		for_each_domain(cpu, tmp) {
 			if (weight <= cpumask_weight(sched_domain_span(tmp)))
 				break;
-			if (tmp->flags & flag)
+			if (tmp->flags & sd_flag)
 				sd = tmp;
 		}
 		/* while loop will break here if sd == NULL */