author     Joonsoo Kim <iamjoonsoo.kim@lge.com>      2013-04-23 10:27:41 +0200
committer  Ingo Molnar <mingo@kernel.org>            2013-04-24 08:52:45 +0200
commit     e6252c3ef4b9cd251b53f7b68035f395d20b044e
tree       ab331d8a6b9dfd9733c857cbf66944d278d72f2f /kernel
parent     sched: Move up affinity check to mitigate useless redoing overhead
sched: Rename load_balance_tmpmask to load_balance_mask
The name load_balance_tmpmask doesn't convey any specific meaning,
so rename it to reflect its purpose: the working cpumask of load_balance().
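
For context, a minimal kernel-style sketch of the pattern being renamed. Here
example_balance() and its body are hypothetical stand-ins for load_balance();
only the DEFINE_PER_CPU() declaration and the __get_cpu_var() access mirror
the patch below:

/* Illustrative sketch only -- not the full kernel code. */
#include <linux/percpu.h>
#include <linux/cpumask.h>

/* Working cpumask for load_balance and load_balance_newidle. */
DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);

/* Hypothetical caller, standing in for load_balance(). */
static int example_balance(void)
{
	/* Each CPU owns one scratch mask; it is the working storage
	 * for load balancing, which the new name states directly. */
	struct cpumask *cpus = __get_cpu_var(load_balance_mask);

	cpumask_copy(cpus, cpu_active_mask);
	/* ... load-balance logic would whittle 'cpus' down here ... */
	return 0;
}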
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Tested-by: Jason Low <jason.low2@hp.com>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Davidlohr Bueso <davidlohr.bueso@hp.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1366705662-3587-6-git-send-email-iamjoonsoo.kim@lge.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/core.c | 4 ++--
-rw-r--r--  kernel/sched/fair.c | 4 ++--
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index ee8c1bd703fe..cb49b2ab0e16 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6873,7 +6873,7 @@ struct task_group root_task_group;
 LIST_HEAD(task_groups);
 #endif
 
-DECLARE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
+DECLARE_PER_CPU(cpumask_var_t, load_balance_mask);
 
 void __init sched_init(void)
 {
@@ -6910,7 +6910,7 @@ void __init sched_init(void)
 #endif /* CONFIG_RT_GROUP_SCHED */
 #ifdef CONFIG_CPUMASK_OFFSTACK
 	for_each_possible_cpu(i) {
-		per_cpu(load_balance_tmpmask, i) = (void *)ptr;
+		per_cpu(load_balance_mask, i) = (void *)ptr;
 		ptr += cpumask_size();
 	}
 #endif /* CONFIG_CPUMASK_OFFSTACK */
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b8ef321641df..5b1e96687b49 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4977,7 +4977,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
 
 #define MAX_PINNED_INTERVAL	512
 
 /* Working cpumask for load_balance and load_balance_newidle. */
-DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
+DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
 
 static int need_active_balance(struct lb_env *env)
 {
@@ -5012,7 +5012,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	struct sched_group *group;
 	struct rq *busiest;
 	unsigned long flags;
-	struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
+	struct cpumask *cpus = __get_cpu_var(load_balance_mask);
 
 	struct lb_env env = {
 		.sd = sd,
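
The core.c hunk only matters under CONFIG_CPUMASK_OFFSTACK, where cpumask_var_t
is a pointer and each CPU's mask must be pointed at real storage. A hedged
sketch of that carve-out, assuming a single pre-allocated buffer (the real
sched_init() sizes one buffer across several per-CPU masks):

/* Sketch only: how sched_init() hands each CPU a slice of one buffer. */
#ifdef CONFIG_CPUMASK_OFFSTACK
	size_t alloc_size = num_possible_cpus() * cpumask_size();
	char *ptr = kzalloc(alloc_size, GFP_NOWAIT);
	int i;

	for_each_possible_cpu(i) {
		/* Each CPU's load_balance_mask points into the buffer. */
		per_cpu(load_balance_mask, i) = (void *)ptr;
		ptr += cpumask_size();
	}
#endif /* CONFIG_CPUMASK_OFFSTACK */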