path: root/kernel/sched_rt.c
author     Rusty Russell <rusty@rustcorp.com.au>  2008-11-24 17:05:14 +0100
committer  Ingo Molnar <mingo@elte.hu>            2008-11-24 17:52:42 +0100
commit     96f874e26428ab5d2db681c100210c254775e154 (patch)
tree       e18a6f0629ef17f2344f3691c8df4692ccb875fa /kernel/sched_rt.c
parent     sched: convert local_cpu_mask to cpumask_var_t. (diff)
download   linux-96f874e26428ab5d2db681c100210c254775e154.tar.xz
           linux-96f874e26428ab5d2db681c100210c254775e154.zip
sched: convert remaining old-style cpumask operators
Impact: Trivial API conversion

  NR_CPUS               -> nr_cpu_ids
  cpumask_t             -> struct cpumask
  sizeof(cpumask_t)     -> cpumask_size()
  cpumask_a = cpumask_b -> cpumask_copy(&cpumask_a, &cpumask_b)
  cpu_set()             -> cpumask_set_cpu()
  first_cpu()           -> cpumask_first()
  cpumask_of_cpu()      -> cpumask_of()
  cpus_*                -> cpumask_*

There are some FIXMEs where we wait for all archs to complete the
infrastructure (patches have been sent):

  cpu_coregroup_map -> cpu_coregroup_mask
  node_to_cpumask*  -> cpumask_of_node

There is also one FIXME where we pass an array of cpumasks to
partition_sched_domains(): this implies knowing the definition of
'struct cpumask' and the size of a cpumask. This will be fixed in a
future patch.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
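To make the flavour of the conversion concrete, here is a minimal sketch (not part of this patch; the helper names and the stack-allocated mask are made up for illustration). The old operators act on cpumask_t values, while the new ones take struct cpumask pointers:

#include <linux/cpumask.h>
#include <linux/sched.h>

/* Old style: value-based operators on cpumask_t (hypothetical helper). */
static int allowed_and_online_old(struct task_struct *p, int cpu)
{
        cpumask_t mask;

        cpus_and(mask, p->cpus_allowed, cpu_online_map);
        return cpu_isset(cpu, mask);
}

/* New style: the same check with pointer-based operators. */
static int allowed_and_online_new(struct task_struct *p, int cpu)
{
        struct cpumask mask;

        cpumask_and(&mask, &p->cpus_allowed, cpu_online_mask);
        return cpumask_test_cpu(cpu, &mask);
}

The pointer-based forms are what allow the bitmap to live off-stack with CONFIG_CPUMASK_OFFSTACK, since callers no longer copy whole NR_CPUS-sized values around by plain assignment.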
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--  kernel/sched_rt.c  18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 1f0e99d1a8ce..fb3964579a8a 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -923,7 +923,7 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
 {
 	if (!task_running(rq, p) &&
-	    (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
+	    (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
 	    (p->rt.nr_cpus_allowed > 1))
 		return 1;
 	return 0;
@@ -982,7 +982,7 @@ static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
 static int find_lowest_rq(struct task_struct *task)
 {
 	struct sched_domain *sd;
-	cpumask_t *lowest_mask = __get_cpu_var(local_cpu_mask);
+	struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
 	int this_cpu = smp_processor_id();
 	int cpu = task_cpu(task);
 
@@ -997,7 +997,7 @@ static int find_lowest_rq(struct task_struct *task)
 	 * I guess we might want to change cpupri_find() to ignore those
 	 * in the first place.
 	 */
-	cpus_and(*lowest_mask, *lowest_mask, cpu_active_map);
+	cpumask_and(lowest_mask, lowest_mask, cpu_active_mask);
 
 	/*
 	 * At this point we have built a mask of cpus representing the
@@ -1007,7 +1007,7 @@ static int find_lowest_rq(struct task_struct *task)
 	 * We prioritize the last cpu that the task executed on since
 	 * it is most likely cache-hot in that location.
 	 */
-	if (cpu_isset(cpu, *lowest_mask))
+	if (cpumask_test_cpu(cpu, lowest_mask))
 		return cpu;
 
 	/*
@@ -1064,8 +1064,8 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 			 * Also make sure that it wasn't scheduled on its rq.
 			 */
 			if (unlikely(task_rq(task) != rq ||
-				     !cpu_isset(lowest_rq->cpu,
-						task->cpus_allowed) ||
+				     !cpumask_test_cpu(lowest_rq->cpu,
+						       &task->cpus_allowed) ||
 				     task_running(rq, task) ||
 				     !task->se.on_rq)) {
 
@@ -1315,9 +1315,9 @@ move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
 }
 
 static void set_cpus_allowed_rt(struct task_struct *p,
-				const cpumask_t *new_mask)
+				const struct cpumask *new_mask)
 {
-	int weight = cpus_weight(*new_mask);
+	int weight = cpumask_weight(new_mask);
 
 	BUG_ON(!rt_task(p));
 
@@ -1338,7 +1338,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
 		update_rt_migration(rq);
 	}
 
-	p->cpus_allowed = *new_mask;
+	cpumask_copy(&p->cpus_allowed, new_mask);
 	p->rt.nr_cpus_allowed = weight;
 }