path: root/kernel/sched_rt.c
author	Rusty Russell <rusty@rustcorp.com.au>	2008-11-24 17:05:05 +0100
committer	Ingo Molnar <mingo@elte.hu>	2008-11-24 17:51:18 +0100
commit	c6c4927b22a3514c6660f0e72c78716226bd3cc8 (patch)
tree	ee7fc9031eb14ac1c9d2f0732950b4faae2b6245 /kernel/sched_rt.c
parent	sched: convert nohz_cpu_mask to cpumask_var_t. (diff)
download	linux-c6c4927b22a3514c6660f0e72c78716226bd3cc8.tar.xz
	linux-c6c4927b22a3514c6660f0e72c78716226bd3cc8.zip
sched: convert struct root_domain to cpumask_var_t.
Impact: (future) size reduction for large NR_CPUS.

Dynamically allocating cpumasks (when CONFIG_CPUMASK_OFFSTACK) saves
space for small nr_cpu_ids but big CONFIG_NR_CPUS. cpumask_var_t is
just a struct cpumask for !CONFIG_CPUMASK_OFFSTACK.

def_root_domain is static, and so its masks are initialized with
alloc_bootmem_cpumask_var. After that, alloc_cpumask_var is used.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
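To make the conversion concrete, here is a minimal sketch of the allocation
pattern the message describes. The container and helper below
(struct example_domain, init_masks) are hypothetical stand-ins for
struct root_domain and its init path, not code from this patch; only the
cpumask_var_t calls mirror what the commit does:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/types.h>

/* Hypothetical container mirroring the cpumask fields of struct root_domain. */
struct example_domain {
	cpumask_var_t span;
	cpumask_var_t online;
	cpumask_var_t rto_mask;
};

static int init_masks(struct example_domain *d, bool bootmem)
{
	if (bootmem) {
		/* boot-time (static) objects: the bootmem variant cannot fail */
		alloc_bootmem_cpumask_var(&d->span);
		alloc_bootmem_cpumask_var(&d->online);
		alloc_bootmem_cpumask_var(&d->rto_mask);
		return 0;
	}

	/*
	 * With CONFIG_CPUMASK_OFFSTACK these really allocate; without it,
	 * cpumask_var_t is a plain struct cpumask and each call is a
	 * no-op that returns true.
	 */
	if (!alloc_cpumask_var(&d->span, GFP_KERNEL))
		return -ENOMEM;
	if (!alloc_cpumask_var(&d->online, GFP_KERNEL))
		goto free_span;
	if (!alloc_cpumask_var(&d->rto_mask, GFP_KERNEL))
		goto free_online;
	return 0;

free_online:
	free_cpumask_var(d->online);
free_span:
	free_cpumask_var(d->span);
	return -ENOMEM;
}

free_cpumask_var() is likewise a no-op without CONFIG_CPUMASK_OFFSTACK, which
is why fixed-size builds pay no runtime cost for the conversion.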
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--	kernel/sched_rt.c	26
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 4cd813abc23a..820fc422c6df 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -15,7 +15,7 @@ static inline void rt_set_overload(struct rq *rq)
 	if (!rq->online)
 		return;
 
-	cpu_set(rq->cpu, rq->rd->rto_mask);
+	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
 	/*
 	 * Make sure the mask is visible before we set
 	 * the overload count. That is checked to determine
@@ -34,7 +34,7 @@ static inline void rt_clear_overload(struct rq *rq)
 
 	/* the order here really doesn't matter */
 	atomic_dec(&rq->rd->rto_count);
-	cpu_clear(rq->cpu, rq->rd->rto_mask);
+	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
 }
 
 static void update_rt_migration(struct rq *rq)
@@ -139,14 +139,14 @@ static int rt_se_boosted(struct sched_rt_entity *rt_se)
 }
 
 #ifdef CONFIG_SMP
-static inline cpumask_t sched_rt_period_mask(void)
+static inline const struct cpumask *sched_rt_period_mask(void)
 {
 	return cpu_rq(smp_processor_id())->rd->span;
 }
 #else
-static inline cpumask_t sched_rt_period_mask(void)
+static inline const struct cpumask *sched_rt_period_mask(void)
 {
-	return cpu_online_map;
+	return cpu_online_mask;
 }
 #endif
 
@@ -212,9 +212,9 @@ static inline int rt_rq_throttled(struct rt_rq *rt_rq)
 	return rt_rq->rt_throttled;
 }
 
-static inline cpumask_t sched_rt_period_mask(void)
+static inline const struct cpumask *sched_rt_period_mask(void)
 {
-	return cpu_online_map;
+	return cpu_online_mask;
 }
 
 static inline
@@ -241,11 +241,11 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
 	int i, weight, more = 0;
 	u64 rt_period;
 
-	weight = cpus_weight(rd->span);
+	weight = cpumask_weight(rd->span);
 
 	spin_lock(&rt_b->rt_runtime_lock);
 	rt_period = ktime_to_ns(rt_b->rt_period);
-	for_each_cpu_mask_nr(i, rd->span) {
+	for_each_cpu(i, rd->span) {
 		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
 		s64 diff;
 
@@ -324,7 +324,7 @@ static void __disable_runtime(struct rq *rq)
 		/*
 		 * Greedy reclaim, take back as much as we can.
 		 */
-		for_each_cpu_mask(i, rd->span) {
+		for_each_cpu(i, rd->span) {
 			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
 			s64 diff;
 
@@ -429,13 +429,13 @@ static inline int balance_runtime(struct rt_rq *rt_rq)
 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 {
 	int i, idle = 1;
-	cpumask_t span;
+	const struct cpumask *span;
 
 	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
 		return 1;
 
 	span = sched_rt_period_mask();
-	for_each_cpu_mask(i, span) {
+	for_each_cpu(i, span) {
 		int enqueue = 0;
 		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
 		struct rq *rq = rq_of_rt_rq(rt_rq);
@@ -1181,7 +1181,7 @@ static int pull_rt_task(struct rq *this_rq)
 
 	next = pick_next_task_rt(this_rq);
 
-	for_each_cpu_mask_nr(cpu, this_rq->rd->rto_mask) {
+	for_each_cpu(cpu, this_rq->rd->rto_mask) {
 		if (this_cpu == cpu)
 			continue;
 
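For reference, a minimal before/after sketch of the iterator conversion
applied throughout this diff; count_span_cpus() is a hypothetical helper,
not code from the patch. for_each_cpu() walks the mask through a
const struct cpumask * instead of copying an NR_CPUS-sized cpumask_t by
value, which is what lets the on-stack copies above disappear:

#include <linux/cpumask.h>

/* Hypothetical helper illustrating the new iterator style. */
static int count_span_cpus(const struct cpumask *span)
{
	int i, n = 0;

	/*
	 * Old style forced a by-value cpumask_t, e.g.:
	 *	cpumask_t span_copy = *spanp;
	 *	for_each_cpu_mask(i, span_copy) { ... }
	 * New style iterates through the pointer directly:
	 */
	for_each_cpu(i, span)
		n++;

	return n;	/* same result as cpumask_weight(span) */
}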