summaryrefslogtreecommitdiffstats
path: root/kernel/sched
diff options
context:
space:
mode:
author: Juri Lelli <juri.lelli@arm.com> 2014-10-28 12:54:46 +0100
committer: Ingo Molnar <mingo@kernel.org> 2014-11-04 07:17:52 +0100
commit: 75e23e49dbdd86aace375f599062aa67483a001b (patch)
tree: d3a26f83ed3a411b966193ab051901a8a5c60f1c /kernel/sched
parent: sched: Check if we got a shallowest_idle_cpu before searching for least_loaded_cpu (diff)
downloadlinux-75e23e49dbdd86aace375f599062aa67483a001b.tar.xz
linux-75e23e49dbdd86aace375f599062aa67483a001b.zip
sched/core: Use dl_bw_of() under rcu_read_lock_sched()
As per commit f10e00f4bf36 ("sched/dl: Use dl_bw_of() under rcu_read_lock_sched()"), dl_bw_of() has to be protected by rcu_read_lock_sched().

Signed-off-by: Juri Lelli <juri.lelli@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/1414497286-28824-1-git-send-email-juri.lelli@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r-- kernel/sched/core.c | 7
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 379cb87da69d..df0569ebec0f 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4661,6 +4661,7 @@ int cpuset_cpumask_can_shrink(const struct cpumask *cur,
struct dl_bw *cur_dl_b;
unsigned long flags;
+ rcu_read_lock_sched();
cur_dl_b = dl_bw_of(cpumask_any(cur));
trial_cpus = cpumask_weight(trial);
@@ -4669,6 +4670,7 @@ int cpuset_cpumask_can_shrink(const struct cpumask *cur,
cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
ret = 0;
raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
+ rcu_read_unlock_sched();
return ret;
}
@@ -4697,11 +4699,13 @@ int task_can_attach(struct task_struct *p,
cs_cpus_allowed)) {
unsigned int dest_cpu = cpumask_any_and(cpu_active_mask,
cs_cpus_allowed);
- struct dl_bw *dl_b = dl_bw_of(dest_cpu);
+ struct dl_bw *dl_b;
bool overflow;
int cpus;
unsigned long flags;
+ rcu_read_lock_sched();
+ dl_b = dl_bw_of(dest_cpu);
raw_spin_lock_irqsave(&dl_b->lock, flags);
cpus = dl_bw_cpus(dest_cpu);
overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
@@ -4717,6 +4721,7 @@ int task_can_attach(struct task_struct *p,
__dl_add(dl_b, p->dl.dl_bw);
}
raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+ rcu_read_unlock_sched();
}
#endif