summaryrefslogtreecommitdiffstats
path: root/kernel/sched_fair.c
diff options
context:
space:
mode:
authorChris Friesen <cfriesen@nortel.com>2008-09-22 19:06:09 +0200
committerIngo Molnar <mingo@elte.hu>2008-09-22 19:43:10 +0200
commitcaea8a03702c147e8ae90da0801e7ba8297b1d46 (patch)
tree7ff0746c3dbcc73bc241957c2b3149bcf8901fb2 /kernel/sched_fair.c
parentsched: turn off WAKEUP_OVERLAP (diff)
downloadlinux-caea8a03702c147e8ae90da0801e7ba8297b1d46.tar.xz
linux-caea8a03702c147e8ae90da0801e7ba8297b1d46.zip
sched: fix list traversal to use _rcu variant
load_balance_fair() calls rcu_read_lock() but then traverses the list using the regular list traversal routine. This patch converts the list traversal to use the _rcu version.

Signed-off-by: Chris Friesen <cfriesen@nortel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--kernel/sched_fair.c2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 7328383690f1..3b89aa6594a9 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1521,7 +1521,7 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
rcu_read_lock();
update_h_load(busiest_cpu);
- list_for_each_entry(tg, &task_groups, list) {
+ list_for_each_entry_rcu(tg, &task_groups, list) {
struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu];
unsigned long busiest_h_load = busiest_cfs_rq->h_load;
unsigned long busiest_weight = busiest_cfs_rq->load.weight;