author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2008-04-19 19:45:00 +0200
committer  Ingo Molnar <mingo@elte.hu>                2008-04-19 19:45:00 +0200
commit     4a55bd5e97b1775913f88f11108a4f144f590e89 (patch)
tree       4514f2370d898b93086779c821023319fe4c8b9d /kernel
parent     sched: fair-group scheduling vs latency (diff)
sched: fair-group: de-couple load-balancing from the rb-trees
De-couple load-balancing from the rb-trees, so that I can change their organization.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
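To make the idea concrete, here is a minimal user-space sketch of the mechanism this patch introduces. It is not the kernel code: the list helpers are simplified stand-ins for <linux/list.h>, the is_task flag stands in for entity_is_task(), and sketch_balance_iterator() only mirrors the control flow of the patched __load_balance_iterator().

#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *head)
{
        head->next = head->prev = head;
}

/* add 'entry' right after 'head' (prepend), like the kernel's list_add() */
static void list_add(struct list_head *entry, struct list_head *head)
{
        entry->next = head->next;
        entry->prev = head;
        head->next->prev = entry;
        head->next = entry;
}

/* container_of-style lookup, like the kernel's list_entry() */
#define list_entry(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct sched_entity {
        struct list_head group_node;    /* links the entity into cfs_rq->tasks */
        int is_task;                    /* stand-in for entity_is_task(se) */
};

struct cfs_rq {
        struct list_head tasks;                 /* replaces walking the rb-tree */
        struct list_head *balance_iterator;     /* replaces rb_load_balance_curr */
};

/*
 * Same control flow as the patched __load_balance_iterator(): walk the
 * list from 'next', skip entities that are not tasks, and save a cursor
 * so the next call can resume where this one stopped.
 */
static struct sched_entity *
sketch_balance_iterator(struct cfs_rq *cfs_rq, struct list_head *next)
{
        struct sched_entity *se;

        if (next == &cfs_rq->tasks)
                return NULL;

        do {
                se = list_entry(next, struct sched_entity, group_node);
                next = next->next;
        } while (next != &cfs_rq->tasks && !se->is_task);

        /* reaching the list head ends the walk, as in the patch */
        if (next == &cfs_rq->tasks)
                return NULL;

        cfs_rq->balance_iterator = next;
        return se->is_task ? se : NULL;
}

The point of the indirection: after this patch, load balancing depends only on cfs_rq->tasks and balance_iterator, so the rb-tree's internal organization can change freely.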
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/sched.c        10
-rw-r--r--   kernel/sched_fair.c   21
2 files changed, 21 insertions(+), 10 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index ae1a3e936d28..3202462109f5 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -384,8 +384,12 @@ struct cfs_rq {

         struct rb_root tasks_timeline;
         struct rb_node *rb_leftmost;
-        struct rb_node *rb_load_balance_curr;
-        /* 'curr' points to currently running entity on this cfs_rq.
+
+        struct list_head tasks;
+        struct list_head *balance_iterator;
+
+        /*
+         * 'curr' points to currently running entity on this cfs_rq.
          * It is set to NULL otherwise (i.e when none are currently running).
          */
         struct sched_entity *curr, *next;
@@ -2525,6 +2529,7 @@ static void __sched_fork(struct task_struct *p)

         INIT_LIST_HEAD(&p->rt.run_list);
         p->se.on_rq = 0;
+        INIT_LIST_HEAD(&p->se.group_node);

 #ifdef CONFIG_PREEMPT_NOTIFIERS
         INIT_HLIST_HEAD(&p->preempt_notifiers);
@@ -7898,6 +7903,7 @@ int in_sched_functions(unsigned long addr)
 static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
 {
         cfs_rq->tasks_timeline = RB_ROOT;
+        INIT_LIST_HEAD(&cfs_rq->tasks);
 #ifdef CONFIG_FAIR_GROUP_SCHED
         cfs_rq->rq = rq;
 #endif
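The sched.c side only initializes the new fields; the list itself is maintained in sched_fair.c below. For the dequeue path there, here is a simplified stand-in for the kernel's list_del_init(), in the same style as the sketch above:

static void list_del_init(struct list_head *entry)
{
        entry->prev->next = entry->next;
        entry->next->prev = entry->prev;
        INIT_LIST_HEAD(entry);  /* node points back at itself, so a repeat unlink is harmless */
}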
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 9e301a2bab6f..ed8ce329899b 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -533,6 +533,7 @@ account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
         add_cfs_task_weight(cfs_rq, se->load.weight);
         cfs_rq->nr_running++;
         se->on_rq = 1;
+        list_add(&se->group_node, &cfs_rq->tasks);
 }

 static void
@@ -545,6 +546,7 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
         add_cfs_task_weight(cfs_rq, -se->load.weight);
         cfs_rq->nr_running--;
         se->on_rq = 0;
+        list_del_init(&se->group_node);
 }

 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
@@ -1289,21 +1291,24 @@ static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
  * the current task:
  */
 static struct task_struct *
-__load_balance_iterator(struct cfs_rq *cfs_rq, struct rb_node *curr)
+__load_balance_iterator(struct cfs_rq *cfs_rq, struct list_head *next)
 {
         struct task_struct *p = NULL;
         struct sched_entity *se;

-        if (!curr)
+        if (next == &cfs_rq->tasks)
                 return NULL;

         /* Skip over entities that are not tasks */
         do {
-                se = rb_entry(curr, struct sched_entity, run_node);
-                curr = rb_next(curr);
-        } while (curr && !entity_is_task(se));
+                se = list_entry(next, struct sched_entity, group_node);
+                next = next->next;
+        } while (next != &cfs_rq->tasks && !entity_is_task(se));

-        cfs_rq->rb_load_balance_curr = curr;
+        if (next == &cfs_rq->tasks)
+                return NULL;
+
+        cfs_rq->balance_iterator = next;

         if (entity_is_task(se))
                 p = task_of(se);
@@ -1315,14 +1320,14 @@ static struct task_struct *load_balance_start_fair(void *arg)
 {
         struct cfs_rq *cfs_rq = arg;

-        return __load_balance_iterator(cfs_rq, first_fair(cfs_rq));
+        return __load_balance_iterator(cfs_rq, cfs_rq->tasks.next);
 }

 static struct task_struct *load_balance_next_fair(void *arg)
 {
         struct cfs_rq *cfs_rq = arg;

-        return __load_balance_iterator(cfs_rq, cfs_rq->rb_load_balance_curr);
+        return __load_balance_iterator(cfs_rq, cfs_rq->balance_iterator);
 }

 static unsigned long
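Finally, a hedged usage sketch tying the pieces together. The main() driver, the entity variables, and the printout are invented for illustration, but the two calls mirror how load_balance_start_fair() and load_balance_next_fair() drive the iterator: start at cfs_rq->tasks.next, then resume from the saved balance_iterator cursor.

#include <stdio.h>

int main(void)
{
        struct cfs_rq rq;
        struct sched_entity t1  = { .is_task = 1 };
        struct sched_entity t2  = { .is_task = 1 };
        struct sched_entity grp = { .is_task = 0 };

        INIT_LIST_HEAD(&rq.tasks);
        /* list_add() prepends, so the resulting order is: t1, t2, grp */
        list_add(&grp.group_node, &rq.tasks);
        list_add(&t2.group_node, &rq.tasks);
        list_add(&t1.group_node, &rq.tasks);

        /* "start": begin at rq.tasks.next, as load_balance_start_fair() does */
        struct sched_entity *se = sketch_balance_iterator(&rq, rq.tasks.next);
        while (se) {
                printf("candidate entity %p\n", (void *)se);
                /* "next": resume from the cursor, as load_balance_next_fair() does */
                se = sketch_balance_iterator(&rq, rq.balance_iterator);
        }
        return 0;
}

As in the patch, the walk visits entities in list order, skips non-task (group) entities, and terminates once the cursor wraps around to the list head.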