author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2008-10-24 11:06:16 +0200
committer Ingo Molnar <mingo@elte.hu>              2008-10-24 12:51:03 +0200
commit    3f3a490480d8ab96e0fe30a41f80f14e6a0c579d (patch)
tree      addbfb1207d816b1e1f5e4ab75f95a08c7b15202 /kernel
parent    sched: re-instate vruntime based wakeup preemption (diff)
sched: virtual time buddy preemption
Since we moved wakeup preemption back to virtual time, it makes sense to
move the buddy stuff back as well. The purpose of the buddy scheduling
is to allow a quickly scheduling pair of tasks to run away from the
group as far as a regular busy task would be allowed under wakeup
preemption.

This has the advantage that the pair can ping-pong for a while, enjoying
cache-hotness. Without buddy scheduling, other tasks would interleave,
destroying the cache.

Also, it saves a word in cfs_rq.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c      |  1 -
-rw-r--r--  kernel/sched_fair.c | 10 ++++------
2 files changed, 4 insertions(+), 7 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 12bc367d9241..e8819bc6f462 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -386,7 +386,6 @@ struct cfs_rq {
 
 	u64 exec_clock;
 	u64 min_vruntime;
-	u64 pair_start;
 
 	struct rb_root tasks_timeline;
 	struct rb_node *rb_leftmost;
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 7af17e04a6db..ce514afd78ff 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -791,16 +791,14 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	se->prev_sum_exec_runtime = se->sum_exec_runtime;
 }
 
+static int
+wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
+
 static struct sched_entity *
 pick_next(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	struct rq *rq = rq_of(cfs_rq);
-	u64 pair_slice = rq->clock - cfs_rq->pair_start;
-
-	if (!cfs_rq->next || pair_slice > sysctl_sched_min_granularity) {
-		cfs_rq->pair_start = rq->clock;
+	if (!cfs_rq->next || wakeup_preempt_entity(cfs_rq->next, se) == 1)
 		return se;
-	}
 
 	return cfs_rq->next;
 }
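Note: the hunk only adds a forward declaration of wakeup_preempt_entity(); its
definition sits further down in kernel/sched_fair.c and is outside this diff.
For readers following the change, the decision it makes is roughly the vruntime
comparison below. This is a simplified sketch of how the helper looked around
this kernel version, not part of the patch; the wakeup_gran() call stands in
for the (weight-scaled) sysctl_sched_wakeup_granularity.

static int
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
{
	/*
	 * vdiff > 0 means 'se' has the smaller vruntime, i.e. it is
	 * currently owed more CPU time than 'curr'.
	 */
	s64 gran, vdiff = curr->vruntime - se->vruntime;

	if (vdiff <= 0)
		return -1;	/* 'se' is not ahead of 'curr' at all */

	/* only preempt once the lead exceeds the wakeup granularity */
	gran = wakeup_gran(curr);
	if (vdiff > gran)
		return 1;

	return 0;		/* ahead, but still within the granularity */
}

With this in place, pick_next() keeps the buddy (cfs_rq->next) only for as long
as the leftmost entity would not be allowed to preempt it, which is exactly the
"run away as far as wakeup preemption allows" behaviour described in the
changelog, expressed in virtual time instead of the old rq->clock pair_slice.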