summary refs log tree commit diff stats
path: root/kernel/sched
diff options
context:
space:
mode:
authorMike Galbraith <mgalbraith@suse.de>2011-11-22 15:21:26 +0100
committerIngo Molnar <mingo@elte.hu>2011-12-06 09:06:24 +0100
commit916671c08b7808aebec87cc56c85788e665b3c6b (patch)
treeb3cd42d8fc5ba64f3ee0bff332278758c656141c /kernel/sched
parentsched: Use rt.nr_cpus_allowed to recover select_task_rq() cycles (diff)
downloadlinux-916671c08b7808aebec87cc56c85788e665b3c6b.tar.xz
linux-916671c08b7808aebec87cc56c85788e665b3c6b.zip
sched: Set skip_clock_update in yield_task_fair()
This is another case where we are on our way to schedule(), so we can save a useless clock update and the resulting microscopic vruntime update.

Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1321971686.6855.18.camel@marge.simson.net
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/core.c	7
-rw-r--r--	kernel/sched/fair.c	6
2 files changed, 13 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index ca8fd44145ac..db313c33af29 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4547,6 +4547,13 @@ again:
*/
if (preempt && rq != p_rq)
resched_task(p_rq->curr);
+ } else {
+ /*
+ * We might have set it in yield_task_fair(), but are
+ * not going to schedule(), so don't want to skip
+ * the next update.
+ */
+ rq->skip_clock_update = 0;
}
out:
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 8e534a05e3ed..81ccb811afb4 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3075,6 +3075,12 @@ static void yield_task_fair(struct rq *rq)
* Update run-time statistics of the 'current'.
*/
update_curr(cfs_rq);
+ /*
+ * Tell update_rq_clock() that we've just updated,
+ * so we don't do microscopic update in schedule()
+ * and double the fastpath cost.
+ */
+ rq->skip_clock_update = 1;
}
set_skip_buddy(se);