summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorMike Galbraith <efault@gmx.de>2010-03-11 17:17:17 +0100
committerIngo Molnar <mingo@elte.hu>2010-03-11 18:32:52 +0100
commit5ca9880c6f4ba4c84b517bc2fed5366adf63d191 (patch)
tree2aa6abff8c3615cbb692364e986e3126e0c4099d
parentsched: Remove NORMALIZED_SLEEPER (diff)
downloadlinux-5ca9880c6f4ba4c84b517bc2fed5366adf63d191.tar.xz
linux-5ca9880c6f4ba4c84b517bc2fed5366adf63d191.zip
sched: Remove FAIR_SLEEPERS feature
Our preemption model relies too heavily on sleeper fairness to disable it without dire consequences. Remove the feature, and save a branch or two.

Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1268301520.6785.40.camel@marge.simson.net>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--kernel/sched_fair.c2
-rw-r--r--kernel/sched_features.h7
2 files changed, 1 insertions, 8 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index de98e2e9d6e1..97682f925ed5 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -738,7 +738,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
vruntime += sched_vslice(cfs_rq, se);
/* sleeps up to a single latency don't count. */
- if (!initial && sched_feat(FAIR_SLEEPERS)) {
+ if (!initial) {
unsigned long thresh = sysctl_sched_latency;
/*
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index 404288354aee..850f9809cf81 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -1,11 +1,4 @@
/*
- * Disregards a certain amount of sleep time (sched_latency_ns) and
- * considers the task to be running during that period. This gives it
- * a service deficit on wakeup, allowing it to run sooner.
- */
-SCHED_FEAT(FAIR_SLEEPERS, 1)
-
-/*
* Only give sleepers 50% of their service deficit. This allows
* them to run sooner, but does not allow tons of sleepers to
* rip the spread apart.