author	Linus Torvalds <torvalds@linux-foundation.org>	2008-08-17 02:15:32 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-08-17 02:15:32 +0200
commit	c100548d4610f727b95faffd69cb54cb280cd114 (patch)
tree	0add41ffba7ced1f4742a327684fdb2bb57148c8 /kernel
parent	Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kern... (diff)
parent	sched: scale sysctl_sched_shares_ratelimit with nr_cpus (diff)
Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  sched: scale sysctl_sched_shares_ratelimit with nr_cpus
  sched: fix rt-bandwidth hotplug race
  sched: fix the race between walk_tg_tree and sched_create_group
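The first of these fixes scales the group-shares update ratelimit with the CPU count: the kernel/sched.c hunks below halve the base default to 0.25ms (250000 ns) and then multiply it in sched_init_granularity() by the same per-boot factor already applied to sysctl_sched_latency and the other CFS tunables. A standalone sketch of the resulting defaults, assuming that factor is 1 + ilog2(num_online_cpus()) as in that function; the ilog2_u32() helper and the table of CPU counts are illustrative only:

/*
 * Sketch (not kernel code): how the new 250000 ns default would end up
 * scaled at boot for various CPU counts, assuming the usual
 * factor = 1 + ilog2(num_online_cpus()) from sched_init_granularity().
 */
#include <stdio.h>

static unsigned int ilog2_u32(unsigned int n)	/* floor(log2(n)), n > 0 */
{
	unsigned int r = 0;

	while (n >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int ncpus[] = { 1, 2, 4, 8, 16, 64 };

	for (unsigned int i = 0; i < sizeof(ncpus) / sizeof(ncpus[0]); i++) {
		unsigned int factor = 1 + ilog2_u32(ncpus[i]);

		printf("%3u cpus -> shares ratelimit = %7u ns\n",
		       ncpus[i], 250000u * factor);
	}
	return 0;
}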
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c	8
-rw-r--r--	kernel/sched_rt.c	2
2 files changed, 6 insertions, 4 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 95e6ad3c231d..9a1ddb84e26d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -808,9 +808,9 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32;
/*
* ratelimit for updating the group shares.
- * default: 0.5ms
+ * default: 0.25ms
*/
-const_debug unsigned int sysctl_sched_shares_ratelimit = 500000;
+unsigned int sysctl_sched_shares_ratelimit = 250000;
/*
* period over which we measure -rt task cpu usage in us.
@@ -5786,6 +5786,8 @@ static inline void sched_init_granularity(void)
sysctl_sched_latency = limit;
sysctl_sched_wakeup_granularity *= factor;
+
+ sysctl_sched_shares_ratelimit *= factor;
}
#ifdef CONFIG_SMP
@@ -8508,8 +8510,8 @@ struct task_group *sched_create_group(struct task_group *parent)
WARN_ON(!parent); /* root should already exist */
tg->parent = parent;
- list_add_rcu(&tg->siblings, &parent->children);
INIT_LIST_HEAD(&tg->children);
+ list_add_rcu(&tg->siblings, &parent->children);
spin_unlock_irqrestore(&task_group_lock, flags);
return tg;
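The last sched.c hunk is the walk_tg_tree()/sched_create_group() race fix: the new group's own ->children list is now initialized before the group is published on its parent's list via list_add_rcu(), so a concurrent RCU walker can no longer recurse into an uninitialized list. A minimal userspace model of why the ordering matters, using plain doubly linked lists instead of the kernel's RCU primitives (all helpers and the simulated interleaving are illustrative only):

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct task_group {
	const char *name;
	struct list_head siblings;	/* link on parent->children */
	struct list_head children;	/* our own children */
};

static void walk(struct task_group *tg, int depth)
{
	printf("%*s%s\n", depth * 2, "", tg->name);
	for (struct list_head *p = tg->children.next; p != &tg->children; p = p->next)
		walk(container_of(p, struct task_group, siblings), depth + 1);
}

int main(void)
{
	static struct task_group root = { .name = "root" };
	static struct task_group tg   = { .name = "new group" };

	INIT_LIST_HEAD(&root.siblings);
	INIT_LIST_HEAD(&root.children);

	/* Fixed ordering, as in the hunk above: init first, publish second. */
	INIT_LIST_HEAD(&tg.children);
	list_add(&tg.siblings, &root.children);

	/*
	 * With the old ordering, a walker running here, after the group was
	 * linked but before INIT_LIST_HEAD(&tg.children), would follow the
	 * still-uninitialized tg.children pointers.
	 */
	walk(&root, 0);
	return 0;
}

Built with any C99 compiler this prints the two-level tree; with the old order restored and the walk moved between the two steps, it would dereference null or garbage pointers instead, which is the window the kernel fix closes.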
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 6163e4cf885b..998ba54b4543 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -298,7 +298,7 @@ static void __disable_runtime(struct rq *rq)
struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
s64 diff;
- if (iter == rt_rq)
+ if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
continue;
spin_lock(&iter->rt_runtime_lock);
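The kernel/sched_rt.c hunk (presumably the rt-bandwidth hotplug fix listed above) adds a guard to the runtime-reclaim loop in __disable_runtime(): sibling RT runqueues whose rt_runtime is the RUNTIME_INF sentinel are skipped, since an unconstrained runqueue takes no part in runtime borrowing and subtracting from its "infinite" marker would leave it with a bogus finite budget. A small standalone sketch of such a reclaim loop with the guard (the budget values and the flat per-cpu array are illustrative, not the kernel's data structures):

#include <stdio.h>
#include <stdint.h>

#define RUNTIME_INF ((uint64_t)~0ULL)	/* matches the kernel's (u64)~0ULL sentinel */

int main(void)
{
	/* per-cpu RT runtime budgets in ns; cpu2 is unconstrained */
	uint64_t rt_runtime[4] = { 950000, 950000, RUNTIME_INF, 950000 };
	int64_t want = 3000000;		/* runtime we need to pull back */

	for (int i = 0; i < 4 && want > 0; i++) {
		if (rt_runtime[i] == RUNTIME_INF)
			continue;	/* the added check: leave these alone */

		int64_t diff = want < (int64_t)rt_runtime[i]
			     ? want : (int64_t)rt_runtime[i];
		rt_runtime[i] -= diff;
		want -= diff;
	}

	for (int i = 0; i < 4; i++)
		printf("cpu%d runtime: %llu ns\n", i,
		       (unsigned long long)rt_runtime[i]);
	printf("still wanted: %lld ns\n", (long long)want);
	return 0;
}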