author     Ingo Molnar <mingo@elte.hu>   2007-08-25 18:41:53 +0200
committer  Ingo Molnar <mingo@elte.hu>   2007-08-25 18:41:53 +0200
commit     172ac3dbb7d3e528ac53d08a34df88d1ac53c534 (patch)
tree       f17de2a4a7a562792fd85a14bb1c278bb8c40804
parent     sched: adaptive scheduler granularity (diff)
download   linux-172ac3dbb7d3e528ac53d08a34df88d1ac53c534.tar.xz
           linux-172ac3dbb7d3e528ac53d08a34df88d1ac53c534.zip
sched: cleanup, sched_granularity -> sched_min_granularity
due to adaptive granularity scheduling the role of sched_granularity has changed to "minimum granularity", so rename the variable (and the tunable) accordingly.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
-rw-r--r--   include/linux/sched.h   2
-rw-r--r--   kernel/sched.c          6
-rw-r--r--   kernel/sched_fair.c     4
-rw-r--r--   kernel/sysctl.c         4
4 files changed, 8 insertions, 8 deletions
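
As context for the kernel/sched.c hunk below: at boot, sched_init_granularity() scales the (now renamed) minimum granularity and the latency target by the CPU count and caps both at 100 ms. The following is a minimal standalone sketch of that arithmetic, not kernel code; the example CPU count is assumed, the 2 ms / 20 ms defaults and the 100 ms cap are taken from this patch, and ilog2() is replaced by a plain bit scan for user space.

/* standalone sketch of the sched_init_granularity() scaling, for illustration */
#include <stdio.h>

static unsigned int ilog2_u32(unsigned int n)
{
	return 31 - __builtin_clz(n);	/* floor(log2(n)), n > 0 */
}

int main(void)
{
	unsigned int num_online_cpus = 8;		/* assumed example CPU count */
	unsigned int factor = 1 + ilog2_u32(num_online_cpus);
	const unsigned long limit = 100000000;		/* 100 ms cap, as in the patch */
	unsigned int min_granularity = 2000000;		/* 2 ms default from sched_fair.c */
	unsigned int latency = 20000000;		/* 20 ms default from sched_fair.c */

	min_granularity *= factor;
	if (min_granularity > limit)
		min_granularity = limit;

	latency *= factor;
	if (latency > limit)
		latency = limit;

	printf("factor=%u min_granularity=%u ns latency=%u ns\n",
	       factor, min_granularity, latency);
	return 0;
}

On an 8-CPU box this gives factor = 4, so the minimum granularity becomes 8 ms and the latency target 80 ms, both still under the 100 ms cap.
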
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 322764e04052..bd6a0320a770 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1400,7 +1400,7 @@ static inline void idle_task_exit(void) {}
extern void sched_idle_next(void);
extern unsigned int sysctl_sched_latency;
-extern unsigned int sysctl_sched_granularity;
+extern unsigned int sysctl_sched_min_granularity;
extern unsigned int sysctl_sched_wakeup_granularity;
extern unsigned int sysctl_sched_batch_wakeup_granularity;
extern unsigned int sysctl_sched_stat_granularity;
diff --git a/kernel/sched.c b/kernel/sched.c
index da26f46d50d7..a40ab657ad19 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4913,9 +4913,9 @@ static inline void sched_init_granularity(void)
unsigned int factor = 1 + ilog2(num_online_cpus());
const unsigned long limit = 100000000;
- sysctl_sched_granularity *= factor;
- if (sysctl_sched_granularity > limit)
- sysctl_sched_granularity = limit;
+ sysctl_sched_min_granularity *= factor;
+ if (sysctl_sched_min_granularity > limit)
+ sysctl_sched_min_granularity = limit;
sysctl_sched_latency *= factor;
if (sysctl_sched_latency > limit)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 0ba1e60f08d0..ee3771850aaf 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -40,7 +40,7 @@ unsigned int sysctl_sched_latency __read_mostly = 20000000ULL;
* Minimal preemption granularity for CPU-bound tasks:
* (default: 2 msec, units: nanoseconds)
*/
-unsigned int sysctl_sched_granularity __read_mostly = 2000000ULL;
+unsigned int sysctl_sched_min_granularity __read_mostly = 2000000ULL;
/*
* SCHED_BATCH wake-up granularity.
@@ -258,7 +258,7 @@ sched_granularity(struct cfs_rq *cfs_rq)
if (nr > 1) {
gran = gran/nr - gran/nr/nr;
- gran = max(gran, sysctl_sched_granularity);
+ gran = max(gran, sysctl_sched_min_granularity);
}
return gran;
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 9e3d2960faf5..6ace893c17c9 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -222,8 +222,8 @@ static ctl_table kern_table[] = {
#ifdef CONFIG_SCHED_DEBUG
{
.ctl_name = CTL_UNNUMBERED,
- .procname = "sched_granularity_ns",
- .data = &sysctl_sched_granularity,
+ .procname = "sched_min_granularity_ns",
+ .data = &sysctl_sched_min_granularity,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = &proc_dointvec_minmax,
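
To illustrate what the renamed tunable now bounds, here is a minimal standalone sketch of the calculation in sched_granularity() from the kernel/sched_fair.c hunk above: the granularity is divided among the runnable tasks and then clamped from below by sysctl_sched_min_granularity. The starting value of gran and the helper names here are assumptions for illustration only, not the kernel's own code.

/* standalone sketch of the per-runqueue granularity clamp, for illustration */
#include <stdio.h>

static unsigned long effective_granularity(unsigned long latency_ns,
					   unsigned long min_granularity_ns,
					   unsigned long nr_running)
{
	unsigned long gran = latency_ns;	/* assumed starting value */

	if (nr_running > 1) {
		gran = gran / nr_running - gran / nr_running / nr_running;
		if (gran < min_granularity_ns)
			gran = min_granularity_ns;	/* max(gran, min), as in the hunk */
	}
	return gran;
}

int main(void)
{
	/* defaults from this patch: 20 ms latency, 2 ms minimum granularity */
	for (unsigned long nr = 1; nr <= 16; nr *= 2)
		printf("nr_running=%2lu -> granularity=%lu ns\n",
		       nr, effective_granularity(20000000UL, 2000000UL, nr));
	return 0;
}

With the defaults, the per-task slice shrinks as nr_running grows (5 ms at 2 tasks, about 2.2 ms at 8) until the 2 ms floor takes over at 16 tasks. Since the sysctl.c hunk adds the entry to kern_table under #ifdef CONFIG_SCHED_DEBUG, the renamed tunable should appear as /proc/sys/kernel/sched_min_granularity_ns on kernels built with scheduler debugging enabled.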