Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--   drivers/cpufreq/cpufreq_conservative.c   28
-rw-r--r--   drivers/cpufreq/cpufreq_ondemand.c       28
2 files changed, 36 insertions, 20 deletions
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 0912d7ca8cd7..8d541c69aec6 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -54,8 +54,20 @@ static unsigned int def_sampling_rate;
(MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
#define MIN_SAMPLING_RATE \
(def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
+/* Above MIN_SAMPLING_RATE will vanish with its sysfs file soon
+ * Define the minimal settable sampling rate to the greater of:
+ * - "HW transition latency" * 100 (same as default sampling / 10)
+ * - MIN_STAT_SAMPLING_RATE
+ * To avoid that userspace shoots itself.
+*/
+static unsigned int minimum_sampling_rate(void)
+{
+ return max(def_sampling_rate / 10, MIN_STAT_SAMPLING_RATE);
+}
+
+/* This will also vanish soon with removing sampling_rate_max */
#define MAX_SAMPLING_RATE (500 * def_sampling_rate)
-#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000)
+#define LATENCY_MULTIPLIER (1000)
#define DEF_SAMPLING_DOWN_FACTOR (1)
#define MAX_SAMPLING_DOWN_FACTOR (10)
#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)
@@ -208,13 +220,11 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
ret = sscanf(buf, "%u", &input);
mutex_lock(&dbs_mutex);
- if (ret != 1 || input > MAX_SAMPLING_RATE ||
- input < MIN_SAMPLING_RATE) {
+ if (ret != 1) {
mutex_unlock(&dbs_mutex);
return -EINVAL;
}
-
- dbs_tuners_ins.sampling_rate = input;
+ dbs_tuners_ins.sampling_rate = max(input, minimum_sampling_rate());
mutex_unlock(&dbs_mutex);
return count;
@@ -540,11 +550,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
if (latency == 0)
latency = 1;
- def_sampling_rate = 10 * latency *
- DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
-
- if (def_sampling_rate < MIN_STAT_SAMPLING_RATE)
- def_sampling_rate = MIN_STAT_SAMPLING_RATE;
+ def_sampling_rate =
+ max(10 * latency * LATENCY_MULTIPLIER,
+ MIN_STAT_SAMPLING_RATE);
dbs_tuners_ins.sampling_rate = def_sampling_rate;
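
The conservative hunks above replace the old MIN/MAX range check in store_sampling_rate() with a silent clamp to the new minimum_sampling_rate() helper. Below is a minimal standalone sketch of that clamping behaviour; the values for MIN_STAT_SAMPLING_RATE and def_sampling_rate are assumptions chosen only to make the arithmetic visible, and the plain max() macro stands in for the kernel's type-checked version.

#include <stdio.h>

/* Stand-ins for the driver's constants -- values are assumed here
 * purely for illustration, not taken from the kernel sources. */
#define MIN_STAT_SAMPLING_RATE	20000U			/* usecs, assumed */
static unsigned int def_sampling_rate = 200000U;	/* usecs, assumed */

/* Plain substitute for the kernel's type-checked max() macro. */
#define max(a, b) ((a) > (b) ? (a) : (b))

/* Mirrors the helper added by the patch: the lowest settable rate is
 * the larger of def_sampling_rate / 10 and MIN_STAT_SAMPLING_RATE. */
static unsigned int minimum_sampling_rate(void)
{
	return max(def_sampling_rate / 10, MIN_STAT_SAMPLING_RATE);
}

int main(void)
{
	unsigned int requested = 1000;	/* well below the minimum */
	unsigned int effective = max(requested, minimum_sampling_rate());

	/* Before the patch such a write returned -EINVAL; with it the
	 * write is accepted and silently raised to the minimum. */
	printf("requested=%u effective=%u\n", requested, effective);
	return 0;
}

The net effect is that a too-small write no longer fails; it is quietly raised, so the governor is protected from unusably low sampling rates without rejecting the request outright.
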
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 32ddeaa42244..338f428a15b7 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -52,8 +52,20 @@ static unsigned int def_sampling_rate;
(MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
#define MIN_SAMPLING_RATE \
(def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
+/* Above MIN_SAMPLING_RATE will vanish with its sysfs file soon
+ * Define the minimal settable sampling rate to the greater of:
+ * - "HW transition latency" * 100 (same as default sampling / 10)
+ * - MIN_STAT_SAMPLING_RATE
+ * To avoid that userspace shoots itself.
+*/
+static unsigned int minimum_sampling_rate(void)
+{
+ return max(def_sampling_rate / 10, MIN_STAT_SAMPLING_RATE);
+}
+
+/* This will also vanish soon with removing sampling_rate_max */
#define MAX_SAMPLING_RATE (500 * def_sampling_rate)
-#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000)
+#define LATENCY_MULTIPLIER (1000)
#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)
static void do_dbs_timer(struct work_struct *work);
@@ -255,13 +267,11 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
ret = sscanf(buf, "%u", &input);
mutex_lock(&dbs_mutex);
- if (ret != 1 || input > MAX_SAMPLING_RATE
- || input < MIN_SAMPLING_RATE) {
+ if (ret != 1) {
mutex_unlock(&dbs_mutex);
return -EINVAL;
}
-
- dbs_tuners_ins.sampling_rate = input;
+ dbs_tuners_ins.sampling_rate = max(input, minimum_sampling_rate());
mutex_unlock(&dbs_mutex);
return count;
@@ -607,11 +617,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
if (latency == 0)
latency = 1;
- def_sampling_rate = latency *
- DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
-
- if (def_sampling_rate < MIN_STAT_SAMPLING_RATE)
- def_sampling_rate = MIN_STAT_SAMPLING_RATE;
+ def_sampling_rate =
+ max(latency * LATENCY_MULTIPLIER,
+ MIN_STAT_SAMPLING_RATE);
dbs_tuners_ins.sampling_rate = def_sampling_rate;
}
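
Both cpufreq_governor_dbs() hunks also collapse the old two-step default computation into a single max(). The sketch below works that out with assumed numbers; the latency figure and MIN_STAT_SAMPLING_RATE are illustrative only (the governors derive latency from the policy's cpuinfo.transition_latency), and the simple max() macro again replaces the kernel one.

#include <stdio.h>

#define LATENCY_MULTIPLIER	(1000)
#define MIN_STAT_SAMPLING_RATE	20000U		/* usecs, assumed */
#define max(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	/* Transition latency in usecs; 10 is just an assumed figure. */
	unsigned int latency = 10;

	/* ondemand: latency * LATENCY_MULTIPLIER, floored at the
	 * statistics-based minimum. */
	unsigned int ondemand_def =
		max(latency * LATENCY_MULTIPLIER, MIN_STAT_SAMPLING_RATE);

	/* conservative keeps its 10x longer default interval, as in
	 * the hunk above. */
	unsigned int conservative_def =
		max(10 * latency * LATENCY_MULTIPLIER, MIN_STAT_SAMPLING_RATE);

	printf("ondemand default:     %u usecs\n", ondemand_def);
	printf("conservative default: %u usecs\n", conservative_def);
	return 0;
}

With these assumed numbers the ondemand default is floored at MIN_STAT_SAMPLING_RATE (20000 usecs) while conservative lands at 100000 usecs, matching the single-expression form the patch introduces.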