author		Fabio Baltieri <fabio.baltieri@linaro.org>	2012-12-27 15:55:41 +0100
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2013-02-02 00:01:13 +0100
commit		66df2a01dfd715636f5c86f7afd05362e7e3fddd (patch)
tree		419905e3de1dcb00787e277b85b2d8fcab350092 /drivers/cpufreq/cpufreq_conservative.c
parent		cpufreq: ondemand: call dbs_check_cpu only when necessary (diff)
cpufreq: conservative: call dbs_check_cpu only when necessary
Modify conservative timer to not resample CPU utilization if recently sampled from another SW coordinated core.

Signed-off-by: Fabio Baltieri <fabio.baltieri@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Diffstat (limited to 'drivers/cpufreq/cpufreq_conservative.c')
-rw-r--r--	drivers/cpufreq/cpufreq_conservative.c | 47
1 file changed, 41 insertions(+), 6 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index b9d7f14d7d3d..5d8e8942ec97 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -111,22 +111,57 @@ static void cs_check_cpu(int cpu, unsigned int load)
 	}
 }
-static void cs_dbs_timer(struct work_struct *work)
+static void cs_timer_update(struct cs_cpu_dbs_info_s *dbs_info, bool sample,
+			    struct delayed_work *dw)
 {
-	struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
-			struct cs_cpu_dbs_info_s, cdbs.work.work);
 	unsigned int cpu = dbs_info->cdbs.cpu;
 	int delay = delay_for_sampling_rate(cs_tuners.sampling_rate);
+	if (sample)
+		dbs_check_cpu(&cs_dbs_data, cpu);
+
+	schedule_delayed_work_on(smp_processor_id(), dw, delay);
+}
+
+static void cs_timer_coordinated(struct cs_cpu_dbs_info_s *dbs_info_local,
+				 struct delayed_work *dw)
+{
+	struct cs_cpu_dbs_info_s *dbs_info;
+	ktime_t time_now;
+	s64 delta_us;
+	bool sample = true;
+
+	/* use leader CPU's dbs_info */
+	dbs_info = &per_cpu(cs_cpu_dbs_info, dbs_info_local->cdbs.cpu);
 	mutex_lock(&dbs_info->cdbs.timer_mutex);
-	dbs_check_cpu(&cs_dbs_data, cpu);
+	time_now = ktime_get();
+	delta_us = ktime_us_delta(time_now, dbs_info->cdbs.time_stamp);
-	schedule_delayed_work_on(smp_processor_id(), &dbs_info->cdbs.work,
-			delay);
+	/* Do nothing if we recently have sampled */
+	if (delta_us < (s64)(cs_tuners.sampling_rate / 2))
+		sample = false;
+	else
+		dbs_info->cdbs.time_stamp = time_now;
+
+	cs_timer_update(dbs_info, sample, dw);
 	mutex_unlock(&dbs_info->cdbs.timer_mutex);
 }
+static void cs_dbs_timer(struct work_struct *work)
+{
+	struct delayed_work *dw = to_delayed_work(work);
+	struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
+			struct cs_cpu_dbs_info_s, cdbs.work.work);
+
+	if (dbs_sw_coordinated_cpus(&dbs_info->cdbs)) {
+		cs_timer_coordinated(dbs_info, dw);
+	} else {
+		mutex_lock(&dbs_info->cdbs.timer_mutex);
+		cs_timer_update(dbs_info, true, dw);
+		mutex_unlock(&dbs_info->cdbs.timer_mutex);
+	}
+}
 static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 		void *data)
 {
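
The patch above boils down to a shared rate limiter: software-coordinated CPUs all work on the leader CPU's dbs_info, and a timer run only resamples the load when more than half a sampling period has elapsed since the shared time_stamp was last refreshed; otherwise it just re-arms the delayed work. The following stand-alone user-space sketch illustrates only that check, under stated assumptions: sample_group, now_us() and group_should_sample() are invented for the example, and a pthread mutex stands in for cdbs.timer_mutex.

/*
 * Minimal user-space sketch of the "skip if recently sampled" test from
 * cs_timer_coordinated().  All names here are illustrative; only the
 * delta < sampling_rate / 2 comparison mirrors the patch.
 */
#define _POSIX_C_SOURCE 200809L
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct sample_group {
	pthread_mutex_t lock;		/* stands in for cdbs.timer_mutex */
	int64_t stamp_us;		/* when any member last really sampled */
	int64_t sampling_rate_us;	/* analogous to cs_tuners.sampling_rate */
};

static int64_t now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

/*
 * Returns true only if this timer run should recompute the load, i.e. if
 * no other group member sampled within the last half sampling period.
 */
static bool group_should_sample(struct sample_group *g)
{
	int64_t t, delta_us;
	bool sample = true;

	pthread_mutex_lock(&g->lock);
	t = now_us();
	delta_us = t - g->stamp_us;
	if (delta_us < g->sampling_rate_us / 2)
		sample = false;		/* someone sampled recently: skip */
	else
		g->stamp_us = t;	/* we sample: refresh the shared stamp */
	pthread_mutex_unlock(&g->lock);

	return sample;
}

int main(void)
{
	struct sample_group g = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.stamp_us = 0,
		.sampling_rate_us = 100000,	/* 100 ms */
	};

	/* Two back-to-back timer runs: only the first actually samples. */
	printf("first run samples:  %d\n", group_should_sample(&g));
	printf("second run samples: %d\n", group_should_sample(&g));
	return 0;
}

The threshold of half the sampling period, rather than the full period, is taken directly from the patch; a plausible reading is that it leaves headroom for timer jitter between coordinated CPUs, so a slightly early run on one core does not force an extra sample.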