summaryrefslogtreecommitdiffstats
path: root/drivers/cpufreq
diff options
context:
space:
mode:
authorRafael J. Wysocki <rafael.j.wysocki@intel.com>2016-02-22 14:14:34 +0100
committerRafael J. Wysocki <rafael.j.wysocki@intel.com>2016-03-09 14:41:10 +0100
commit27de34823984e844f5dc042d39bb43f5dc98966f (patch)
treeadf647f0f42df0e3ef274b3035bd8bd3c04cabcd /drivers/cpufreq
parentcpufreq: governor: Make gov_set_update_util() static (diff)
downloadlinux-27de34823984e844f5dc042d39bb43f5dc98966f.tar.xz
linux-27de34823984e844f5dc042d39bb43f5dc98966f.zip
cpufreq: governor: Fix race in dbs_update_util_handler()
There is a scenario that may lead to undesired results in dbs_update_util_handler(). Namely, if two CPUs sharing a policy enter the function at the same time, pass the sample delay check and then one of them is stalled until dbs_work_handler() (queued up by the other CPU) clears the work counter, it may update the work counter and queue up another work item prematurely. To prevent that from happening, use the observation that the CPU queuing up a work item in dbs_update_util_handler() updates the last sample time. This means that if another CPU was stalling after passing the sample delay check and now successfully updated the work counter as a result of the race described above, it will see the new value of the last sample time which is different from what it used in the sample delay check before. If that happens, the sample delay check passed previously is not valid any more, so the CPU should not continue. Fixes: f17cbb53783c (cpufreq: governor: Avoid atomic operations in hot paths) Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com> Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--drivers/cpufreq/cpufreq_governor.c21
1 file changed, 16 insertions, 5 deletions
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index c9a571fd79ac..064582aa5a0d 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -340,7 +340,7 @@ static void dbs_update_util_handler(struct update_util_data *data, u64 time,
{
struct cpu_dbs_info *cdbs = container_of(data, struct cpu_dbs_info, update_util);
struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
- u64 delta_ns;
+ u64 delta_ns, lst;
/*
* The work may not be allowed to be queued up right now.
@@ -356,7 +356,8 @@ static void dbs_update_util_handler(struct update_util_data *data, u64 time,
* of sample_delay_ns used in the computation may be stale.
*/
smp_rmb();
- delta_ns = time - policy_dbs->last_sample_time;
+ lst = READ_ONCE(policy_dbs->last_sample_time);
+ delta_ns = time - lst;
if ((s64)delta_ns < policy_dbs->sample_delay_ns)
return;
@@ -365,9 +366,19 @@ static void dbs_update_util_handler(struct update_util_data *data, u64 time,
* at this point. Otherwise, we need to ensure that only one of the
* CPUs sharing the policy will do that.
*/
- if (policy_dbs->is_shared &&
- !atomic_add_unless(&policy_dbs->work_count, 1, 1))
- return;
+ if (policy_dbs->is_shared) {
+ if (!atomic_add_unless(&policy_dbs->work_count, 1, 1))
+ return;
+
+ /*
+ * If another CPU updated last_sample_time in the meantime, we
+ * shouldn't be here, so clear the work counter and bail out.
+ */
+ if (unlikely(lst != READ_ONCE(policy_dbs->last_sample_time))) {
+ atomic_set(&policy_dbs->work_count, 0);
+ return;
+ }
+ }
policy_dbs->last_sample_time = time;
policy_dbs->work_in_progress = true;