author    Viresh Kumar <viresh.kumar@linaro.org>    2016-02-11 13:01:14 +0100
committer Rafael J. Wysocki <rafael.j.wysocki@intel.com>    2016-03-09 14:41:01 +0100
commit    581c214b21e4faba06d913952e38e80635d9ada5 (patch)
tree      73d7d025dd49f268caf78c88d3f6dab1e29dc44a /drivers/cpufreq
parent    cpufreq: Remove cpufreq_governor_lock (diff)
cpufreq: governor: No need to manage state machine now
The cpufreq core now guarantees that policy->rwsem won't be dropped while running the ->governor callback for the CPUFREQ_GOV_POLICY_EXIT event and will remain held until the complete sequence of governor state changes has finished.

This allows the governor state machine checks to be dropped from multiple functions in cpufreq_governor.c.

It also means that policy_dbs->policy can be initialized upfront, so the entire initialization of struct policy_dbs can be carried out in one place.

Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Tested-by: Juri Lelli <juri.lelli@arm.com>
Tested-by: Shilpasri G Bhat <shilpa.bhat@linux.vnet.ibm.com>
[ rjw: Changelog ]
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
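To make the reasoning concrete, here is a minimal, self-contained C sketch (not kernel code; names such as fake_policy and fake_governor_stop are purely illustrative) of the idea behind the patch: once the caller serializes the whole governor state-change sequence under one lock, the way the cpufreq core now does with policy->rwsem, the individual callbacks no longer need to re-check the state and bail out with -EBUSY.

#include <pthread.h>
#include <stdio.h>

struct fake_policy {
	pthread_rwlock_t rwsem;     /* stands in for policy->rwsem */
	void *governor_data;        /* stands in for policy_dbs */
};

static void fake_gov_cancel_work(struct fake_policy *policy)
{
	/* Mirrors the patched gov_cancel_work(): it takes the policy and
	 * looks up the governor data itself. */
	printf("cancel work for governor data %p\n", policy->governor_data);
}

static int fake_governor_stop(struct fake_policy *policy)
{
	/* No "state should be equivalent to START" check here: the caller
	 * already holds the lock across the whole transition sequence. */
	fake_gov_cancel_work(policy);
	return 0;
}

int main(void)
{
	int data = 42;
	struct fake_policy p = { .governor_data = &data };

	pthread_rwlock_init(&p.rwsem, NULL);

	pthread_rwlock_wrlock(&p.rwsem);   /* caller holds the lock ...    */
	fake_governor_stop(&p);            /* ... across the STOP callback */
	pthread_rwlock_unlock(&p.rwsem);

	pthread_rwlock_destroy(&p.rwsem);
	return 0;
}

Build with something like "cc -pthread" to try it out. The kernel's rwsem and governor callbacks are of course richer than this toy, but the locking invariant that justifies dropping the per-callback checks in the diff below is the same.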
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--  drivers/cpufreq/cpufreq_governor.c | 27
1 file changed, 5 insertions(+), 22 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 2f35270fbd43..a34de9d10cbc 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -233,8 +233,10 @@ static inline void gov_clear_update_util(struct cpufreq_policy *policy)
synchronize_rcu();
}
-static void gov_cancel_work(struct policy_dbs_info *policy_dbs)
+static void gov_cancel_work(struct cpufreq_policy *policy)
{
+ struct policy_dbs_info *policy_dbs = policy->governor_data;
+
/* Tell dbs_update_util_handler() to skip queuing up work items. */
atomic_inc(&policy_dbs->work_count);
/*
@@ -331,6 +333,7 @@ static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *poli
if (!policy_dbs)
return NULL;
+ policy_dbs->policy = policy;
mutex_init(&policy_dbs->timer_mutex);
atomic_set(&policy_dbs->work_count, 0);
init_irq_work(&policy_dbs->irq_work, dbs_irq_work);
@@ -458,10 +461,6 @@ static int cpufreq_governor_exit(struct cpufreq_policy *policy)
struct dbs_data *dbs_data = policy_dbs->dbs_data;
int count;
- /* State should be equivalent to INIT */
- if (policy_dbs->policy)
- return -EBUSY;
-
mutex_lock(&dbs_data->mutex);
list_del(&policy_dbs->list);
count = --dbs_data->usage_count;
@@ -497,10 +496,6 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy)
if (!policy->cur)
return -EINVAL;
- /* State should be equivalent to INIT */
- if (policy_dbs->policy)
- return -EBUSY;
-
sampling_rate = dbs_data->sampling_rate;
ignore_nice = dbs_data->ignore_nice_load;
@@ -525,7 +520,6 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy)
if (ignore_nice)
j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
}
- policy_dbs->policy = policy;
if (gov->governor == GOV_CONSERVATIVE) {
struct cs_cpu_dbs_info_s *cs_dbs_info =
@@ -548,14 +542,7 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy)
static int cpufreq_governor_stop(struct cpufreq_policy *policy)
{
- struct policy_dbs_info *policy_dbs = policy->governor_data;
-
- /* State should be equivalent to START */
- if (!policy_dbs->policy)
- return -EBUSY;
-
- gov_cancel_work(policy_dbs);
- policy_dbs->policy = NULL;
+ gov_cancel_work(policy);
return 0;
}
@@ -564,10 +551,6 @@ static int cpufreq_governor_limits(struct cpufreq_policy *policy)
{
struct policy_dbs_info *policy_dbs = policy->governor_data;
- /* State should be equivalent to START */
- if (!policy_dbs->policy)
- return -EBUSY;
-
mutex_lock(&policy_dbs->timer_mutex);
if (policy->max < policy->cur)
__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);