| author | Viresh Kumar <viresh.kumar@linaro.org> | 2021-06-23 06:24:39 +0200 |
| --- | --- | --- |
| committer | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2021-06-30 18:49:29 +0200 |
| commit | 9357a380f90a89a168d505561d11f68272e0e768 (patch) | |
| tree | 3291facf3fadbbd98b7f5b7593211188d27f6ba2 /drivers/cpufreq | |
| parent | cpufreq: intel_pstate: Combine ->stop_cpu() and ->offline() (diff) | |
| download | linux-9357a380f90a89a168d505561d11f68272e0e768.tar.xz linux-9357a380f90a89a168d505561d11f68272e0e768.zip | |
cpufreq: CPPC: Migrate to ->exit() callback instead of ->stop_cpu()
Commit 367dc4aa932b ("cpufreq: Add stop CPU callback to cpufreq_driver
interface") added the ->stop_cpu() callback to allow drivers to clean
up before the CPU is completely down and its state can no longer be
modified.
At that time the CPU hotplug framework used to call the cpufreq core's
registered notifier for different events like CPU_DOWN_PREPARE and
CPU_POST_DEAD. The ->stop_cpu() callback was called during the
CPU_DOWN_PREPARE event.
This is no longer the case: cpuhp_cpufreq_offline() is called only
once by the CPU hotplug core now, and we don't really need two
separate callbacks for cpufreq drivers, i.e. ->stop_cpu() and
->exit(), as everything can be done from the ->exit() callback
itself.
Migrate to using the ->exit() callback instead of ->stop_cpu().
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
[ rjw: Minor edits in the changelog and subject ]
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
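For context, here is a minimal, hypothetical sketch (not part of this patch; the my_* names are made up) of a cpufreq driver that keeps per-policy state and releases it from ->exit() without registering a ->stop_cpu() callback, which is the shape this patch moves cppc_cpufreq to:

```c
#include <linux/cpufreq.h>
#include <linux/slab.h>

struct my_cpudata {
	unsigned int boost_supported;	/* example per-policy state */
};

static int my_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	struct my_cpudata *data;

	/* Allocate driver data when the policy is created. */
	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	policy->driver_data = data;
	return 0;
}

static int my_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	/*
	 * Called when the policy is torn down (the last CPU of the policy
	 * goes offline or the driver is unregistered), so cleanup that
	 * used to sit in ->stop_cpu() can live here instead.
	 */
	kfree(policy->driver_data);
	policy->driver_data = NULL;
	return 0;
}

static struct cpufreq_driver my_cpufreq_driver = {
	.name = "my_cpufreq",
	.init = my_cpufreq_cpu_init,
	.exit = my_cpufreq_cpu_exit,
	/* no .stop_cpu: everything is handled from ->exit() */
};
```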
Diffstat (limited to 'drivers/cpufreq')
| -rw-r--r-- | drivers/cpufreq/cppc_cpufreq.c | 46 |
1 files changed, 24 insertions, 22 deletions
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 2f769b1630c5..be4f62e2c5f1 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -182,27 +182,6 @@ static int cppc_verify_policy(struct cpufreq_policy_data *policy)
 	return 0;
 }
 
-static void cppc_cpufreq_stop_cpu(struct cpufreq_policy *policy)
-{
-	struct cppc_cpudata *cpu_data = policy->driver_data;
-	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
-	unsigned int cpu = policy->cpu;
-	int ret;
-
-	cpu_data->perf_ctrls.desired_perf = caps->lowest_perf;
-
-	ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
-	if (ret)
-		pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
-			 caps->lowest_perf, cpu, ret);
-
-	/* Remove CPU node from list and free driver data for policy */
-	free_cpumask_var(cpu_data->shared_cpu_map);
-	list_del(&cpu_data->node);
-	kfree(policy->driver_data);
-	policy->driver_data = NULL;
-}
-
 /*
  * The PCC subspace describes the rate at which platform can accept commands
  * on the shared PCC channel (including READs which do not count towards freq
@@ -352,6 +331,29 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	return ret;
 }
 
+static int cppc_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+	struct cppc_cpudata *cpu_data = policy->driver_data;
+	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
+	unsigned int cpu = policy->cpu;
+	int ret;
+
+	cpu_data->perf_ctrls.desired_perf = caps->lowest_perf;
+
+	ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
+	if (ret)
+		pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
+			 caps->lowest_perf, cpu, ret);
+
+	/* Remove CPU node from list and free driver data for policy */
+	free_cpumask_var(cpu_data->shared_cpu_map);
+	list_del(&cpu_data->node);
+	kfree(policy->driver_data);
+	policy->driver_data = NULL;
+
+	return 0;
+}
+
 static inline u64 get_delta(u64 t1, u64 t0)
 {
 	if (t1 > t0 || t0 > ~(u32)0)
@@ -451,7 +453,7 @@ static struct cpufreq_driver cppc_cpufreq_driver = {
 	.target = cppc_cpufreq_set_target,
 	.get = cppc_cpufreq_get_rate,
 	.init = cppc_cpufreq_cpu_init,
-	.stop_cpu = cppc_cpufreq_stop_cpu,
+	.exit = cppc_cpufreq_cpu_exit,
 	.set_boost = cppc_cpufreq_set_boost,
 	.attr = cppc_cpufreq_attr,
 	.name = "cppc_cpufreq",