Diffstat (limited to 'drivers/cpufreq/cpufreq.c')
-rw-r--r-- | drivers/cpufreq/cpufreq.c | 497
1 file changed, 269 insertions, 228 deletions
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 199b52b7c3e1..abda6609d3e7 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -26,7 +26,7 @@
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
-#include <linux/syscore_ops.h>
+#include <linux/suspend.h>
 #include <linux/tick.h>
 #include <trace/events/power.h>
 
@@ -42,10 +42,11 @@ static DEFINE_RWLOCK(cpufreq_driver_lock);
 DEFINE_MUTEX(cpufreq_governor_lock);
 static LIST_HEAD(cpufreq_policy_list);
 
-#ifdef CONFIG_HOTPLUG_CPU
 /* This one keeps track of the previously set governor of a removed CPU */
 static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
-#endif
+
+/* Flag to suspend/resume CPUFreq governors */
+static bool cpufreq_suspended;
 
 static inline bool has_target(void)
 {
@@ -181,8 +182,8 @@ unsigned int cpufreq_generic_get(unsigned int cpu)
 	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
 
 	if (!policy || IS_ERR(policy->clk)) {
-		pr_err("%s: No %s associated to cpu: %d\n", __func__,
-				policy ? "clk" : "policy", cpu);
+		pr_err("%s: No %s associated to cpu: %d\n",
+		       __func__, policy ? "clk" : "policy", cpu);
 		return 0;
 	}
 
@@ -190,6 +191,12 @@ unsigned int cpufreq_generic_get(unsigned int cpu)
 }
 EXPORT_SYMBOL_GPL(cpufreq_generic_get);
 
+/* Only for cpufreq core internal use */
+struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
+{
+	return per_cpu(cpufreq_cpu_data, cpu);
+}
+
 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
 {
 	struct cpufreq_policy *policy = NULL;
@@ -254,15 +261,14 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
 	if (!l_p_j_ref_freq) {
 		l_p_j_ref = loops_per_jiffy;
 		l_p_j_ref_freq = ci->old;
-		pr_debug("saving %lu as reference value for loops_per_jiffy; "
-			"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
+		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
+			 l_p_j_ref, l_p_j_ref_freq);
 	}
-	if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
-	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
+	if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
 		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
 								ci->new);
-		pr_debug("scaling loops_per_jiffy to %lu "
-			"for frequency %u kHz\n", loops_per_jiffy, ci->new);
+		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
+			 loops_per_jiffy, ci->new);
 	}
 }
 #else
@@ -282,7 +288,7 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
 	freqs->flags = cpufreq_driver->flags;
 
 	pr_debug("notification %u of frequency transition to %u kHz\n",
-		state, freqs->new);
+		 state, freqs->new);
 
 	switch (state) {
 
@@ -294,9 +300,8 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
 		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
 			if ((policy) && (policy->cpu == freqs->cpu) &&
 			    (policy->cur) && (policy->cur != freqs->old)) {
-				pr_debug("Warning: CPU frequency is"
-					" %u, cpufreq assumed %u kHz.\n",
-					freqs->old, policy->cur);
+				pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
+					 freqs->old, policy->cur);
 				freqs->old = policy->cur;
 			}
 		}
@@ -307,8 +312,8 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
 
 	case CPUFREQ_POSTCHANGE:
 		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
-		pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
-			(unsigned long)freqs->cpu);
+		pr_debug("FREQ: %lu - CPU: %lu\n",
+			 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
 		trace_cpu_frequency(freqs->new, freqs->cpu);
 		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
 				CPUFREQ_POSTCHANGE, freqs);
@@ -326,16 +331,15 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
  * function. It is called twice on all CPU frequency changes that have
  * external effects.
  */
-void cpufreq_notify_transition(struct cpufreq_policy *policy,
+static void cpufreq_notify_transition(struct cpufreq_policy *policy,
 		struct cpufreq_freqs *freqs, unsigned int state)
 {
 	for_each_cpu(freqs->cpu, policy->cpus)
 		__cpufreq_notify_transition(policy, freqs, state);
 }
-EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
 
 /* Do post notifications when there are chances that transition has failed */
-void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
+static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
 		struct cpufreq_freqs *freqs, int transition_failed)
 {
 	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
@@ -346,13 +350,47 @@ void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
 	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
 	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
 }
-EXPORT_SYMBOL_GPL(cpufreq_notify_post_transition);
+
+void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
+		struct cpufreq_freqs *freqs)
+{
+wait:
+	wait_event(policy->transition_wait, !policy->transition_ongoing);
+
+	spin_lock(&policy->transition_lock);
+
+	if (unlikely(policy->transition_ongoing)) {
+		spin_unlock(&policy->transition_lock);
+		goto wait;
+	}
+
+	policy->transition_ongoing = true;
+
+	spin_unlock(&policy->transition_lock);
+
+	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
+}
+EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
+
+void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
+		struct cpufreq_freqs *freqs, int transition_failed)
+{
+	if (unlikely(WARN_ON(!policy->transition_ongoing)))
+		return;
+
+	cpufreq_notify_post_transition(policy, freqs, transition_failed);
+
+	policy->transition_ongoing = false;
+
+	wake_up(&policy->transition_wait);
+}
+EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
 
 
 /*********************************************************************
  *                          SYSFS INTERFACE                          *
  *********************************************************************/
-ssize_t show_boost(struct kobject *kobj,
+static ssize_t show_boost(struct kobject *kobj,
 		   struct attribute *attr, char *buf)
 {
 	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
@@ -368,13 +406,13 @@ static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
 		return -EINVAL;
 
 	if (cpufreq_boost_trigger_state(enable)) {
-		pr_err("%s: Cannot %s BOOST!\n", __func__,
-		       enable ? "enable" : "disable");
+		pr_err("%s: Cannot %s BOOST!\n",
+		       __func__, enable ? "enable" : "disable");
 		return -EINVAL;
 	}
 
-	pr_debug("%s: cpufreq BOOST %s\n", __func__,
-		 enable ? "enabled" : "disabled");
+	pr_debug("%s: cpufreq BOOST %s\n",
+		 __func__, enable ? "enabled" : "disabled");
 
 	return count;
 }
@@ -879,18 +917,25 @@ err_out_kobj_put:
 
 static void cpufreq_init_policy(struct cpufreq_policy *policy)
 {
+	struct cpufreq_governor *gov = NULL;
 	struct cpufreq_policy new_policy;
 	int ret = 0;
 
 	memcpy(&new_policy, policy, sizeof(*policy));
 
+	/* Update governor of new_policy to the governor used before hotplug */
+	gov = __find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
+	if (gov)
+		pr_debug("Restoring governor %s for cpu %d\n",
+				policy->governor->name, policy->cpu);
+	else
+		gov = CPUFREQ_DEFAULT_GOVERNOR;
+
+	new_policy.governor = gov;
+
 	/* Use the default policy if its valid. */
 	if (cpufreq_driver->setpolicy)
-		cpufreq_parse_governor(policy->governor->name,
-					&new_policy.policy, NULL);
-
-	/* assure that the starting sequence is run in cpufreq_set_policy */
-	policy->governor = NULL;
+		cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
 
 	/* set default policy */
 	ret = cpufreq_set_policy(policy, &new_policy);
@@ -927,8 +972,11 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
 	up_write(&policy->rwsem);
 
 	if (has_target()) {
-		if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
-			(ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
+		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
+		if (!ret)
+			ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+
+		if (ret) {
 			pr_err("%s: Failed to start governor\n", __func__);
 			return ret;
 		}
@@ -949,6 +997,8 @@ static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
 
 	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
+	policy->governor = NULL;
+
 	return policy;
 }
 
@@ -968,6 +1018,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(void)
 
 	INIT_LIST_HEAD(&policy->policy_list);
 	init_rwsem(&policy->rwsem);
+	spin_lock_init(&policy->transition_lock);
+	init_waitqueue_head(&policy->transition_wait);
 
 	return policy;
 
@@ -1022,21 +1074,19 @@ static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
 
 	up_write(&policy->rwsem);
 
-	cpufreq_frequency_table_update_policy_cpu(policy);
 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
 			CPUFREQ_UPDATE_POLICY_CPU, policy);
 }
 
-static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
-			     bool frozen)
+static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 {
 	unsigned int j, cpu = dev->id;
 	int ret = -ENOMEM;
 	struct cpufreq_policy *policy;
 	unsigned long flags;
+	bool recover_policy = cpufreq_suspended;
 #ifdef CONFIG_HOTPLUG_CPU
 	struct cpufreq_policy *tpolicy;
-	struct cpufreq_governor *gov;
 #endif
 
 	if (cpu_is_offline(cpu))
@@ -1075,9 +1125,9 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 	 * Restore the saved policy when doing light-weight init and fall back
	 * to the full init if that fails.
 	 */
-	policy = frozen ? cpufreq_policy_restore(cpu) : NULL;
+	policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
 	if (!policy) {
-		frozen = false;
+		recover_policy = false;
 		policy = cpufreq_policy_alloc();
 		if (!policy)
 			goto nomem_out;
@@ -1089,12 +1139,11 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 	 * the creation of a brand new one. So we need to perform this update
 	 * by invoking update_policy_cpu().
 	 */
-	if (frozen && cpu != policy->cpu)
+	if (recover_policy && cpu != policy->cpu)
 		update_policy_cpu(policy, cpu);
 	else
 		policy->cpu = cpu;
 
-	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
 	cpumask_copy(policy->cpus, cpumask_of(cpu));
 
 	init_completion(&policy->kobj_unregister);
@@ -1118,7 +1167,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 	 */
 	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
 
-	if (!frozen) {
+	if (!recover_policy) {
 		policy->user_policy.min = policy->min;
 		policy->user_policy.max = policy->max;
 	}
@@ -1180,16 +1229,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
 				     CPUFREQ_START, policy);
 
-#ifdef CONFIG_HOTPLUG_CPU
-	gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
-	if (gov) {
-		policy->governor = gov;
-		pr_debug("Restoring governor %s for cpu %d\n",
-		       policy->governor->name, cpu);
-	}
-#endif
-
-	if (!frozen) {
+	if (!recover_policy) {
 		ret = cpufreq_add_dev_interface(policy, dev);
 		if (ret)
 			goto err_out_unregister;
@@ -1203,7 +1243,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 
 	cpufreq_init_policy(policy);
 
-	if (!frozen) {
+	if (!recover_policy) {
 		policy->user_policy.policy = policy->policy;
 		policy->user_policy.governor = policy->governor;
 	}
@@ -1226,7 +1266,7 @@ err_get_freq:
 	if (cpufreq_driver->exit)
 		cpufreq_driver->exit(policy);
 err_set_policy_cpu:
-	if (frozen) {
+	if (recover_policy) {
 		/* Do not leave stale fallback data behind. */
 		per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
 		cpufreq_policy_put_kobj(policy);
@@ -1250,7 +1290,7 @@ nomem_out:
  */
 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 {
-	return __cpufreq_add_dev(dev, sif, false);
+	return __cpufreq_add_dev(dev, sif);
 }
 
 static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
@@ -1265,7 +1305,7 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
 	sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
 	ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
 	if (ret) {
-		pr_err("%s: Failed to move kobj: %d", __func__, ret);
+		pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
 
 		down_write(&policy->rwsem);
 		cpumask_set_cpu(old_cpu, policy->cpus);
@@ -1281,8 +1321,7 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
 }
 
 static int __cpufreq_remove_dev_prepare(struct device *dev,
-					struct subsys_interface *sif,
-					bool frozen)
+					struct subsys_interface *sif)
 {
 	unsigned int cpu = dev->id, cpus;
 	int new_cpu, ret;
@@ -1296,7 +1335,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
 	policy = per_cpu(cpufreq_cpu_data, cpu);
 
 	/* Save the policy somewhere when doing a light-weight tear-down */
-	if (frozen)
+	if (cpufreq_suspended)
 		per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
 
 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
@@ -1314,11 +1353,9 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
 		}
 	}
 
-#ifdef CONFIG_HOTPLUG_CPU
 	if (!cpufreq_driver->setpolicy)
 		strncpy(per_cpu(cpufreq_cpu_governor, cpu),
 			policy->governor->name, CPUFREQ_NAME_LEN);
-#endif
 
 	down_read(&policy->rwsem);
 	cpus = cpumask_weight(policy->cpus);
@@ -1331,19 +1368,19 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
 		if (new_cpu >= 0) {
 			update_policy_cpu(policy, new_cpu);
 
-			if (!frozen) {
+			if (!cpufreq_suspended)
 				pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
-						__func__, new_cpu, cpu);
-			}
+					 __func__, new_cpu, cpu);
 		}
+	} else if (cpufreq_driver->stop_cpu && cpufreq_driver->setpolicy) {
+		cpufreq_driver->stop_cpu(policy);
 	}
 
 	return 0;
 }
 
 static int __cpufreq_remove_dev_finish(struct device *dev,
-				       struct subsys_interface *sif,
-				       bool frozen)
+				       struct subsys_interface *sif)
 {
 	unsigned int cpu = dev->id, cpus;
 	int ret;
@@ -1373,12 +1410,12 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
 					CPUFREQ_GOV_POLICY_EXIT);
 			if (ret) {
 				pr_err("%s: Failed to exit governor\n",
-						__func__);
+				       __func__);
 				return ret;
 			}
 		}
 
-		if (!frozen)
+		if (!cpufreq_suspended)
 			cpufreq_policy_put_kobj(policy);
 
 		/*
@@ -1394,16 +1431,16 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
 		list_del(&policy->policy_list);
 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-		if (!frozen)
+		if (!cpufreq_suspended)
 			cpufreq_policy_free(policy);
-	} else {
-		if (has_target()) {
-			if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
-					(ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
-				pr_err("%s: Failed to start governor\n",
-						__func__);
-				return ret;
-			}
+	} else if (has_target()) {
+		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
+		if (!ret)
+			ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+
+		if (ret) {
+			pr_err("%s: Failed to start governor\n", __func__);
+			return ret;
 		}
 	}
 
@@ -1424,10 +1461,10 @@ static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
 	if (cpu_is_offline(cpu))
 		return 0;
 
-	ret = __cpufreq_remove_dev_prepare(dev, sif, false);
+	ret = __cpufreq_remove_dev_prepare(dev, sif);
 
 	if (!ret)
-		ret = __cpufreq_remove_dev_finish(dev, sif, false);
+		ret = __cpufreq_remove_dev_finish(dev, sif);
 
 	return ret;
 }
@@ -1458,8 +1495,8 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
 	struct cpufreq_freqs freqs;
 	unsigned long flags;
 
-	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
-	       "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
+	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
+		 old_freq, new_freq);
 
 	freqs.old = old_freq;
 	freqs.new = new_freq;
@@ -1468,8 +1505,8 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
 	policy = per_cpu(cpufreq_cpu_data, cpu);
 	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_freq_transition_begin(policy, &freqs);
+	cpufreq_freq_transition_end(policy, &freqs, 0);
 }
 
 /**
@@ -1570,83 +1607,103 @@ static struct subsys_interface cpufreq_interface = {
 	.remove_dev	= cpufreq_remove_dev,
 };
 
+/*
+ * In case platform wants some specific frequency to be configured
+ * during suspend..
+ */
+int cpufreq_generic_suspend(struct cpufreq_policy *policy)
+{
+	int ret;
+
+	if (!policy->suspend_freq) {
+		pr_err("%s: suspend_freq can't be zero\n", __func__);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: Setting suspend-freq: %u\n", __func__,
+			policy->suspend_freq);
+
+	ret = __cpufreq_driver_target(policy, policy->suspend_freq,
+			CPUFREQ_RELATION_H);
+	if (ret)
+		pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
+				__func__, policy->suspend_freq, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL(cpufreq_generic_suspend);
+
 /**
- * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
+ * cpufreq_suspend() - Suspend CPUFreq governors
  *
- * This function is only executed for the boot processor.  The other CPUs
- * have been put offline by means of CPU hotplug.
+ * Called during system wide Suspend/Hibernate cycles for suspending governors
+ * as some platforms can't change frequency after this point in suspend cycle.
+ * Because some of the devices (like: i2c, regulators, etc) they use for
+ * changing frequency are suspended quickly after this point.
 */
-static int cpufreq_bp_suspend(void)
+void cpufreq_suspend(void)
 {
-	int ret = 0;
-
-	int cpu = smp_processor_id();
 	struct cpufreq_policy *policy;
 
-	pr_debug("suspending cpu %u\n", cpu);
+	if (!cpufreq_driver)
+		return;
 
-	/* If there's no policy for the boot CPU, we have nothing to do. */
-	policy = cpufreq_cpu_get(cpu);
-	if (!policy)
-		return 0;
+	if (!has_target())
+		return;
 
-	if (cpufreq_driver->suspend) {
-		ret = cpufreq_driver->suspend(policy);
-		if (ret)
-			printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
-					"step on CPU %u\n", policy->cpu);
+	pr_debug("%s: Suspending Governors\n", __func__);
+
+	list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
+		if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
+			pr_err("%s: Failed to stop governor for policy: %p\n",
+				__func__, policy);
+		else if (cpufreq_driver->suspend
+		    && cpufreq_driver->suspend(policy))
+			pr_err("%s: Failed to suspend driver: %p\n", __func__,
+				policy);
 	}
 
-	cpufreq_cpu_put(policy);
-	return ret;
+	cpufreq_suspended = true;
 }
 
 /**
- * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
- *
- *	1.) resume CPUfreq hardware support (cpufreq_driver->resume())
- *	2.) schedule call cpufreq_update_policy() ASAP as interrupts are
- *	    restored. It will verify that the current freq is in sync with
- *	    what we believe it to be. This is a bit later than when it
- *	    should be, but nonethteless it's better than calling
- *	    cpufreq_driver->get() here which might re-enable interrupts...
+ * cpufreq_resume() - Resume CPUFreq governors
  *
- * This function is only executed for the boot CPU.  The other CPUs have not
- * been turned on yet.
+ * Called during system wide Suspend/Hibernate cycle for resuming governors that
+ * are suspended with cpufreq_suspend().
 */
-static void cpufreq_bp_resume(void)
+void cpufreq_resume(void)
 {
-	int ret = 0;
-
-	int cpu = smp_processor_id();
 	struct cpufreq_policy *policy;
 
-	pr_debug("resuming cpu %u\n", cpu);
+	if (!cpufreq_driver)
+		return;
 
-	/* If there's no policy for the boot CPU, we have nothing to do. */
-	policy = cpufreq_cpu_get(cpu);
-	if (!policy)
+	if (!has_target())
 		return;
 
-	if (cpufreq_driver->resume) {
-		ret = cpufreq_driver->resume(policy);
-		if (ret) {
-			printk(KERN_ERR "cpufreq: resume failed in ->resume "
-					"step on CPU %u\n", policy->cpu);
-			goto fail;
-		}
-	}
+	pr_debug("%s: Resuming Governors\n", __func__);
 
-	schedule_work(&policy->update);
+	cpufreq_suspended = false;
 
-fail:
-	cpufreq_cpu_put(policy);
-}
+	list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
+		if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
+			pr_err("%s: Failed to resume driver: %p\n", __func__,
+				policy);
+		else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
+		    || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
+			pr_err("%s: Failed to start governor for policy: %p\n",
+				__func__, policy);
 
-static struct syscore_ops cpufreq_syscore_ops = {
-	.suspend	= cpufreq_bp_suspend,
-	.resume		= cpufreq_bp_resume,
-};
+		/*
+		 * schedule call cpufreq_update_policy() for boot CPU, i.e. last
+		 * policy in list. It will verify that the current freq is in
+		 * sync with what we believe it to be.
+		 */
+		if (list_is_last(&policy->policy_list, &cpufreq_policy_list))
+			schedule_work(&policy->update);
+	}
+}
 
 /**
 * cpufreq_get_current_driver - return current driver's name
@@ -1762,7 +1819,7 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
 		target_freq = policy->min;
 
 	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
-			policy->cpu, target_freq, relation, old_target_freq);
+		 policy->cpu, target_freq, relation, old_target_freq);
 
 	/*
 	 * This might look like a redundant call as we are checking it again
@@ -1807,20 +1864,18 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
 			freqs.flags = 0;
 
 			pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
-					__func__, policy->cpu, freqs.old,
-					freqs.new);
+				 __func__, policy->cpu, freqs.old, freqs.new);
 
-			cpufreq_notify_transition(policy, &freqs,
-					CPUFREQ_PRECHANGE);
+			cpufreq_freq_transition_begin(policy, &freqs);
 		}
 
 		retval = cpufreq_driver->target_index(policy, index);
 		if (retval)
 			pr_err("%s: Failed to change cpu frequency: %d\n",
-					__func__, retval);
+			       __func__, retval);
 
 		if (notify)
-			cpufreq_notify_post_transition(policy, &freqs, retval);
+			cpufreq_freq_transition_end(policy, &freqs, retval);
 	}
 
 out:
@@ -1863,17 +1918,18 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
 	struct cpufreq_governor *gov = NULL;
 #endif
 
+	/* Don't start any governor operations if we are entering suspend */
+	if (cpufreq_suspended)
+		return 0;
+
 	if (policy->governor->max_transition_latency &&
 	    policy->cpuinfo.transition_latency >
 	    policy->governor->max_transition_latency) {
 		if (!gov)
 			return -EINVAL;
 		else {
-			printk(KERN_WARNING "%s governor failed, too long"
-			       " transition latency of HW, fallback"
-			       " to %s governor\n",
-			       policy->governor->name,
-			       gov->name);
+			pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
+				policy->governor->name, gov->name);
 			policy->governor = gov;
 		}
 	}
@@ -1883,7 +1939,7 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
 		return -EINVAL;
 
 	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
-						policy->cpu, event);
+		 policy->cpu, event);
 
 	mutex_lock(&cpufreq_governor_lock);
 	if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
@@ -1950,9 +2006,7 @@ EXPORT_SYMBOL_GPL(cpufreq_register_governor);
 
 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
 {
-#ifdef CONFIG_HOTPLUG_CPU
 	int cpu;
-#endif
 
 	if (!governor)
 		return;
@@ -1960,14 +2014,12 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
 	if (cpufreq_disabled())
 		return;
 
-#ifdef CONFIG_HOTPLUG_CPU
 	for_each_present_cpu(cpu) {
 		if (cpu_online(cpu))
 			continue;
 		if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
 			strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
 	}
-#endif
 
 	mutex_lock(&cpufreq_governor_mutex);
 	list_del(&governor->governor_list);
@@ -2012,22 +2064,21 @@ EXPORT_SYMBOL(cpufreq_get_policy);
 static int cpufreq_set_policy(struct cpufreq_policy *policy,
 				struct cpufreq_policy *new_policy)
 {
-	int ret = 0, failed = 1;
+	struct cpufreq_governor *old_gov;
+	int ret;
 
-	pr_debug("setting new policy for CPU %u: %u - %u kHz\n", new_policy->cpu,
-		new_policy->min, new_policy->max);
+	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
+		 new_policy->cpu, new_policy->min, new_policy->max);
 
 	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
 
-	if (new_policy->min > policy->max || new_policy->max < policy->min) {
-		ret = -EINVAL;
-		goto error_out;
-	}
+	if (new_policy->min > policy->max || new_policy->max < policy->min)
+		return -EINVAL;
 
 	/* verify the cpu speed can be set within this limit */
 	ret = cpufreq_driver->verify(new_policy);
 	if (ret)
-		goto error_out;
+		return ret;
 
 	/* adjust if necessary - all reasons */
 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
@@ -2043,7 +2094,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 	 */
 	ret = cpufreq_driver->verify(new_policy);
 	if (ret)
-		goto error_out;
+		return ret;
 
 	/* notification of the new policy */
 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
@@ -2053,63 +2104,53 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 	policy->max = new_policy->max;
 
 	pr_debug("new min and max freqs are %u - %u kHz\n",
-					policy->min, policy->max);
+		 policy->min, policy->max);
 
 	if (cpufreq_driver->setpolicy) {
 		policy->policy = new_policy->policy;
 		pr_debug("setting range\n");
-		ret = cpufreq_driver->setpolicy(new_policy);
-	} else {
-		if (new_policy->governor != policy->governor) {
-			/* save old, working values */
-			struct cpufreq_governor *old_gov = policy->governor;
-
-			pr_debug("governor switch\n");
-
-			/* end old governor */
-			if (policy->governor) {
-				__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
-				up_write(&policy->rwsem);
-				__cpufreq_governor(policy,
-						CPUFREQ_GOV_POLICY_EXIT);
-				down_write(&policy->rwsem);
-			}
+		return cpufreq_driver->setpolicy(new_policy);
+	}
 
-			/* start new governor */
-			policy->governor = new_policy->governor;
-			if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
-				if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) {
-					failed = 0;
-				} else {
-					up_write(&policy->rwsem);
-					__cpufreq_governor(policy,
-							CPUFREQ_GOV_POLICY_EXIT);
-					down_write(&policy->rwsem);
-				}
-			}
+	if (new_policy->governor == policy->governor)
+		goto out;
 
-			if (failed) {
-				/* new governor failed, so re-start old one */
-				pr_debug("starting governor %s failed\n",
-							policy->governor->name);
-				if (old_gov) {
-					policy->governor = old_gov;
-					__cpufreq_governor(policy,
-							CPUFREQ_GOV_POLICY_INIT);
-					__cpufreq_governor(policy,
-							CPUFREQ_GOV_START);
-				}
-				ret = -EINVAL;
-				goto error_out;
-			}
-			/* might be a policy change, too, so fall through */
-		}
-		pr_debug("governor: change or update limits\n");
-		ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+	pr_debug("governor switch\n");
+
+	/* save old, working values */
+	old_gov = policy->governor;
+	/* end old governor */
+	if (old_gov) {
+		__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+		up_write(&policy->rwsem);
+		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+		down_write(&policy->rwsem);
 	}
 
-error_out:
-	return ret;
+	/* start new governor */
+	policy->governor = new_policy->governor;
+	if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
+		if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
+			goto out;
+
+		up_write(&policy->rwsem);
+		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+		down_write(&policy->rwsem);
+	}
+
+	/* new governor failed, so re-start old one */
+	pr_debug("starting governor %s failed\n", policy->governor->name);
+	if (old_gov) {
+		policy->governor = old_gov;
+		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
+		__cpufreq_governor(policy, CPUFREQ_GOV_START);
+	}
+
+	return -EINVAL;
+
+ out:
+	pr_debug("governor: change or update limits\n");
+	return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
 }
 
 /**
@@ -2145,8 +2186,13 @@ int cpufreq_update_policy(unsigned int cpu)
 	 */
 	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
 		new_policy.cur = cpufreq_driver->get(cpu);
+		if (WARN_ON(!new_policy.cur)) {
+			ret = -EIO;
+			goto no_policy;
+		}
+
 		if (!policy->cur) {
-			pr_debug("Driver did not initialize current freq");
+			pr_debug("Driver did not initialize current freq\n");
 			policy->cur = new_policy.cur;
 		} else {
 			if (policy->cur != new_policy.cur && has_target())
@@ -2170,30 +2216,24 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
 {
 	unsigned int cpu = (unsigned long)hcpu;
 	struct device *dev;
-	bool frozen = false;
 
 	dev = get_cpu_device(cpu);
 	if (dev) {
-
-		if (action & CPU_TASKS_FROZEN)
-			frozen = true;
-
 		switch (action & ~CPU_TASKS_FROZEN) {
 		case CPU_ONLINE:
-			__cpufreq_add_dev(dev, NULL, frozen);
-			cpufreq_update_policy(cpu);
+			__cpufreq_add_dev(dev, NULL);
 			break;

 		case CPU_DOWN_PREPARE:
-			__cpufreq_remove_dev_prepare(dev, NULL, frozen);
+			__cpufreq_remove_dev_prepare(dev, NULL);
 			break;

 		case CPU_POST_DEAD:
-			__cpufreq_remove_dev_finish(dev, NULL, frozen);
+			__cpufreq_remove_dev_finish(dev, NULL);
 			break;

 		case CPU_DOWN_FAILED:
-			__cpufreq_add_dev(dev, NULL, frozen);
+			__cpufreq_add_dev(dev, NULL);
 			break;
 		}
 	}
@@ -2249,8 +2289,8 @@ int cpufreq_boost_trigger_state(int state)
 		cpufreq_driver->boost_enabled = !state;
 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-		pr_err("%s: Cannot %s BOOST\n", __func__,
-		       state ? "enable" : "disable");
+		pr_err("%s: Cannot %s BOOST\n",
+		       __func__, state ? "enable" : "disable");
 	}
 
 	return ret;
@@ -2295,7 +2335,9 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 
 	if (!driver_data || !driver_data->verify || !driver_data->init ||
 	    !(driver_data->setpolicy || driver_data->target_index ||
-		    driver_data->target))
+		    driver_data->target) ||
+	     (driver_data->setpolicy && (driver_data->target_index ||
+		    driver_data->target)))
 		return -EINVAL;
 
 	pr_debug("trying to register driver %s\n", driver_data->name);
@@ -2322,7 +2364,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 		ret = cpufreq_sysfs_create_file(&boost.attr);
 		if (ret) {
 			pr_err("%s: cannot register global BOOST sysfs file\n",
-				__func__);
+			       __func__);
 			goto err_null_driver;
 		}
 	}
@@ -2345,7 +2387,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 		/* if all ->init() calls failed, unregister */
 		if (ret) {
 			pr_debug("no CPU initialized for driver %s\n",
-					driver_data->name);
+				 driver_data->name);
 			goto err_if_unreg;
 		}
 	}
@@ -2409,7 +2451,6 @@ static int __init cpufreq_core_init(void)
 
 	cpufreq_global_kobject = kobject_create();
 	BUG_ON(!cpufreq_global_kobject);
-	register_syscore_ops(&cpufreq_syscore_ops);
 
 	return 0;
 }