Diffstat (limited to 'drivers/cpufreq')
23 files changed, 308 insertions, 189 deletions
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index ebac67115009..7364a538e056 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -104,6 +104,7 @@ config ARM_IMX6Q_CPUFREQ
 	tristate "Freescale i.MX6 cpufreq support"
 	depends on ARCH_MXC
 	depends on REGULATOR_ANATOP
+	select PM_OPP
 	help
 	  This adds cpufreq driver support for Freescale i.MX6 series SoCs.
 
@@ -118,7 +119,7 @@ config ARM_INTEGRATOR
 	  If in doubt, say Y.
 
 config ARM_KIRKWOOD_CPUFREQ
-	def_bool MACH_KIRKWOOD
+	def_bool ARCH_KIRKWOOD || MACH_KIRKWOOD
 	help
 	  This adds the CPUFreq driver for Marvell Kirkwood
 	  SoCs.
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 738c8b7b17dc..db6d9a2fea4d 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -49,7 +49,7 @@ obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ)	+= arm_big_little.o
 # LITTLE drivers, so that it is probed last.
 obj-$(CONFIG_ARM_DT_BL_CPUFREQ)		+= arm_big_little_dt.o
 
-obj-$(CONFIG_ARCH_DAVINCI_DA850)	+= davinci-cpufreq.o
+obj-$(CONFIG_ARCH_DAVINCI)		+= davinci-cpufreq.o
 obj-$(CONFIG_UX500_SOC_DB8500)		+= dbx500-cpufreq.o
 obj-$(CONFIG_ARM_EXYNOS_CPUFREQ)	+= exynos-cpufreq.o
 obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ)	+= exynos4210-cpufreq.o
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
index 1f4d4e315057..a46c223c2506 100644
--- a/drivers/cpufreq/arm_big_little.c
+++ b/drivers/cpufreq/arm_big_little.c
@@ -24,6 +24,7 @@
 #include <linux/cpufreq.h>
 #include <linux/cpumask.h>
 #include <linux/export.h>
+#include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/of_platform.h>
 #include <linux/pm_opp.h>
@@ -593,3 +594,7 @@ void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops)
 	arm_bL_ops = NULL;
 }
 EXPORT_SYMBOL_GPL(bL_cpufreq_unregister);
+
+MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
+MODULE_DESCRIPTION("Generic ARM big LITTLE cpufreq driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpufreq/arm_big_little_dt.c b/drivers/cpufreq/arm_big_little_dt.c
index 8d9d59108906..4550f6976768 100644
--- a/drivers/cpufreq/arm_big_little_dt.c
+++ b/drivers/cpufreq/arm_big_little_dt.c
@@ -114,4 +114,4 @@ module_platform_driver(generic_bL_platdrv);
 
 MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
 MODULE_DESCRIPTION("Generic ARM big LITTLE cpufreq driver via DT");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index ee1ae303a07c..0d2172b07765 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -137,7 +137,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
 	 * not yet registered, we should try defering probe.
 	 */
 	if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) {
-		dev_err(cpu_dev, "cpu0 regulator not ready, retry\n");
+		dev_dbg(cpu_dev, "cpu0 regulator not ready, retry\n");
 		ret = -EPROBE_DEFER;
 		goto out_put_node;
 	}
@@ -152,11 +152,8 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
 		goto out_put_reg;
 	}
 
-	ret = of_init_opp_table(cpu_dev);
-	if (ret) {
-		pr_err("failed to init OPP table: %d\n", ret);
-		goto out_put_clk;
-	}
+	/* OPPs might be populated at runtime, don't check for error here */
+	of_init_opp_table(cpu_dev);
 
 	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
 	if (ret) {
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 62259d27f03e..d9fdeddcef96 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1076,10 +1076,20 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
 	kfree(policy);
 }
 
-static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
+static int update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu,
+			     struct device *cpu_dev)
 {
+	int ret;
+
 	if (WARN_ON(cpu == policy->cpu))
-		return;
+		return 0;
+
+	/* Move kobject to the new policy->cpu */
+	ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
+	if (ret) {
+		pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
+		return ret;
+	}
 
 	down_write(&policy->rwsem);
 
@@ -1090,6 +1100,8 @@ static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
 
 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
 			CPUFREQ_UPDATE_POLICY_CPU, policy);
+
+	return 0;
 }
 
 static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
@@ -1154,7 +1166,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 	 * by invoking update_policy_cpu().
 	 */
 	if (recover_policy && cpu != policy->cpu)
-		update_policy_cpu(policy, cpu);
+		WARN_ON(update_policy_cpu(policy, cpu, dev));
 	else
 		policy->cpu = cpu;
 
@@ -1307,38 +1319,11 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 	return __cpufreq_add_dev(dev, sif);
 }
 
-static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
-					   unsigned int old_cpu)
-{
-	struct device *cpu_dev;
-	int ret;
-
-	/* first sibling now owns the new sysfs dir */
-	cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));
-
-	sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
-	ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
-	if (ret) {
-		pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
-
-		down_write(&policy->rwsem);
-		cpumask_set_cpu(old_cpu, policy->cpus);
-		up_write(&policy->rwsem);
-
-		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
-					"cpufreq");
-
-		return -EINVAL;
-	}
-
-	return cpu_dev->id;
-}
-
 static int __cpufreq_remove_dev_prepare(struct device *dev,
 					struct subsys_interface *sif)
 {
 	unsigned int cpu = dev->id, cpus;
-	int new_cpu, ret;
+	int ret;
 	unsigned long flags;
 	struct cpufreq_policy *policy;
 
@@ -1378,14 +1363,23 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
 	if (cpu != policy->cpu) {
 		sysfs_remove_link(&dev->kobj, "cpufreq");
 	} else if (cpus > 1) {
-		new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu);
-		if (new_cpu >= 0) {
-			update_policy_cpu(policy, new_cpu);
+		/* Nominate new CPU */
+		int new_cpu = cpumask_any_but(policy->cpus, cpu);
+		struct device *cpu_dev = get_cpu_device(new_cpu);
 
-			if (!cpufreq_suspended)
-				pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
-					 __func__, new_cpu, cpu);
+		sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
+		ret = update_policy_cpu(policy, new_cpu, cpu_dev);
+		if (ret) {
+			if (sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
+					      "cpufreq"))
+				pr_err("%s: Failed to restore kobj link to cpu:%d\n",
+				       __func__, cpu_dev->id);
+			return ret;
 		}
+
+		if (!cpufreq_suspended)
+			pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
+				 __func__, new_cpu, cpu);
 	} else if (cpufreq_driver->stop_cpu && cpufreq_driver->setpolicy) {
 		cpufreq_driver->stop_cpu(policy);
 	}
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 18d409189092..ad3f38fd3eb9 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -170,21 +170,24 @@ static void od_check_cpu(int cpu, unsigned int load)
 		dbs_freq_increase(policy, policy->max);
 	} else {
 		/* Calculate the next frequency proportional to load */
-		unsigned int freq_next;
-		freq_next = load * policy->cpuinfo.max_freq / 100;
+		unsigned int freq_next, min_f, max_f;
+
+		min_f = policy->cpuinfo.min_freq;
+		max_f = policy->cpuinfo.max_freq;
+		freq_next = min_f + load * (max_f - min_f) / 100;
 
 		/* No longer fully busy, reset rate_mult */
 		dbs_info->rate_mult = 1;
 
 		if (!od_tuners->powersave_bias) {
 			__cpufreq_driver_target(policy, freq_next,
-					CPUFREQ_RELATION_L);
+					CPUFREQ_RELATION_C);
 			return;
 		}
 
 		freq_next = od_ops.powersave_bias_target(policy, freq_next,
 					CPUFREQ_RELATION_L);
-		__cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);
+		__cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_C);
 	}
 }
 
diff --git a/drivers/cpufreq/cpufreq_opp.c b/drivers/cpufreq/cpufreq_opp.c
index c0c6f4a4eccf..f7a32d2326c6 100644
--- a/drivers/cpufreq/cpufreq_opp.c
+++ b/drivers/cpufreq/cpufreq_opp.c
@@ -60,7 +60,7 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev,
 		goto out;
 	}
 
-	freq_table = kzalloc(sizeof(*freq_table) * (max_opps + 1), GFP_KERNEL);
+	freq_table = kcalloc(sizeof(*freq_table), (max_opps + 1), GFP_ATOMIC);
 	if (!freq_table) {
 		ret = -ENOMEM;
 		goto out;
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 1632981c4b25..df14766a8e06 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -117,7 +117,7 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
 		.frequency = 0,
 	};
 	struct cpufreq_frequency_table *pos;
-	unsigned int freq, i = 0;
+	unsigned int freq, diff, i = 0;
 
 	pr_debug("request for target %u kHz (relation: %u) for cpu %u\n",
 					target_freq, relation, policy->cpu);
@@ -127,6 +127,7 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
 		suboptimal.frequency = ~0;
 		break;
 	case CPUFREQ_RELATION_L:
+	case CPUFREQ_RELATION_C:
 		optimal.frequency = ~0;
 		break;
 	}
@@ -168,6 +169,15 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
 				}
 			}
 			break;
+		case CPUFREQ_RELATION_C:
+			diff = abs(freq - target_freq);
+			if (diff < optimal.frequency ||
+			    (diff == optimal.frequency &&
+			     freq > table[optimal.driver_data].frequency)) {
+				optimal.frequency = diff;
+				optimal.driver_data = i;
+			}
+			break;
 		}
 	}
 	if (optimal.driver_data > i) {
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index af366c21d4b4..c2d30765bf3d 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -66,10 +66,12 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
 
 	/* scaling up?  scale voltage before frequency */
 	if (new_freq > old_freq) {
-		ret = regulator_set_voltage_tol(pu_reg, imx6_soc_volt[index], 0);
-		if (ret) {
-			dev_err(cpu_dev, "failed to scale vddpu up: %d\n", ret);
-			return ret;
+		if (!IS_ERR(pu_reg)) {
+			ret = regulator_set_voltage_tol(pu_reg, imx6_soc_volt[index], 0);
+			if (ret) {
+				dev_err(cpu_dev, "failed to scale vddpu up: %d\n", ret);
+				return ret;
+			}
 		}
 		ret = regulator_set_voltage_tol(soc_reg, imx6_soc_volt[index], 0);
 		if (ret) {
@@ -121,10 +123,12 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
 			dev_warn(cpu_dev, "failed to scale vddsoc down: %d\n", ret);
 			ret = 0;
 		}
-		ret = regulator_set_voltage_tol(pu_reg, imx6_soc_volt[index], 0);
-		if (ret) {
-			dev_warn(cpu_dev, "failed to scale vddpu down: %d\n", ret);
-			ret = 0;
+		if (!IS_ERR(pu_reg)) {
+			ret = regulator_set_voltage_tol(pu_reg, imx6_soc_volt[index], 0);
+			if (ret) {
+				dev_warn(cpu_dev, "failed to scale vddpu down: %d\n", ret);
+				ret = 0;
+			}
 		}
 	}
 
@@ -182,9 +186,9 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
 	}
 
 	arm_reg = regulator_get(cpu_dev, "arm");
-	pu_reg = regulator_get(cpu_dev, "pu");
+	pu_reg = regulator_get_optional(cpu_dev, "pu");
 	soc_reg = regulator_get(cpu_dev, "soc");
-	if (IS_ERR(arm_reg) || IS_ERR(pu_reg) || IS_ERR(soc_reg)) {
+	if (IS_ERR(arm_reg) || IS_ERR(soc_reg)) {
 		dev_err(cpu_dev, "failed to get regulators\n");
 		ret = -ENOENT;
 		goto put_reg;
@@ -268,9 +272,11 @@ soc_opp_out:
 	ret = regulator_set_voltage_time(soc_reg, imx6_soc_volt[0], imx6_soc_volt[num - 1]);
 	if (ret > 0)
 		transition_latency += ret * 1000;
-	ret = regulator_set_voltage_time(pu_reg, imx6_soc_volt[0], imx6_soc_volt[num - 1]);
-	if (ret > 0)
-		transition_latency += ret * 1000;
+	if (!IS_ERR(pu_reg)) {
+		ret = regulator_set_voltage_time(pu_reg, imx6_soc_volt[0], imx6_soc_volt[num - 1]);
+		if (ret > 0)
+			transition_latency += ret * 1000;
+	}
 
 	/*
 	 * OPP is maintained in order of increasing frequency, and
@@ -327,7 +333,8 @@ static int imx6q_cpufreq_remove(struct platform_device *pdev)
 	cpufreq_unregister_driver(&imx6q_cpufreq_driver);
 	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
 	regulator_put(arm_reg);
-	regulator_put(pu_reg);
+	if (!IS_ERR(pu_reg))
+		regulator_put(pu_reg);
 	regulator_put(soc_reg);
 	clk_put(arm_clk);
 	clk_put(pll1_sys_clk);
diff --git a/drivers/cpufreq/integrator-cpufreq.c b/drivers/cpufreq/integrator-cpufreq.c
index e5122f1bfe78..c1320528b9d0 100644
--- a/drivers/cpufreq/integrator-cpufreq.c
+++ b/drivers/cpufreq/integrator-cpufreq.c
@@ -92,7 +92,7 @@ static int integrator_set_target(struct cpufreq_policy *policy,
 	 * Bind to the specified CPU.  When this call returns,
 	 * we should be running on the right CPU.
 	 */
-	set_cpus_allowed(current, cpumask_of_cpu(cpu));
+	set_cpus_allowed_ptr(current, cpumask_of(cpu));
 	BUG_ON(cpu != smp_processor_id());
 
 	/* get current setting */
@@ -118,7 +118,7 @@ static int integrator_set_target(struct cpufreq_policy *policy,
 	freqs.new = icst_hz(&cclk_params, vco) / 1000;
 
 	if (freqs.old == freqs.new) {
-		set_cpus_allowed(current, cpus_allowed);
+		set_cpus_allowed_ptr(current, &cpus_allowed);
 		return 0;
 	}
 
@@ -141,7 +141,7 @@ static int integrator_set_target(struct cpufreq_policy *policy,
 	/*
 	 * Restore the CPUs allowed mask.
 	 */
-	set_cpus_allowed(current, cpus_allowed);
+	set_cpus_allowed_ptr(current, &cpus_allowed);
 
 	cpufreq_freq_transition_end(policy, &freqs, 0);
 
@@ -157,7 +157,7 @@ static unsigned int integrator_get(unsigned int cpu)
 
 	cpus_allowed = current->cpus_allowed;
 
-	set_cpus_allowed(current, cpumask_of_cpu(cpu));
+	set_cpus_allowed_ptr(current, cpumask_of(cpu));
 	BUG_ON(cpu != smp_processor_id());
 
 	/* detect memory etc. */
@@ -173,7 +173,7 @@ static unsigned int integrator_get(unsigned int cpu)
 
 	current_freq = icst_hz(&cclk_params, vco) / 1000; /* current freq */
 
-	set_cpus_allowed(current, cpus_allowed);
+	set_cpus_allowed_ptr(current, &cpus_allowed);
 
 	return current_freq;
 }
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 924bb2d42b1c..c5eac949760d 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -37,7 +37,6 @@
 #define BYT_TURBO_RATIOS	0x66c
 #define BYT_TURBO_VIDS		0x66d
 
-
 #define FRAC_BITS 8
 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
 #define fp_toint(X) ((X) >> FRAC_BITS)
@@ -50,7 +49,7 @@ static inline int32_t mul_fp(int32_t x, int32_t y)
 
 static inline int32_t div_fp(int32_t x, int32_t y)
 {
-	return div_s64((int64_t)x << FRAC_BITS, (int64_t)y);
+	return div_s64((int64_t)x << FRAC_BITS, y);
 }
 
 struct sample {
@@ -128,6 +127,7 @@ static struct pstate_funcs pstate_funcs;
 
 struct perf_limits {
 	int no_turbo;
+	int turbo_disabled;
 	int max_perf_pct;
 	int min_perf_pct;
 	int32_t max_perf;
@@ -147,7 +147,7 @@ static struct perf_limits limits = {
 };
 
 static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
-		int deadband, int integral) {
+			     int deadband, int integral) {
 	pid->setpoint = setpoint;
 	pid->deadband  = deadband;
 	pid->integral  = int_tofp(integral);
@@ -166,7 +166,6 @@ static inline void pid_i_gain_set(struct _pid *pid, int percent)
 
 static inline void pid_d_gain_set(struct _pid *pid, int percent)
 {
-
 	pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
 }
 
@@ -206,16 +205,13 @@ static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
 	pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
 	pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);
 
-	pid_reset(&cpu->pid,
-		pid_params.setpoint,
-		100,
-		pid_params.deadband,
-		0);
+	pid_reset(&cpu->pid, pid_params.setpoint, 100, pid_params.deadband, 0);
 }
 
 static inline void intel_pstate_reset_all_pid(void)
 {
 	unsigned int cpu;
+
 	for_each_online_cpu(cpu) {
 		if (all_cpu_data[cpu])
 			intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
@@ -229,13 +225,13 @@ static int pid_param_set(void *data, u64 val)
 	intel_pstate_reset_all_pid();
 	return 0;
 }
+
 static int pid_param_get(void *data, u64 *val)
 {
 	*val = *(u32 *)data;
 	return 0;
 }
-DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get,
-			pid_param_set, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n");
 
 struct pid_param {
 	char *name;
@@ -252,9 +248,9 @@ static struct pid_param pid_files[] = {
 	{NULL, NULL}
 };
 
-static struct dentry *debugfs_parent;
-static void intel_pstate_debug_expose_params(void)
+static void __init intel_pstate_debug_expose_params(void)
 {
+	struct dentry *debugfs_parent;
 	int i = 0;
 
 	debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
@@ -262,8 +258,8 @@ static void intel_pstate_debug_expose_params(void)
 		return;
 	while (pid_files[i].name) {
 		debugfs_create_file(pid_files[i].name, 0660,
-				debugfs_parent, pid_files[i].value,
-				&fops_pid_param);
+				    debugfs_parent, pid_files[i].value,
+				    &fops_pid_param);
 		i++;
 	}
 }
@@ -279,23 +275,28 @@ static void intel_pstate_debug_expose_params(void)
 }
 
 static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
-				const char *buf, size_t count)
+			      const char *buf, size_t count)
 {
 	unsigned int input;
 	int ret;
+
 	ret = sscanf(buf, "%u", &input);
 	if (ret != 1)
 		return -EINVAL;
 	limits.no_turbo = clamp_t(int, input, 0 , 1);
-
+	if (limits.turbo_disabled) {
+		pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
+		limits.no_turbo = limits.turbo_disabled;
+	}
 	return count;
 }
 
 static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
-				const char *buf, size_t count)
+				  const char *buf, size_t count)
 {
 	unsigned int input;
 	int ret;
+
 	ret = sscanf(buf, "%u", &input);
 	if (ret != 1)
 		return -EINVAL;
@@ -303,14 +304,16 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
 	limits.max_sysfs_pct = clamp_t(int, input, 0 , 100);
 	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
 	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
+
 	return count;
 }
 
 static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
-				const char *buf, size_t count)
+				  const char *buf, size_t count)
 {
 	unsigned int input;
 	int ret;
+
 	ret = sscanf(buf, "%u", &input);
 	if (ret != 1)
 		return -EINVAL;
@@ -338,17 +341,16 @@ static struct attribute *intel_pstate_attributes[] = {
 static struct attribute_group intel_pstate_attr_group = {
 	.attrs = intel_pstate_attributes,
 };
-static struct kobject *intel_pstate_kobject;
 
-static void intel_pstate_sysfs_expose_params(void)
+static void __init intel_pstate_sysfs_expose_params(void)
 {
+	struct kobject *intel_pstate_kobject;
 	int rc;
 
 	intel_pstate_kobject = kobject_create_and_add("intel_pstate",
 						&cpu_subsys.dev_root->kobj);
 	BUG_ON(!intel_pstate_kobject);
-	rc = sysfs_create_group(intel_pstate_kobject,
-				&intel_pstate_attr_group);
+	rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
 	BUG_ON(rc);
 }
 
@@ -356,22 +358,25 @@ static void intel_pstate_sysfs_expose_params(void)
 static int byt_get_min_pstate(void)
 {
 	u64 value;
+
 	rdmsrl(BYT_RATIOS, value);
-	return (value >> 8) & 0x3F;
+	return (value >> 8) & 0x7F;
 }
 
 static int byt_get_max_pstate(void)
 {
 	u64 value;
+
 	rdmsrl(BYT_RATIOS, value);
-	return (value >> 16) & 0x3F;
+	return (value >> 16) & 0x7F;
 }
 
 static int byt_get_turbo_pstate(void)
 {
 	u64 value;
+
 	rdmsrl(BYT_TURBO_RATIOS, value);
-	return value & 0x3F;
+	return value & 0x7F;
 }
 
 static void byt_set_pstate(struct cpudata *cpudata, int pstate)
@@ -381,7 +386,7 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
 	u32 vid;
 
 	val = pstate << 8;
-	if (limits.no_turbo)
+	if (limits.no_turbo && !limits.turbo_disabled)
 		val |= (u64)1 << 32;
 
 	vid_fp = cpudata->vid.min + mul_fp(
@@ -403,10 +408,9 @@ static void byt_get_vid(struct cpudata *cpudata)
 {
 	u64 value;
 
-
 	rdmsrl(BYT_VIDS, value);
-	cpudata->vid.min = int_tofp((value >> 8) & 0x3f);
-	cpudata->vid.max = int_tofp((value >> 16) & 0x3f);
+	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
+	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
 	cpudata->vid.ratio = div_fp(
 		cpudata->vid.max - cpudata->vid.min,
 		int_tofp(cpudata->pstate.max_pstate -
@@ -416,10 +420,10 @@ static void byt_get_vid(struct cpudata *cpudata)
 	cpudata->vid.turbo = value & 0x7f;
 }
 
-
 static int core_get_min_pstate(void)
 {
 	u64 value;
+
 	rdmsrl(MSR_PLATFORM_INFO, value);
 	return (value >> 40) & 0xFF;
 }
@@ -427,6 +431,7 @@ static int core_get_min_pstate(void)
 static int core_get_max_pstate(void)
 {
 	u64 value;
+
 	rdmsrl(MSR_PLATFORM_INFO, value);
 	return (value >> 8) & 0xFF;
 }
@@ -435,9 +440,10 @@ static int core_get_turbo_pstate(void)
 {
 	u64 value;
 	int nont, ret;
+
 	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
 	nont = core_get_max_pstate();
-	ret = ((value) & 255);
+	ret = (value) & 255;
 	if (ret <= nont)
 		ret = nont;
 	return ret;
@@ -448,7 +454,7 @@ static void core_set_pstate(struct cpudata *cpudata, int pstate)
 	u64 val;
 
 	val = pstate << 8;
-	if (limits.no_turbo)
+	if (limits.no_turbo && !limits.turbo_disabled)
 		val |= (u64)1 << 32;
 
 	wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
@@ -489,12 +495,12 @@ static struct cpu_defaults byt_params = {
 	},
 };
 
-
 static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
 {
 	int max_perf = cpu->pstate.turbo_pstate;
 	int max_perf_adj;
 	int min_perf;
+
 	if (limits.no_turbo)
 		max_perf = cpu->pstate.max_pstate;
 
@@ -503,8 +509,7 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
 			cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
 
 	min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
-	*min = clamp_t(int, min_perf,
-			cpu->pstate.min_pstate, max_perf);
+	*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
 }
 
@@ -525,21 +530,6 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
 	pstate_funcs.set(cpu, pstate);
 }
 
-static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
-{
-	int target;
-	target = cpu->pstate.current_pstate + steps;
-
-	intel_pstate_set_pstate(cpu, target);
-}
-
-static inline void intel_pstate_pstate_decrease(struct cpudata *cpu, int steps)
-{
-	int target;
-	target = cpu->pstate.current_pstate - steps;
-	intel_pstate_set_pstate(cpu, target);
-}
-
 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
 {
 	cpu->pstate.min_pstate = pstate_funcs.get_min();
@@ -555,13 +545,9 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu)
 {
 	struct sample *sample = &cpu->sample;
 	int64_t core_pct;
-	int32_t rem;
 
 	core_pct = int_tofp(sample->aperf) * int_tofp(100);
-	core_pct = div_u64_rem(core_pct, int_tofp(sample->mperf), &rem);
-
-	if ((rem << 1) >= int_tofp(sample->mperf))
-		core_pct += 1;
+	core_pct = div64_u64(core_pct, int_tofp(sample->mperf));
 
 	sample->freq = fp_toint(
 		mul_fp(int_tofp(cpu->pstate.max_pstate * 1000), core_pct));
@@ -572,12 +558,12 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu)
 static inline void intel_pstate_sample(struct cpudata *cpu)
 {
 	u64 aperf, mperf;
+	unsigned long flags;
 
+	local_irq_save(flags);
 	rdmsrl(MSR_IA32_APERF, aperf);
 	rdmsrl(MSR_IA32_MPERF, mperf);
-
-	aperf = aperf >> FRAC_BITS;
-	mperf = mperf >> FRAC_BITS;
+	local_irq_restore(flags);
 
 	cpu->last_sample_time = cpu->sample.time;
 	cpu->sample.time = ktime_get();
@@ -594,10 +580,9 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
 {
-	int sample_time, delay;
+	int delay;
 
-	sample_time = pid_params.sample_rate_ms;
-	delay = msecs_to_jiffies(sample_time);
+	delay = msecs_to_jiffies(pid_params.sample_rate_ms);
 	mod_timer_pinned(&cpu->timer, jiffies + delay);
 }
 
@@ -612,12 +597,12 @@ static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
 	current_pstate = int_tofp(cpu->pstate.current_pstate);
 	core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
 
-	sample_time = (pid_params.sample_rate_ms  * USEC_PER_MSEC);
+	sample_time = pid_params.sample_rate_ms  * USEC_PER_MSEC;
 	duration_us = (u32) ktime_us_delta(cpu->sample.time,
-					cpu->last_sample_time);
+					   cpu->last_sample_time);
 	if (duration_us > sample_time * 3) {
 		sample_ratio = div_fp(int_tofp(sample_time),
-				int_tofp(duration_us));
+				      int_tofp(duration_us));
 		core_busy = mul_fp(core_busy, sample_ratio);
 	}
 
@@ -628,20 +613,15 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
 {
 	int32_t busy_scaled;
 	struct _pid *pid;
-	signed int ctl = 0;
-	int steps;
+	signed int ctl;
 
 	pid = &cpu->pid;
 	busy_scaled = intel_pstate_get_scaled_busy(cpu);
 
 	ctl = pid_calc(pid, busy_scaled);
 
-	steps = abs(ctl);
-
-	if (ctl < 0)
-		intel_pstate_pstate_increase(cpu, steps);
-	else
-		intel_pstate_pstate_decrease(cpu, steps);
+	/* Negative values of ctl increase the pstate and vice versa */
+	intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl);
 }
 
 static void intel_pstate_timer_func(unsigned long __data)
@@ -696,14 +676,12 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
 
 	cpu = all_cpu_data[cpunum];
 
-	intel_pstate_get_cpu_pstates(cpu);
-
 	cpu->cpu = cpunum;
+	intel_pstate_get_cpu_pstates(cpu);
 
 	init_timer_deferrable(&cpu->timer);
 	cpu->timer.function = intel_pstate_timer_func;
-	cpu->timer.data =
-		(unsigned long)cpu;
+	cpu->timer.data = (unsigned long)cpu;
 	cpu->timer.expires = jiffies + HZ/100;
 	intel_pstate_busy_pid_reset(cpu);
 	intel_pstate_sample(cpu);
@@ -741,14 +719,14 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 		limits.min_perf = int_tofp(1);
 		limits.max_perf_pct = 100;
 		limits.max_perf = int_tofp(1);
-		limits.no_turbo = 0;
+		limits.no_turbo = limits.turbo_disabled;
 		return 0;
 	}
 
 	limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
 	limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100);
 	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
-	limits.max_policy_pct = policy->max * 100 / policy->cpuinfo.max_freq;
+	limits.max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq;
 	limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0 , 100);
 	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
 	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
@@ -760,8 +738,8 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
 {
 	cpufreq_verify_within_cpu_limits(policy);
 
-	if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
-	    (policy->policy != CPUFREQ_POLICY_PERFORMANCE))
+	if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
+	    policy->policy != CPUFREQ_POLICY_PERFORMANCE)
 		return -EINVAL;
 
 	return 0;
@@ -784,6 +762,7 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
 {
 	struct cpudata *cpu;
 	int rc;
+	u64 misc_en;
 
 	rc = intel_pstate_init_cpu(policy->cpu);
 	if (rc)
@@ -791,8 +770,13 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
 
 	cpu = all_cpu_data[policy->cpu];
 
-	if (!limits.no_turbo &&
-	    limits.min_perf_pct == 100 && limits.max_perf_pct == 100)
+	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
+	if (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
+	    cpu->pstate.max_pstate == cpu->pstate.turbo_pstate) {
+		limits.turbo_disabled = 1;
+		limits.no_turbo = 1;
+	}
+	if (limits.min_perf_pct == 100 && limits.max_perf_pct == 100)
 		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
 	else
 		policy->policy = CPUFREQ_POLICY_POWERSAVE;
@@ -830,8 +814,8 @@ static int intel_pstate_msrs_not_valid(void)
 	rdmsrl(MSR_IA32_MPERF, mperf);
 
 	if (!pstate_funcs.get_max() ||
-		!pstate_funcs.get_min() ||
-		!pstate_funcs.get_turbo())
+	    !pstate_funcs.get_min() ||
+	    !pstate_funcs.get_turbo())
 		return -ENODEV;
 
 	rdmsrl(MSR_IA32_APERF, tmp);
@@ -913,14 +897,14 @@ static bool intel_pstate_platform_pwr_mgmt_exists(void)
 	struct acpi_table_header hdr;
 	struct hw_vendor_info *v_info;
 
-	if (acpi_disabled
-	    || ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
+	if (acpi_disabled ||
+	    ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
 		return false;
 
 	for (v_info = vendor_info; v_info->valid; v_info++) {
-		if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE)
-		    && !strncmp(hdr.oem_table_id, v_info->oem_table_id, ACPI_OEM_TABLE_ID_SIZE)
-		    && intel_pstate_no_acpi_pss())
+		if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) &&
+		    !strncmp(hdr.oem_table_id, v_info->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
+		    intel_pstate_no_acpi_pss())
 			return true;
 	}
 
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index d4add8621944..9fa177206032 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -148,9 +148,9 @@ static void loongson2_cpu_wait(void)
 	u32 cpu_freq;
 
 	spin_lock_irqsave(&loongson2_wait_lock, flags);
-	cpu_freq = LOONGSON_CHIPCFG0;
-	LOONGSON_CHIPCFG0 &= ~0x7;	/* Put CPU into wait mode */
-	LOONGSON_CHIPCFG0 = cpu_freq;	/* Restore CPU state */
+	cpu_freq = LOONGSON_CHIPCFG(0);
+	LOONGSON_CHIPCFG(0) &= ~0x7;	/* Put CPU into wait mode */
+	LOONGSON_CHIPCFG(0) = cpu_freq;	/* Restore CPU state */
 	spin_unlock_irqrestore(&loongson2_wait_lock, flags);
 	local_irq_enable();
 }
diff --git a/drivers/cpufreq/pmac64-cpufreq.c b/drivers/cpufreq/pmac64-cpufreq.c
index 8bc422977b5b..4ff86878727f 100644
--- a/drivers/cpufreq/pmac64-cpufreq.c
+++ b/drivers/cpufreq/pmac64-cpufreq.c
@@ -499,8 +499,7 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
 	}
 
 	/* Lookup the i2c hwclock */
-	for (hwclock = NULL;
-	     (hwclock = of_find_node_by_name(hwclock, "i2c-hwclock")) != NULL;){
+	for_each_node_by_name(hwclock, "i2c-hwclock") {
 		const char *loc = of_get_property(hwclock,
 				"hwctrl-location", NULL);
 		if (loc == NULL)
diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
index c8012bc86910..f91027259c3c 100644
--- a/drivers/cpufreq/powernow-k6.c
+++ b/drivers/cpufreq/powernow-k6.c
@@ -55,6 +55,7 @@ static const struct {
 	unsigned freq;
 	unsigned mult;
 } usual_frequency_table[] = {
+	{ 350000, 35 },	// 100 * 3.5
 	{ 400000, 40 },	// 100 * 4
 	{ 450000, 45 },	// 100 * 4.5
 	{ 475000, 50 },	//  95 * 5
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index bb1d08dc8cc8..379c0837f5a9 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -28,6 +28,7 @@
 #include <linux/of.h>
 
 #include <asm/cputhreads.h>
+#include <asm/firmware.h>
 #include <asm/reg.h>
 #include <asm/smp.h> /* Required for cpu_sibling_mask() in UP configs */
 
@@ -98,7 +99,11 @@ static int init_powernv_pstates(void)
 		return -ENODEV;
 	}
 
-	WARN_ON(len_ids != len_freqs);
+	if (len_ids != len_freqs) {
+		pr_warn("Entries in ibm,pstate-ids and "
+			"ibm,pstate-frequencies-mhz does not match\n");
+	}
+
 	nr_pstates = min(len_ids, len_freqs) / sizeof(u32);
 	if (!nr_pstates) {
 		pr_warn("No PStates found\n");
@@ -131,7 +136,12 @@ static unsigned int pstate_id_to_freq(int pstate_id)
 	int i;
 
 	i = powernv_pstate_info.max - pstate_id;
-	BUG_ON(i >= powernv_pstate_info.nr_pstates || i < 0);
+	if (i >= powernv_pstate_info.nr_pstates || i < 0) {
+		pr_warn("PState id %d outside of PState table, "
+			"reporting nominal id %d instead\n",
+			pstate_id, powernv_pstate_info.nominal);
+		i = powernv_pstate_info.max - powernv_pstate_info.nominal;
+	}
 
 	return powernv_freqs[i].frequency;
 }
@@ -321,6 +331,10 @@ static int __init powernv_cpufreq_init(void)
 {
 	int rc = 0;
 
+	/* Don't probe on pseries (guest) platforms */
+	if (!firmware_has_feature(FW_FEATURE_OPALv3))
+		return -ENODEV;
+
 	/* Discover pstates from device tree and init */
 	rc = init_powernv_pstates();
 	if (rc) {
diff --git a/drivers/cpufreq/s3c2410-cpufreq.c b/drivers/cpufreq/s3c2410-cpufreq.c
index cfa0dd8723ec..b8e5da8e188b 100644
--- a/drivers/cpufreq/s3c2410-cpufreq.c
+++ b/drivers/cpufreq/s3c2410-cpufreq.c
@@ -26,7 +26,6 @@
 #include <mach/regs-clock.h>
 
 #include <plat/cpu.h>
-#include <plat/clock.h>
 #include <plat/cpu-freq-core.h>
 
 /* Note, 2410A has an extra mode for 1:4:4 ratio, bit 2 of CLKDIV */
@@ -104,7 +103,6 @@ static struct s3c_cpufreq_info s3c2410_cpufreq_info = {
 	.calc_iotiming	= s3c2410_iotiming_calc,
 	.set_iotiming	= s3c2410_iotiming_set,
 	.get_iotiming	= s3c2410_iotiming_get,
-	.resume_clocks	= s3c2410_setup_clocks,
 
 	.set_fvco	= s3c2410_set_fvco,
 	.set_refresh	= s3c2410_cpufreq_setrefresh,
diff --git a/drivers/cpufreq/s3c2412-cpufreq.c b/drivers/cpufreq/s3c2412-cpufreq.c
index 4645b4898996..eb262133fef2 100644
--- a/drivers/cpufreq/s3c2412-cpufreq.c
+++ b/drivers/cpufreq/s3c2412-cpufreq.c
@@ -28,7 +28,6 @@
 #include <mach/s3c2412.h>
 
 #include <plat/cpu.h>
-#include <plat/clock.h>
 #include <plat/cpu-freq-core.h>
 
 /* our clock resources. */
@@ -188,8 +187,6 @@ static struct s3c_cpufreq_info s3c2412_cpufreq_info = {
 	.set_iotiming	= s3c2412_iotiming_set,
 	.get_iotiming	= s3c2412_iotiming_get,
 
-	.resume_clocks	= s3c2412_setup_clocks,
-
 	.debug_io_show  = s3c_cpufreq_debugfs_call(s3c2412_iotiming_debugfs),
 };
 
diff --git a/drivers/cpufreq/s3c2440-cpufreq.c b/drivers/cpufreq/s3c2440-cpufreq.c
index f84ed10755b5..0129f5c70a61 100644
--- a/drivers/cpufreq/s3c2440-cpufreq.c
+++ b/drivers/cpufreq/s3c2440-cpufreq.c
@@ -29,7 +29,6 @@
 
 #include <plat/cpu.h>
 #include <plat/cpu-freq-core.h>
-#include <plat/clock.h>
 
 static struct clk *xtal;
 static struct clk *fclk;
@@ -262,8 +261,6 @@ static struct s3c_cpufreq_info s3c2440_cpufreq_info = {
 	.calc_divs	= s3c2440_cpufreq_calcdivs,
 	.calc_freqtable	= s3c2440_cpufreq_calctable,
 
-	.resume_clocks	= s3c244x_setup_clocks,
-
 	.debug_io_show  = s3c_cpufreq_debugfs_call(s3c2410_iotiming_debugfs),
 };
 
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
index 227ebf7c1eea..d00f1cee4509 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -27,7 +27,6 @@
 #include <asm/mach/map.h>
 
 #include <plat/cpu.h>
-#include <plat/clock.h>
 #include <plat/cpu-freq-core.h>
 
 #include <mach/regs-clock.h>
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index 19a10b89fef7..9a68225a757e 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -16,11 +16,70 @@
 #include <linux/clk.h>
 #include <linux/io.h>
 #include <linux/cpufreq.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
 #include <linux/reboot.h>
 #include <linux/regulator/consumer.h>
 
-#include <mach/map.h>
-#include <mach/regs-clock.h>
+static void __iomem *clk_base;
+static void __iomem *dmc_base[2];
+
+#define S5P_CLKREG(x)			(clk_base + (x))
+
+#define S5P_APLL_LOCK			S5P_CLKREG(0x00)
+#define S5P_APLL_CON			S5P_CLKREG(0x100)
+#define S5P_CLK_SRC0			S5P_CLKREG(0x200)
+#define S5P_CLK_SRC2			S5P_CLKREG(0x208)
+#define S5P_CLK_DIV0			S5P_CLKREG(0x300)
+#define S5P_CLK_DIV2			S5P_CLKREG(0x308)
+#define S5P_CLK_DIV6			S5P_CLKREG(0x318)
+#define S5P_CLKDIV_STAT0		S5P_CLKREG(0x1000)
+#define S5P_CLKDIV_STAT1		S5P_CLKREG(0x1004)
+#define S5P_CLKMUX_STAT0		S5P_CLKREG(0x1100)
+#define S5P_CLKMUX_STAT1		S5P_CLKREG(0x1104)
+
+#define S5P_ARM_MCS_CON			S5P_CLKREG(0x6100)
+
+/* CLKSRC0 */
+#define S5P_CLKSRC0_MUX200_SHIFT	(16)
+#define S5P_CLKSRC0_MUX200_MASK		(0x1 << S5P_CLKSRC0_MUX200_SHIFT)
+#define S5P_CLKSRC0_MUX166_MASK		(0x1<<20)
+#define S5P_CLKSRC0_MUX133_MASK		(0x1<<24)
+
+/* CLKSRC2 */
+#define S5P_CLKSRC2_G3D_SHIFT		(0)
+#define S5P_CLKSRC2_G3D_MASK		(0x3 << S5P_CLKSRC2_G3D_SHIFT)
+#define S5P_CLKSRC2_MFC_SHIFT		(4)
+#define S5P_CLKSRC2_MFC_MASK		(0x3 << S5P_CLKSRC2_MFC_SHIFT)
+
+/* CLKDIV0 */
+#define S5P_CLKDIV0_APLL_SHIFT		(0)
+#define S5P_CLKDIV0_APLL_MASK		(0x7 << S5P_CLKDIV0_APLL_SHIFT)
+#define S5P_CLKDIV0_A2M_SHIFT		(4)
+#define S5P_CLKDIV0_A2M_MASK		(0x7 << S5P_CLKDIV0_A2M_SHIFT)
+#define S5P_CLKDIV0_HCLK200_SHIFT	(8)
+#define S5P_CLKDIV0_HCLK200_MASK	(0x7 << S5P_CLKDIV0_HCLK200_SHIFT)
+#define S5P_CLKDIV0_PCLK100_SHIFT	(12)
+#define S5P_CLKDIV0_PCLK100_MASK	(0x7 << S5P_CLKDIV0_PCLK100_SHIFT)
+#define S5P_CLKDIV0_HCLK166_SHIFT	(16)
+#define S5P_CLKDIV0_HCLK166_MASK	(0xF << S5P_CLKDIV0_HCLK166_SHIFT)
+#define S5P_CLKDIV0_PCLK83_SHIFT	(20)
+#define S5P_CLKDIV0_PCLK83_MASK		(0x7 << S5P_CLKDIV0_PCLK83_SHIFT)
+#define S5P_CLKDIV0_HCLK133_SHIFT	(24)
+#define S5P_CLKDIV0_HCLK133_MASK	(0xF << S5P_CLKDIV0_HCLK133_SHIFT)
+#define S5P_CLKDIV0_PCLK66_SHIFT	(28)
+#define S5P_CLKDIV0_PCLK66_MASK		(0x7 << S5P_CLKDIV0_PCLK66_SHIFT)
+
+/* CLKDIV2 */
+#define S5P_CLKDIV2_G3D_SHIFT		(0)
+#define S5P_CLKDIV2_G3D_MASK		(0xF << S5P_CLKDIV2_G3D_SHIFT)
+#define S5P_CLKDIV2_MFC_SHIFT		(4)
+#define S5P_CLKDIV2_MFC_MASK		(0xF << S5P_CLKDIV2_MFC_SHIFT)
+
+/* CLKDIV6 */
+#define S5P_CLKDIV6_ONEDRAM_SHIFT	(28)
+#define S5P_CLKDIV6_ONEDRAM_MASK	(0xF << S5P_CLKDIV6_ONEDRAM_SHIFT)
 
 static struct clk *dmc0_clk;
 static struct clk *dmc1_clk;
@@ -142,9 +201,9 @@ static void s5pv210_set_refresh(enum s5pv210_dmc_port ch, unsigned long freq)
 	void __iomem *reg = NULL;
 
 	if (ch == DMC0) {
-		reg = (S5P_VA_DMC0 + 0x30);
+		reg = (dmc_base[0] + 0x30);
 	} else if (ch == DMC1) {
-		reg = (S5P_VA_DMC1 + 0x30);
+		reg = (dmc_base[1] + 0x30);
 	} else {
 		printk(KERN_ERR "Cannot find DMC port\n");
 		return;
@@ -472,7 +531,7 @@ static int __init s5pv210_cpu_init(struct cpufreq_policy *policy)
 	 * check_mem_type : This driver only support LPDDR & LPDDR2.
 	 * other memory type is not supported.
 	 */
-	mem_type = check_mem_type(S5P_VA_DMC0);
+	mem_type = check_mem_type(dmc_base[0]);
 
 	if ((mem_type != LPDDR) && (mem_type != LPDDR2)) {
 		printk(KERN_ERR "CPUFreq doesn't support this memory type\n");
@@ -481,10 +540,10 @@ static int __init s5pv210_cpu_init(struct cpufreq_policy *policy)
 	}
 
 	/* Find current refresh counter and frequency each DMC */
-	s5pv210_dram_conf[0].refresh = (__raw_readl(S5P_VA_DMC0 + 0x30) * 1000);
+	s5pv210_dram_conf[0].refresh = (__raw_readl(dmc_base[0] + 0x30) * 1000);
 	s5pv210_dram_conf[0].freq = clk_get_rate(dmc0_clk);
-	s5pv210_dram_conf[1].refresh = (__raw_readl(S5P_VA_DMC1 + 0x30) * 1000);
+	s5pv210_dram_conf[1].refresh = (__raw_readl(dmc_base[1] + 0x30) * 1000);
 	s5pv210_dram_conf[1].freq = clk_get_rate(dmc1_clk);
 
 	policy->suspend_freq = SLEEP_FREQ;
@@ -527,8 +586,55 @@ static struct notifier_block s5pv210_cpufreq_reboot_notifier = {
 	.notifier_call = s5pv210_cpufreq_reboot_notifier_event,
 };
 
-static int __init s5pv210_cpufreq_init(void)
+static int s5pv210_cpufreq_probe(struct platform_device *pdev)
 {
+	struct device_node *np;
+	int id;
+
+	/*
+	 * HACK: This is a temporary workaround to get access to clock
+	 * and DMC controller registers directly and remove static mappings
+	 * and dependencies on platform headers. It is necessary to enable
+	 * S5PV210 multi-platform support and will be removed together with
+	 * this whole driver as soon as S5PV210 gets migrated to use
+	 * cpufreq-cpu0 driver.
+	 */
+	np = of_find_compatible_node(NULL, NULL, "samsung,s5pv210-clock");
+	if (!np) {
+		pr_err("%s: failed to find clock controller DT node\n",
+			__func__);
+		return -ENODEV;
+	}
+
+	clk_base = of_iomap(np, 0);
+	if (!clk_base) {
+		pr_err("%s: failed to map clock registers\n", __func__);
+		return -EFAULT;
+	}
+
+	for_each_compatible_node(np, NULL, "samsung,s5pv210-dmc") {
+		id = of_alias_get_id(np, "dmc");
+		if (id < 0 || id >= ARRAY_SIZE(dmc_base)) {
+			pr_err("%s: failed to get alias of dmc node '%s'\n",
+				__func__, np->name);
+			return id;
+		}
+
+		dmc_base[id] = of_iomap(np, 0);
+		if (!dmc_base[id]) {
+			pr_err("%s: failed to map dmc%d registers\n",
+				__func__, id);
+			return -EFAULT;
+		}
+	}
+
+	for (id = 0; id < ARRAY_SIZE(dmc_base); ++id) {
+		if (!dmc_base[id]) {
+			pr_err("%s: failed to find dmc%d node\n", __func__, id);
+			return -ENODEV;
+		}
+	}
+
 	arm_regulator = regulator_get(NULL, "vddarm");
 	if (IS_ERR(arm_regulator)) {
 		pr_err("failed to get regulator vddarm");
@@ -547,4 +653,11 @@ static int __init s5pv210_cpufreq_init(void)
 
 	return cpufreq_register_driver(&s5pv210_driver);
 }
 
-late_initcall(s5pv210_cpufreq_init);
+static struct platform_driver s5pv210_cpufreq_platdrv = {
+	.driver = {
+		.name	= "s5pv210-cpufreq",
+		.owner	= THIS_MODULE,
+	},
+	.probe = s5pv210_cpufreq_probe,
+};
+module_platform_driver(s5pv210_cpufreq_platdrv);
diff --git a/drivers/cpufreq/sa1110-cpufreq.c b/drivers/cpufreq/sa1110-cpufreq.c
index 546376719d8f..b5befc211172 100644
--- a/drivers/cpufreq/sa1110-cpufreq.c
+++ b/drivers/cpufreq/sa1110-cpufreq.c
@@ -349,7 +349,7 @@ static int __init sa1110_clk_init(void)
 		name = "K4S641632D";
 	if (machine_is_h3100())
 		name = "KM416S4030CT";
-	if (machine_is_jornada720())
+	if (machine_is_jornada720() || machine_is_h3600())
 		name = "K4S281632B-1H";
 	if (machine_is_nanoengine())
 		name = "MT48LC8M16A2TG-75";
diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c
index 8635eec96da5..5fc96d5d656b 100644
--- a/drivers/cpufreq/speedstep-smi.c
+++ b/drivers/cpufreq/speedstep-smi.c
@@ -324,8 +324,8 @@ static int __init speedstep_init(void)
 		return -ENODEV;
 	}
 
-	pr_debug("signature:0x%.8ulx, command:0x%.8ulx, "
-		"event:0x%.8ulx, perf_level:0x%.8ulx.\n",
+	pr_debug("signature:0x%.8x, command:0x%.8x, "
+		"event:0x%.8x, perf_level:0x%.8x.\n",
 		ist_info.signature, ist_info.command,
 		ist_info.event, ist_info.perf_level);
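The cpufreq_ondemand.c hunk above changes how the governor maps CPU load to a target frequency: instead of load * cpuinfo.max_freq / 100, it now interpolates across the whole policy range with freq_next = min_f + load * (max_f - min_f) / 100, and resolves the result with the new CPUFREQ_RELATION_C relation. A minimal stand-alone C sketch of that arithmetic follows (plain user-space code, not the kernel governor; the min/max frequencies and load values are invented for illustration):

	#include <stdio.h>

	/* New ondemand mapping: interpolate the load percentage onto [min_f, max_f]. */
	static unsigned int next_freq(unsigned int load, unsigned int min_f, unsigned int max_f)
	{
		return min_f + load * (max_f - min_f) / 100;
	}

	int main(void)
	{
		unsigned int min_f = 800000, max_f = 2400000;	/* kHz, illustrative values */

		for (unsigned int load = 0; load <= 100; load += 25)
			printf("load %3u%% -> old %7u kHz, new %7u kHz\n",
			       load, load * max_f / 100, next_freq(load, min_f, max_f));
		return 0;
	}

The practical difference is visible at low load: the old formula could request frequencies below the policy minimum (which then got rounded up), while the new one spreads requests evenly between the policy's minimum and maximum.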
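The freq_table.c hunk adds the CPUFREQ_RELATION_C case, which selects the table entry closest to the requested frequency and prefers the higher entry when two are equally distant. A small self-contained sketch of that selection rule, written over a plain array rather than the kernel's cpufreq_frequency_table walk (the table contents here are made up):

	#include <stdio.h>

	/* Index of the entry closest to target; on a tie, take the higher frequency. */
	static unsigned int closest_idx(const unsigned int *table, unsigned int n, unsigned int target)
	{
		unsigned int best = 0, best_diff = ~0u;

		for (unsigned int i = 0; i < n; i++) {
			unsigned int diff = table[i] > target ? table[i] - target
							      : target - table[i];

			if (diff < best_diff ||
			    (diff == best_diff && table[i] > table[best])) {
				best_diff = diff;
				best = i;
			}
		}
		return best;
	}

	int main(void)
	{
		const unsigned int freqs[] = { 600000, 800000, 1000000, 1200000 };	/* kHz, illustrative */
		unsigned int target = 900000;
		unsigned int i = closest_idx(freqs, sizeof(freqs) / sizeof(freqs[0]), target);

		printf("target %u kHz -> %u kHz\n", target, freqs[i]);
		return 0;
	}

With the sample table, a 900000 kHz request is equidistant from 800000 and 1000000, so the tie-break picks 1000000, mirroring the "freq > table[optimal.driver_data].frequency" comparison in the hunk.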