Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--  drivers/cpufreq/Kconfig                    2
-rw-r--r--  drivers/cpufreq/Kconfig.arm               34
-rw-r--r--  drivers/cpufreq/Kconfig.x86               19
-rw-r--r--  drivers/cpufreq/Makefile                  13
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c             8
-rw-r--r--  drivers/cpufreq/cpufreq-cpu0.c            42
-rw-r--r--  drivers/cpufreq/cpufreq.c                460
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c    19
-rw-r--r--  drivers/cpufreq/cpufreq_governor.c       131
-rw-r--r--  drivers/cpufreq/cpufreq_governor.h         6
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c        70
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c           49
-rw-r--r--  drivers/cpufreq/cpufreq_userspace.c        2
-rw-r--r--  drivers/cpufreq/dbx500-cpufreq.c (renamed from drivers/cpufreq/db8500-cpufreq.c)  109
-rw-r--r--  drivers/cpufreq/exynos-cpufreq.c         192
-rw-r--r--  drivers/cpufreq/exynos-cpufreq.h          48
-rw-r--r--  drivers/cpufreq/exynos4210-cpufreq.c     153
-rw-r--r--  drivers/cpufreq/exynos4x12-cpufreq.c     389
-rw-r--r--  drivers/cpufreq/exynos5250-cpufreq.c     179
-rw-r--r--  drivers/cpufreq/freq_table.c              15
-rw-r--r--  drivers/cpufreq/highbank-cpufreq.c       120
-rw-r--r--  drivers/cpufreq/imx6q-cpufreq.c          336
-rw-r--r--  drivers/cpufreq/intel_pstate.c           823
-rw-r--r--  drivers/cpufreq/kirkwood-cpufreq.c       259
-rw-r--r--  drivers/cpufreq/maple-cpufreq.c            2
-rw-r--r--  drivers/cpufreq/omap-cpufreq.c             4
-rw-r--r--  drivers/cpufreq/powernow-k8.c             46
-rw-r--r--  drivers/cpufreq/spear-cpufreq.c           12
28 files changed, 2404 insertions, 1138 deletions
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index e0a899f25e37..cbcb21e32771 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -185,7 +185,7 @@ config CPU_FREQ_GOV_CONSERVATIVE
If in doubt, say N.
config GENERIC_CPUFREQ_CPU0
- bool "Generic CPU0 cpufreq driver"
+ tristate "Generic CPU0 cpufreq driver"
depends on HAVE_CLK && REGULATOR && PM_OPP && OF
select CPU_FREQ_TABLE
help
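With the switch from bool to tristate the driver can also be built as a module. A minimal illustration, assuming a typical .config fragment (not part of this patch):

	CONFIG_GENERIC_CPUFREQ_CPU0=m

This produces cpufreq-cpu0.ko, matching the module_platform_driver() conversion in the cpufreq-cpu0.c hunks below.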
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index a0b3661d90b0..030ddf6dd3f1 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -21,8 +21,8 @@ config ARM_S3C2416_CPUFREQ
If in doubt, say N.
config ARM_S3C2416_CPUFREQ_VCORESCALE
- bool "Allow voltage scaling for S3C2416 arm core (EXPERIMENTAL)"
- depends on ARM_S3C2416_CPUFREQ && REGULATOR && EXPERIMENTAL
+ bool "Allow voltage scaling for S3C2416 arm core"
+ depends on ARM_S3C2416_CPUFREQ && REGULATOR
help
Enable CPU voltage scaling when entering the dvs mode.
It uses information gathered through existing hardware and
@@ -77,9 +77,39 @@ config ARM_EXYNOS5250_CPUFREQ
This adds the CPUFreq driver for Samsung EXYNOS5250
SoC.
+config ARM_KIRKWOOD_CPUFREQ
+ def_bool ARCH_KIRKWOOD && OF
+ help
+ This adds the CPUFreq driver for Marvell Kirkwood
+ SoCs.
+
+config ARM_IMX6Q_CPUFREQ
+ tristate "Freescale i.MX6Q cpufreq support"
+ depends on SOC_IMX6Q
+ depends on REGULATOR_ANATOP
+ help
+ This adds cpufreq driver support for Freescale i.MX6Q SOC.
+
+ If in doubt, say N.
+
config ARM_SPEAR_CPUFREQ
bool "SPEAr CPUFreq support"
depends on PLAT_SPEAR
default y
help
This adds the CPUFreq driver support for SPEAr SOCs.
+
+config ARM_HIGHBANK_CPUFREQ
+ tristate "Calxeda Highbank-based"
+ depends on ARCH_HIGHBANK
+ select CPU_FREQ_TABLE
+ select GENERIC_CPUFREQ_CPU0
+ select PM_OPP
+ select REGULATOR
+
+ default m
+ help
+ This adds the CPUFreq driver for Calxeda Highbank SoC
+ based boards.
+
+ If in doubt, say N.
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index 7227cd734042..d7dc0ed6adb0 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -2,6 +2,19 @@
# x86 CPU Frequency scaling drivers
#
+config X86_INTEL_PSTATE
+ bool "Intel P state control"
+ depends on X86
+ help
+ This driver provides P-state control for Intel Core processors.
+ The driver implements an internal governor and will become
+ the scaling driver and governor for Sandy Bridge processors.
+
+ When this driver is enabled it will become the preferred
+ scaling driver for Sandy Bridge processors.
+
+ If in doubt, say N.
+
config X86_PCC_CPUFREQ
tristate "Processor Clocking Control interface driver"
depends on ACPI && ACPI_PROCESSOR
@@ -174,7 +187,7 @@ config X86_SPEEDSTEP_ICH
config X86_SPEEDSTEP_SMI
tristate "Intel SpeedStep on 440BX/ZX/MX chipsets (SMI interface)"
select CPU_FREQ_TABLE
- depends on X86_32 && EXPERIMENTAL
+ depends on X86_32
help
This adds the CPUFreq driver for certain mobile Intel Pentium III
(Coppermine), all mobile Intel Pentium III-M (Tualatin)
@@ -206,7 +219,7 @@ config X86_P4_CLOCKMOD
config X86_CPUFREQ_NFORCE2
tristate "nVidia nForce2 FSB changing"
- depends on X86_32 && EXPERIMENTAL
+ depends on X86_32
help
This adds the CPUFreq driver for FSB changing on nVidia nForce2
platforms.
@@ -242,7 +255,7 @@ config X86_LONGHAUL
config X86_E_POWERSAVER
tristate "VIA C7 Enhanced PowerSaver (DANGEROUS)"
select CPU_FREQ_TABLE
- depends on X86_32 && EXPERIMENTAL
+ depends on X86_32
help
This adds the CPUFreq driver for VIA C7 processors. However, this driver
does not have any safeguards to prevent operating the CPU out of spec
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index fadc4d496e2f..863fd1865d45 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -19,11 +19,12 @@ obj-$(CONFIG_GENERIC_CPUFREQ_CPU0) += cpufreq-cpu0.o
##################################################################################
# x86 drivers.
# Link order matters. K8 is preferred to ACPI because of firmware bugs in early
-# K8 systems. ACPI is preferred to all other hardware-specific drivers.
+# K8 systems. This is still the case, but acpi-cpufreq errors out so that
+# powernow-k8 can then load. ACPI is preferred to all other hardware-specific drivers.
# speedstep-* is preferred over p4-clockmod.
-obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o
obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o mperf.o
+obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o
obj-$(CONFIG_X86_PCC_CPUFREQ) += pcc-cpufreq.o
obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o
obj-$(CONFIG_X86_POWERNOW_K7) += powernow-k7.o
@@ -39,10 +40,11 @@ obj-$(CONFIG_X86_SPEEDSTEP_SMI) += speedstep-smi.o
obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o
obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o
obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o
+obj-$(CONFIG_X86_INTEL_PSTATE) += intel_pstate.o
##################################################################################
# ARM SoC drivers
-obj-$(CONFIG_UX500_SOC_DB8500) += db8500-cpufreq.o
+obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o
obj-$(CONFIG_ARM_S3C2416_CPUFREQ) += s3c2416-cpufreq.o
obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o
obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o
@@ -50,8 +52,11 @@ obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += exynos-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ) += exynos4x12-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS5250_CPUFREQ) += exynos5250-cpufreq.o
-obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
+obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o
+obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o
+obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o
+obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o
##################################################################################
# PowerPC platform drivers
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 7b0d49d78c61..937bc286591f 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -734,7 +734,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
#ifdef CONFIG_SMP
dmi_check_system(sw_any_bug_dmi_table);
- if (bios_with_sw_any_bug && cpumask_weight(policy->cpus) == 1) {
+ if (bios_with_sw_any_bug && !policy_is_shared(policy)) {
policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
cpumask_copy(policy->cpus, cpu_core_mask(cpu));
}
@@ -762,6 +762,12 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
switch (perf->control_register.space_id) {
case ACPI_ADR_SPACE_SYSTEM_IO:
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+ boot_cpu_data.x86 == 0xf) {
+ pr_debug("AMD K8 systems must use native drivers.\n");
+ result = -ENODEV;
+ goto err_unreg;
+ }
pr_debug("SYSTEM IO addr space\n");
data->cpu_feature = SYSTEM_IO_CAPABLE;
break;
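Read together with the Makefile reordering above, the intent is: acpi-cpufreq now links (and therefore probes) before powernow-k8, but on AMD family 0xf (K8) parts whose P-state control registers live in SYSTEM_IO space it returns -ENODEV, so the native powernow-k8 driver can still bind.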
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index debc5a7c8db6..4e5b7fb8927c 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -12,12 +12,12 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
-#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/opp.h>
+#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
@@ -146,7 +146,6 @@ static int cpu0_cpufreq_init(struct cpufreq_policy *policy)
* share the same clock and voltage. Use cpufreq affected_cpus
* interface to have all CPUs scaled together.
*/
- policy->shared_type = CPUFREQ_SHARED_TYPE_ANY;
cpumask_setall(policy->cpus);
cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
@@ -177,34 +176,32 @@ static struct cpufreq_driver cpu0_cpufreq_driver = {
.attr = cpu0_cpufreq_attr,
};
-static int cpu0_cpufreq_driver_init(void)
+static int cpu0_cpufreq_probe(struct platform_device *pdev)
{
struct device_node *np;
int ret;
- np = of_find_node_by_path("/cpus/cpu@0");
+ for_each_child_of_node(of_find_node_by_path("/cpus"), np) {
+ if (of_get_property(np, "operating-points", NULL))
+ break;
+ }
+
if (!np) {
pr_err("failed to find cpu0 node\n");
return -ENOENT;
}
- cpu_dev = get_cpu_device(0);
- if (!cpu_dev) {
- pr_err("failed to get cpu0 device\n");
- ret = -ENODEV;
- goto out_put_node;
- }
-
+ cpu_dev = &pdev->dev;
cpu_dev->of_node = np;
- cpu_clk = clk_get(cpu_dev, NULL);
+ cpu_clk = devm_clk_get(cpu_dev, NULL);
if (IS_ERR(cpu_clk)) {
ret = PTR_ERR(cpu_clk);
pr_err("failed to get cpu0 clock: %d\n", ret);
goto out_put_node;
}
- cpu_reg = regulator_get(cpu_dev, "cpu0");
+ cpu_reg = devm_regulator_get(cpu_dev, "cpu0");
if (IS_ERR(cpu_reg)) {
pr_warn("failed to get cpu0 regulator\n");
cpu_reg = NULL;
@@ -267,7 +264,24 @@ out_put_node:
of_node_put(np);
return ret;
}
-late_initcall(cpu0_cpufreq_driver_init);
+
+static int cpu0_cpufreq_remove(struct platform_device *pdev)
+{
+ cpufreq_unregister_driver(&cpu0_cpufreq_driver);
+ opp_free_cpufreq_table(cpu_dev, &freq_table);
+
+ return 0;
+}
+
+static struct platform_driver cpu0_cpufreq_platdrv = {
+ .driver = {
+ .name = "cpufreq-cpu0",
+ .owner = THIS_MODULE,
+ },
+ .probe = cpu0_cpufreq_probe,
+ .remove = cpu0_cpufreq_remove,
+};
+module_platform_driver(cpu0_cpufreq_platdrv);
MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
MODULE_DESCRIPTION("Generic CPU0 cpufreq driver");
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 1f93dbd72355..b02824d092e7 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -59,8 +59,6 @@ static DEFINE_SPINLOCK(cpufreq_driver_lock);
* mode before doing so.
*
* Additional rules:
- * - All holders of the lock should check to make sure that the CPU they
- * are concerned with are online after they get the lock.
* - Governor routines that can be called in cpufreq hotplug path should not
* take this sem as top level hotplug notifier handler takes this.
* - Lock should not be held across
@@ -70,38 +68,28 @@ static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
#define lock_policy_rwsem(mode, cpu) \
-static int lock_policy_rwsem_##mode \
-(int cpu) \
+static int lock_policy_rwsem_##mode(int cpu) \
{ \
int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
BUG_ON(policy_cpu == -1); \
down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
- if (unlikely(!cpu_online(cpu))) { \
- up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
- return -1; \
- } \
\
return 0; \
}
lock_policy_rwsem(read, cpu);
-
lock_policy_rwsem(write, cpu);
-static void unlock_policy_rwsem_read(int cpu)
-{
- int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
- BUG_ON(policy_cpu == -1);
- up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
-}
-
-static void unlock_policy_rwsem_write(int cpu)
-{
- int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
- BUG_ON(policy_cpu == -1);
- up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
+#define unlock_policy_rwsem(mode, cpu) \
+static void unlock_policy_rwsem_##mode(int cpu) \
+{ \
+ int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
+ BUG_ON(policy_cpu == -1); \
+ up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
}
+unlock_policy_rwsem(read, cpu);
+unlock_policy_rwsem(write, cpu);
/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
@@ -180,6 +168,9 @@ err_out:
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
+ if (cpufreq_disabled())
+ return NULL;
+
return __cpufreq_cpu_get(cpu, false);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
@@ -198,6 +189,9 @@ static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
void cpufreq_cpu_put(struct cpufreq_policy *data)
{
+ if (cpufreq_disabled())
+ return;
+
__cpufreq_cpu_put(data, false);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
@@ -261,14 +255,21 @@ static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
{
struct cpufreq_policy *policy;
+ unsigned long flags;
BUG_ON(irqs_disabled());
+ if (cpufreq_disabled())
+ return;
+
freqs->flags = cpufreq_driver->flags;
pr_debug("notification %u of frequency transition to %u kHz\n",
state, freqs->new);
+ spin_lock_irqsave(&cpufreq_driver_lock, flags);
policy = per_cpu(cpufreq_cpu_data, freqs->cpu);
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
switch (state) {
case CPUFREQ_PRECHANGE:
@@ -294,7 +295,6 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
(unsigned long)freqs->cpu);
- trace_power_frequency(POWER_PSTATE, freqs->new, freqs->cpu);
trace_cpu_frequency(freqs->new, freqs->cpu);
srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
CPUFREQ_POSTCHANGE, freqs);
@@ -543,8 +543,6 @@ static ssize_t show_cpus(const struct cpumask *mask, char *buf)
*/
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
- if (cpumask_empty(policy->related_cpus))
- return show_cpus(policy->cpus, buf);
return show_cpus(policy->related_cpus, buf);
}
@@ -700,87 +698,6 @@ static struct kobj_type ktype_cpufreq = {
.release = cpufreq_sysfs_release,
};
-/*
- * Returns:
- * Negative: Failure
- * 0: Success
- * Positive: When we have a managed CPU and the sysfs got symlinked
- */
-static int cpufreq_add_dev_policy(unsigned int cpu,
- struct cpufreq_policy *policy,
- struct device *dev)
-{
- int ret = 0;
-#ifdef CONFIG_SMP
- unsigned long flags;
- unsigned int j;
-#ifdef CONFIG_HOTPLUG_CPU
- struct cpufreq_governor *gov;
-
- gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
- if (gov) {
- policy->governor = gov;
- pr_debug("Restoring governor %s for cpu %d\n",
- policy->governor->name, cpu);
- }
-#endif
-
- for_each_cpu(j, policy->cpus) {
- struct cpufreq_policy *managed_policy;
-
- if (cpu == j)
- continue;
-
- /* Check for existing affected CPUs.
- * They may not be aware of it due to CPU Hotplug.
- * cpufreq_cpu_put is called when the device is removed
- * in __cpufreq_remove_dev()
- */
- managed_policy = cpufreq_cpu_get(j);
- if (unlikely(managed_policy)) {
-
- /* Set proper policy_cpu */
- unlock_policy_rwsem_write(cpu);
- per_cpu(cpufreq_policy_cpu, cpu) = managed_policy->cpu;
-
- if (lock_policy_rwsem_write(cpu) < 0) {
- /* Should not go through policy unlock path */
- if (cpufreq_driver->exit)
- cpufreq_driver->exit(policy);
- cpufreq_cpu_put(managed_policy);
- return -EBUSY;
- }
-
- spin_lock_irqsave(&cpufreq_driver_lock, flags);
- cpumask_copy(managed_policy->cpus, policy->cpus);
- per_cpu(cpufreq_cpu_data, cpu) = managed_policy;
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
- pr_debug("CPU already managed, adding link\n");
- ret = sysfs_create_link(&dev->kobj,
- &managed_policy->kobj,
- "cpufreq");
- if (ret)
- cpufreq_cpu_put(managed_policy);
- /*
- * Success. We only needed to be added to the mask.
- * Call driver->exit() because only the cpu parent of
- * the kobj needed to call init().
- */
- if (cpufreq_driver->exit)
- cpufreq_driver->exit(policy);
-
- if (!ret)
- return 1;
- else
- return ret;
- }
- }
-#endif
- return ret;
-}
-
-
/* symlink affected CPUs */
static int cpufreq_add_dev_symlink(unsigned int cpu,
struct cpufreq_policy *policy)
@@ -794,8 +711,6 @@ static int cpufreq_add_dev_symlink(unsigned int cpu,
if (j == cpu)
continue;
- if (!cpu_online(j))
- continue;
pr_debug("CPU %u already managed, adding link\n", j);
managed_policy = cpufreq_cpu_get(cpu);
@@ -852,8 +767,6 @@ static int cpufreq_add_dev_interface(unsigned int cpu,
spin_lock_irqsave(&cpufreq_driver_lock, flags);
for_each_cpu(j, policy->cpus) {
- if (!cpu_online(j))
- continue;
per_cpu(cpufreq_cpu_data, j) = policy;
per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
}
@@ -885,6 +798,42 @@ err_out_kobj_put:
return ret;
}
+#ifdef CONFIG_HOTPLUG_CPU
+static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
+ struct device *dev)
+{
+ struct cpufreq_policy *policy;
+ int ret = 0;
+ unsigned long flags;
+
+ policy = cpufreq_cpu_get(sibling);
+ WARN_ON(!policy);
+
+ __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+
+ lock_policy_rwsem_write(sibling);
+
+ spin_lock_irqsave(&cpufreq_driver_lock, flags);
+
+ cpumask_set_cpu(cpu, policy->cpus);
+ per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
+ per_cpu(cpufreq_cpu_data, cpu) = policy;
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ unlock_policy_rwsem_write(sibling);
+
+ __cpufreq_governor(policy, CPUFREQ_GOV_START);
+ __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+
+ ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
+ if (ret) {
+ cpufreq_cpu_put(policy);
+ return ret;
+ }
+
+ return 0;
+}
+#endif
/**
* cpufreq_add_dev - add a CPU device
@@ -897,12 +846,12 @@ err_out_kobj_put:
*/
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
- unsigned int cpu = dev->id;
- int ret = 0, found = 0;
+ unsigned int j, cpu = dev->id;
+ int ret = -ENOMEM;
struct cpufreq_policy *policy;
unsigned long flags;
- unsigned int j;
#ifdef CONFIG_HOTPLUG_CPU
+ struct cpufreq_governor *gov;
int sibling;
#endif
@@ -919,6 +868,19 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
cpufreq_cpu_put(policy);
return 0;
}
+
+#ifdef CONFIG_HOTPLUG_CPU
+ /* Check if this cpu was hot-unplugged earlier and has siblings */
+ spin_lock_irqsave(&cpufreq_driver_lock, flags);
+ for_each_online_cpu(sibling) {
+ struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
+ if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ return cpufreq_add_policy_cpu(cpu, sibling, dev);
+ }
+ }
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+#endif
#endif
if (!try_module_get(cpufreq_driver->owner)) {
@@ -926,7 +888,6 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
goto module_out;
}
- ret = -ENOMEM;
policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
if (!policy)
goto nomem_out;
@@ -938,66 +899,58 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
goto err_free_cpumask;
policy->cpu = cpu;
+ policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
cpumask_copy(policy->cpus, cpumask_of(cpu));
/* Initially set CPU itself as the policy_cpu */
per_cpu(cpufreq_policy_cpu, cpu) = cpu;
- ret = (lock_policy_rwsem_write(cpu) < 0);
- WARN_ON(ret);
init_completion(&policy->kobj_unregister);
INIT_WORK(&policy->update, handle_update);
- /* Set governor before ->init, so that driver could check it */
-#ifdef CONFIG_HOTPLUG_CPU
- for_each_online_cpu(sibling) {
- struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
- if (cp && cp->governor &&
- (cpumask_test_cpu(cpu, cp->related_cpus))) {
- policy->governor = cp->governor;
- found = 1;
- break;
- }
- }
-#endif
- if (!found)
- policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
/* call driver. From then on the cpufreq must be able
* to accept all calls to ->verify and ->setpolicy for this CPU
*/
ret = cpufreq_driver->init(policy);
if (ret) {
pr_debug("initialization failed\n");
- goto err_unlock_policy;
+ goto err_set_policy_cpu;
}
+
+ /* related cpus should at least have policy->cpus */
+ cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
+
+ /*
+ * affected cpus must always be the ones that are online. We aren't
+ * managing offline cpus here.
+ */
+ cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
+
policy->user_policy.min = policy->min;
policy->user_policy.max = policy->max;
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_START, policy);
- ret = cpufreq_add_dev_policy(cpu, policy, dev);
- if (ret) {
- if (ret > 0)
- /* This is a managed cpu, symlink created,
- exit with 0 */
- ret = 0;
- goto err_unlock_policy;
+#ifdef CONFIG_HOTPLUG_CPU
+ gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
+ if (gov) {
+ policy->governor = gov;
+ pr_debug("Restoring governor %s for cpu %d\n",
+ policy->governor->name, cpu);
}
+#endif
ret = cpufreq_add_dev_interface(cpu, policy, dev);
if (ret)
goto err_out_unregister;
- unlock_policy_rwsem_write(cpu);
-
kobject_uevent(&policy->kobj, KOBJ_ADD);
module_put(cpufreq_driver->owner);
pr_debug("initialization complete\n");
return 0;
-
err_out_unregister:
spin_lock_irqsave(&cpufreq_driver_lock, flags);
for_each_cpu(j, policy->cpus)
@@ -1007,8 +960,8 @@ err_out_unregister:
kobject_put(&policy->kobj);
wait_for_completion(&policy->kobj_unregister);
-err_unlock_policy:
- unlock_policy_rwsem_write(cpu);
+err_set_policy_cpu:
+ per_cpu(cpufreq_policy_cpu, cpu) = -1;
free_cpumask_var(policy->related_cpus);
err_free_cpumask:
free_cpumask_var(policy->cpus);
@@ -1020,6 +973,22 @@ module_out:
return ret;
}
+static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
+{
+ int j;
+
+ policy->last_cpu = policy->cpu;
+ policy->cpu = cpu;
+
+ for_each_cpu(j, policy->cpus)
+ per_cpu(cpufreq_policy_cpu, j) = cpu;
+
+#ifdef CONFIG_CPU_FREQ_TABLE
+ cpufreq_frequency_table_update_policy_cpu(policy);
+#endif
+ blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+ CPUFREQ_UPDATE_POLICY_CPU, policy);
+}
/**
* __cpufreq_remove_dev - remove a CPU device
@@ -1030,129 +999,103 @@ module_out:
*/
static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
- unsigned int cpu = dev->id;
+ unsigned int cpu = dev->id, ret, cpus;
unsigned long flags;
struct cpufreq_policy *data;
struct kobject *kobj;
struct completion *cmp;
-#ifdef CONFIG_SMP
struct device *cpu_dev;
- unsigned int j;
-#endif
- pr_debug("unregistering CPU %u\n", cpu);
+ pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
spin_lock_irqsave(&cpufreq_driver_lock, flags);
+
data = per_cpu(cpufreq_cpu_data, cpu);
+ per_cpu(cpufreq_cpu_data, cpu) = NULL;
+
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
if (!data) {
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- unlock_policy_rwsem_write(cpu);
+ pr_debug("%s: No cpu_data found\n", __func__);
return -EINVAL;
}
- per_cpu(cpufreq_cpu_data, cpu) = NULL;
+ if (cpufreq_driver->target)
+ __cpufreq_governor(data, CPUFREQ_GOV_STOP);
-#ifdef CONFIG_SMP
- /* if this isn't the CPU which is the parent of the kobj, we
- * only need to unlink, put and exit
- */
- if (unlikely(cpu != data->cpu)) {
- pr_debug("removing link\n");
- cpumask_clear_cpu(cpu, data->cpus);
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- kobj = &dev->kobj;
- cpufreq_cpu_put(data);
- unlock_policy_rwsem_write(cpu);
- sysfs_remove_link(kobj, "cpufreq");
- return 0;
- }
+#ifdef CONFIG_HOTPLUG_CPU
+ if (!cpufreq_driver->setpolicy)
+ strncpy(per_cpu(cpufreq_cpu_governor, cpu),
+ data->governor->name, CPUFREQ_NAME_LEN);
#endif
-#ifdef CONFIG_SMP
+ WARN_ON(lock_policy_rwsem_write(cpu));
+ cpus = cpumask_weight(data->cpus);
+ cpumask_clear_cpu(cpu, data->cpus);
+ unlock_policy_rwsem_write(cpu);
-#ifdef CONFIG_HOTPLUG_CPU
- strncpy(per_cpu(cpufreq_cpu_governor, cpu), data->governor->name,
- CPUFREQ_NAME_LEN);
-#endif
+ if (cpu != data->cpu) {
+ sysfs_remove_link(&dev->kobj, "cpufreq");
+ } else if (cpus > 1) {
+ /* first sibling now owns the new sysfs dir */
+ cpu_dev = get_cpu_device(cpumask_first(data->cpus));
+ sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
+ ret = kobject_move(&data->kobj, &cpu_dev->kobj);
+ if (ret) {
+ pr_err("%s: Failed to move kobj: %d", __func__, ret);
- /* if we have other CPUs still registered, we need to unlink them,
- * or else wait_for_completion below will lock up. Clean the
- * per_cpu(cpufreq_cpu_data) while holding the lock, and remove
- * the sysfs links afterwards.
- */
- if (unlikely(cpumask_weight(data->cpus) > 1)) {
- for_each_cpu(j, data->cpus) {
- if (j == cpu)
- continue;
- per_cpu(cpufreq_cpu_data, j) = NULL;
- }
- }
+ WARN_ON(lock_policy_rwsem_write(cpu));
+ cpumask_set_cpu(cpu, data->cpus);
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ spin_lock_irqsave(&cpufreq_driver_lock, flags);
+ per_cpu(cpufreq_cpu_data, cpu) = data;
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- if (unlikely(cpumask_weight(data->cpus) > 1)) {
- for_each_cpu(j, data->cpus) {
- if (j == cpu)
- continue;
- pr_debug("removing link for cpu %u\n", j);
-#ifdef CONFIG_HOTPLUG_CPU
- strncpy(per_cpu(cpufreq_cpu_governor, j),
- data->governor->name, CPUFREQ_NAME_LEN);
-#endif
- cpu_dev = get_cpu_device(j);
- kobj = &cpu_dev->kobj;
unlock_policy_rwsem_write(cpu);
- sysfs_remove_link(kobj, "cpufreq");
- lock_policy_rwsem_write(cpu);
- cpufreq_cpu_put(data);
- }
- }
-#else
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-#endif
- if (cpufreq_driver->target)
- __cpufreq_governor(data, CPUFREQ_GOV_STOP);
+ ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
+ "cpufreq");
+ return -EINVAL;
+ }
- kobj = &data->kobj;
- cmp = &data->kobj_unregister;
- unlock_policy_rwsem_write(cpu);
- kobject_put(kobj);
+ WARN_ON(lock_policy_rwsem_write(cpu));
+ update_policy_cpu(data, cpu_dev->id);
+ unlock_policy_rwsem_write(cpu);
+ pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
+ __func__, cpu_dev->id, cpu);
+ }
- /* we need to make sure that the underlying kobj is actually
- * not referenced anymore by anybody before we proceed with
- * unloading.
- */
- pr_debug("waiting for dropping of refcount\n");
- wait_for_completion(cmp);
- pr_debug("wait complete\n");
+ pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
+ cpufreq_cpu_put(data);
- lock_policy_rwsem_write(cpu);
- if (cpufreq_driver->exit)
- cpufreq_driver->exit(data);
- unlock_policy_rwsem_write(cpu);
+ /* If cpu is last user of policy, free policy */
+ if (cpus == 1) {
+ lock_policy_rwsem_read(cpu);
+ kobj = &data->kobj;
+ cmp = &data->kobj_unregister;
+ unlock_policy_rwsem_read(cpu);
+ kobject_put(kobj);
+
+ /* we need to make sure that the underlying kobj is actually
+ * not referenced anymore by anybody before we proceed with
+ * unloading.
+ */
+ pr_debug("waiting for dropping of refcount\n");
+ wait_for_completion(cmp);
+ pr_debug("wait complete\n");
-#ifdef CONFIG_HOTPLUG_CPU
- /* when the CPU which is the parent of the kobj is hotplugged
- * offline, check for siblings, and create cpufreq sysfs interface
- * and symlinks
- */
- if (unlikely(cpumask_weight(data->cpus) > 1)) {
- /* first sibling now owns the new sysfs dir */
- cpumask_clear_cpu(cpu, data->cpus);
- cpufreq_add_dev(get_cpu_device(cpumask_first(data->cpus)), NULL);
+ if (cpufreq_driver->exit)
+ cpufreq_driver->exit(data);
- /* finally remove our own symlink */
- lock_policy_rwsem_write(cpu);
- __cpufreq_remove_dev(dev, sif);
+ free_cpumask_var(data->related_cpus);
+ free_cpumask_var(data->cpus);
+ kfree(data);
+ } else if (cpufreq_driver->target) {
+ __cpufreq_governor(data, CPUFREQ_GOV_START);
+ __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
}
-#endif
-
- free_cpumask_var(data->related_cpus);
- free_cpumask_var(data->cpus);
- kfree(data);
+ per_cpu(cpufreq_policy_cpu, cpu) = -1;
return 0;
}
@@ -1165,9 +1108,6 @@ static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
if (cpu_is_offline(cpu))
return 0;
- if (unlikely(lock_policy_rwsem_write(cpu)))
- BUG();
-
retval = __cpufreq_remove_dev(dev, sif);
return retval;
}
@@ -1216,9 +1156,13 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
*/
unsigned int cpufreq_quick_get(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy;
unsigned int ret_freq = 0;
+ if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
+ return cpufreq_driver->get(cpu);
+
+ policy = cpufreq_cpu_get(cpu);
if (policy) {
ret_freq = policy->cur;
cpufreq_cpu_put(policy);
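The new early return covers drivers that implement ->setpolicy rather than ->target (intel_pstate, added elsewhere in this diff, is one such driver): the core does not maintain policy->cur for them, so the quick path must query the driver's ->get() callback directly.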
@@ -1386,6 +1330,20 @@ static struct syscore_ops cpufreq_syscore_ops = {
.resume = cpufreq_bp_resume,
};
+/**
+ * cpufreq_get_current_driver - return current driver's name
+ *
+ * Return the name string of the currently loaded cpufreq driver
+ * or NULL, if none.
+ */
+const char *cpufreq_get_current_driver(void)
+{
+ if (cpufreq_driver)
+ return cpufreq_driver->name;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
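A hedged usage sketch for the new helper; the caller below is illustrative and not part of the patch:

static void report_scaling_driver(void)
{
	const char *name = cpufreq_get_current_driver();

	pr_info("active cpufreq driver: %s\n", name ? name : "none");
}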
/*********************************************************************
* NOTIFIER LISTS INTERFACE *
@@ -1408,6 +1366,9 @@ int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
int ret;
+ if (cpufreq_disabled())
+ return -EINVAL;
+
WARN_ON(!init_cpufreq_transition_notifier_list_called);
switch (list) {
@@ -1442,6 +1403,9 @@ int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
int ret;
+ if (cpufreq_disabled())
+ return -EINVAL;
+
switch (list) {
case CPUFREQ_TRANSITION_NOTIFIER:
ret = srcu_notifier_chain_unregister(
@@ -1487,7 +1451,7 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
if (target_freq == policy->cur)
return 0;
- if (cpu_online(policy->cpu) && cpufreq_driver->target)
+ if (cpufreq_driver->target)
retval = cpufreq_driver->target(policy, target_freq, relation);
return retval;
@@ -1522,7 +1486,10 @@ int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
{
int ret = 0;
- if (!(cpu_online(cpu) && cpufreq_driver->getavg))
+ if (cpufreq_disabled())
+ return ret;
+
+ if (!cpufreq_driver->getavg)
return 0;
policy = cpufreq_cpu_get(policy->cpu);
@@ -1577,6 +1544,11 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
policy->cpu, event);
ret = policy->governor->governor(policy, event);
+ if (event == CPUFREQ_GOV_START)
+ policy->governor->initialized++;
+ else if (event == CPUFREQ_GOV_STOP)
+ policy->governor->initialized--;
+
/* we keep one module reference alive for
each CPU governed by this CPU */
if ((event != CPUFREQ_GOV_START) || ret)
@@ -1600,6 +1572,7 @@ int cpufreq_register_governor(struct cpufreq_governor *governor)
mutex_lock(&cpufreq_governor_mutex);
+ governor->initialized = 0;
err = -EBUSY;
if (__find_governor(governor->name) == NULL) {
err = 0;
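The initialized field acts as a usage count across policies. An assumed sequence, two policies sharing one governor:

/*
 * GOV_START on policy A: initialized 0 -> 1 (first user: create global
 *                        sysfs group, register transition notifier)
 * GOV_START on policy B: initialized 1 -> 2
 * GOV_STOP  on policy B: initialized 2 -> 1
 * GOV_STOP  on policy A: initialized 1 -> 0 (last user: tear down)
 */

The cpufreq_governor.c hunks below key their first-time setup and last-time teardown off exactly this counter.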
@@ -1797,7 +1770,7 @@ int cpufreq_update_policy(unsigned int cpu)
pr_debug("Driver did not initialize current freq");
data->cur = policy.cur;
} else {
- if (data->cur != policy.cur)
+ if (data->cur != policy.cur && cpufreq_driver->target)
cpufreq_out_of_sync(cpu, data->cur,
policy.cur);
}
@@ -1829,9 +1802,6 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
break;
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
- if (unlikely(lock_policy_rwsem_write(cpu)))
- BUG();
-
__cpufreq_remove_dev(dev, NULL);
break;
case CPU_DOWN_FAILED:
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 64ef737e7e72..4fd0006b1291 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -25,7 +25,7 @@
#include "cpufreq_governor.h"
-/* Conservative governor macors */
+/* Conservative governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD (80)
#define DEF_FREQUENCY_DOWN_THRESHOLD (20)
#define DEF_SAMPLING_DOWN_FACTOR (1)
@@ -113,17 +113,20 @@ static void cs_check_cpu(int cpu, unsigned int load)
static void cs_dbs_timer(struct work_struct *work)
{
+ struct delayed_work *dw = to_delayed_work(work);
struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
struct cs_cpu_dbs_info_s, cdbs.work.work);
- unsigned int cpu = dbs_info->cdbs.cpu;
+ unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
+ struct cs_cpu_dbs_info_s *core_dbs_info = &per_cpu(cs_cpu_dbs_info,
+ cpu);
int delay = delay_for_sampling_rate(cs_tuners.sampling_rate);
- mutex_lock(&dbs_info->cdbs.timer_mutex);
+ mutex_lock(&core_dbs_info->cdbs.timer_mutex);
+ if (need_load_eval(&core_dbs_info->cdbs, cs_tuners.sampling_rate))
+ dbs_check_cpu(&cs_dbs_data, cpu);
- dbs_check_cpu(&cs_dbs_data, cpu);
-
- schedule_delayed_work_on(cpu, &dbs_info->cdbs.work, delay);
- mutex_unlock(&dbs_info->cdbs.timer_mutex);
+ schedule_delayed_work_on(smp_processor_id(), dw, delay);
+ mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
}
static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
@@ -141,7 +144,7 @@ static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
/*
* we only care if our internally tracked freq moves outside the 'valid'
- * ranges of freqency available to us otherwise we do not change it
+ * ranges of frequency available to us otherwise we do not change it
*/
if (dbs_info->requested_freq > policy->max
|| dbs_info->requested_freq < policy->min)
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 6c5f1d383cdc..5a76086ff09b 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -161,25 +161,48 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
}
EXPORT_SYMBOL_GPL(dbs_check_cpu);
-static inline void dbs_timer_init(struct dbs_data *dbs_data,
- struct cpu_dbs_common_info *cdbs, unsigned int sampling_rate)
+static inline void dbs_timer_init(struct dbs_data *dbs_data, int cpu,
+ unsigned int sampling_rate)
{
int delay = delay_for_sampling_rate(sampling_rate);
+ struct cpu_dbs_common_info *cdbs = dbs_data->get_cpu_cdbs(cpu);
- INIT_DEFERRABLE_WORK(&cdbs->work, dbs_data->gov_dbs_timer);
- schedule_delayed_work_on(cdbs->cpu, &cdbs->work, delay);
+ schedule_delayed_work_on(cpu, &cdbs->work, delay);
}
-static inline void dbs_timer_exit(struct cpu_dbs_common_info *cdbs)
+static inline void dbs_timer_exit(struct dbs_data *dbs_data, int cpu)
{
+ struct cpu_dbs_common_info *cdbs = dbs_data->get_cpu_cdbs(cpu);
+
cancel_delayed_work_sync(&cdbs->work);
}
+/* Return whether we need to evaluate the cpu load again or not */
+bool need_load_eval(struct cpu_dbs_common_info *cdbs,
+ unsigned int sampling_rate)
+{
+ if (policy_is_shared(cdbs->cur_policy)) {
+ ktime_t time_now = ktime_get();
+ s64 delta_us = ktime_us_delta(time_now, cdbs->time_stamp);
+
+ /* Do nothing if we recently have sampled */
+ if (delta_us < (s64)(sampling_rate / 2))
+ return false;
+ else
+ cdbs->time_stamp = time_now;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(need_load_eval);
+
int cpufreq_governor_dbs(struct dbs_data *dbs_data,
struct cpufreq_policy *policy, unsigned int event)
{
struct od_cpu_dbs_info_s *od_dbs_info = NULL;
struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
+ struct cs_ops *cs_ops = NULL;
+ struct od_ops *od_ops = NULL;
struct od_dbs_tuners *od_tuners = dbs_data->tuners;
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
struct cpu_dbs_common_info *cpu_cdbs;
@@ -192,109 +215,111 @@ int cpufreq_governor_dbs(struct dbs_data *dbs_data,
cs_dbs_info = dbs_data->get_cpu_dbs_info_s(cpu);
sampling_rate = &cs_tuners->sampling_rate;
ignore_nice = cs_tuners->ignore_nice;
+ cs_ops = dbs_data->gov_ops;
} else {
od_dbs_info = dbs_data->get_cpu_dbs_info_s(cpu);
sampling_rate = &od_tuners->sampling_rate;
ignore_nice = od_tuners->ignore_nice;
+ od_ops = dbs_data->gov_ops;
}
switch (event) {
case CPUFREQ_GOV_START:
- if ((!cpu_online(cpu)) || (!policy->cur))
+ if (!policy->cur)
return -EINVAL;
mutex_lock(&dbs_data->mutex);
- dbs_data->enable++;
- cpu_cdbs->cpu = cpu;
for_each_cpu(j, policy->cpus) {
- struct cpu_dbs_common_info *j_cdbs;
- j_cdbs = dbs_data->get_cpu_cdbs(j);
+ struct cpu_dbs_common_info *j_cdbs =
+ dbs_data->get_cpu_cdbs(j);
+ j_cdbs->cpu = j;
j_cdbs->cur_policy = policy;
j_cdbs->prev_cpu_idle = get_cpu_idle_time(j,
&j_cdbs->prev_cpu_wall);
if (ignore_nice)
j_cdbs->prev_cpu_nice =
kcpustat_cpu(j).cpustat[CPUTIME_NICE];
- }
- /*
- * Start the timerschedule work, when this governor is used for
- * first time
- */
- if (dbs_data->enable != 1)
- goto second_time;
-
- rc = sysfs_create_group(cpufreq_global_kobject,
- dbs_data->attr_group);
- if (rc) {
- mutex_unlock(&dbs_data->mutex);
- return rc;
+ mutex_init(&j_cdbs->timer_mutex);
+ INIT_DEFERRABLE_WORK(&j_cdbs->work,
+ dbs_data->gov_dbs_timer);
}
- /* policy latency is in nS. Convert it to uS first */
- latency = policy->cpuinfo.transition_latency / 1000;
- if (latency == 0)
- latency = 1;
+ if (!policy->governor->initialized) {
+ rc = sysfs_create_group(cpufreq_global_kobject,
+ dbs_data->attr_group);
+ if (rc) {
+ mutex_unlock(&dbs_data->mutex);
+ return rc;
+ }
+ }
/*
* conservative does not implement micro like ondemand
* governor, thus we are bound to jiffies/HZ
*/
if (dbs_data->governor == GOV_CONSERVATIVE) {
- struct cs_ops *ops = dbs_data->gov_ops;
+ cs_dbs_info->down_skip = 0;
+ cs_dbs_info->enable = 1;
+ cs_dbs_info->requested_freq = policy->cur;
- cpufreq_register_notifier(ops->notifier_block,
- CPUFREQ_TRANSITION_NOTIFIER);
+ if (!policy->governor->initialized) {
+ cpufreq_register_notifier(cs_ops->notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
- dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
- jiffies_to_usecs(10);
+ dbs_data->min_sampling_rate =
+ MIN_SAMPLING_RATE_RATIO *
+ jiffies_to_usecs(10);
+ }
} else {
- struct od_ops *ops = dbs_data->gov_ops;
+ od_dbs_info->rate_mult = 1;
+ od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
+ od_ops->powersave_bias_init_cpu(cpu);
- od_tuners->io_is_busy = ops->io_busy();
+ if (!policy->governor->initialized)
+ od_tuners->io_is_busy = od_ops->io_busy();
}
+ if (policy->governor->initialized)
+ goto unlock;
+
+ /* policy latency is in nS. Convert it to uS first */
+ latency = policy->cpuinfo.transition_latency / 1000;
+ if (latency == 0)
+ latency = 1;
+
/* Bring kernel and HW constraints together */
dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
MIN_LATENCY_MULTIPLIER * latency);
*sampling_rate = max(dbs_data->min_sampling_rate, latency *
LATENCY_MULTIPLIER);
-
-second_time:
- if (dbs_data->governor == GOV_CONSERVATIVE) {
- cs_dbs_info->down_skip = 0;
- cs_dbs_info->enable = 1;
- cs_dbs_info->requested_freq = policy->cur;
- } else {
- struct od_ops *ops = dbs_data->gov_ops;
- od_dbs_info->rate_mult = 1;
- od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
- ops->powersave_bias_init_cpu(cpu);
- }
+unlock:
mutex_unlock(&dbs_data->mutex);
- mutex_init(&cpu_cdbs->timer_mutex);
- dbs_timer_init(dbs_data, cpu_cdbs, *sampling_rate);
+ /* Initiate timer time stamp */
+ cpu_cdbs->time_stamp = ktime_get();
+
+ for_each_cpu(j, policy->cpus)
+ dbs_timer_init(dbs_data, j, *sampling_rate);
break;
case CPUFREQ_GOV_STOP:
if (dbs_data->governor == GOV_CONSERVATIVE)
cs_dbs_info->enable = 0;
- dbs_timer_exit(cpu_cdbs);
+ for_each_cpu(j, policy->cpus)
+ dbs_timer_exit(dbs_data, j);
mutex_lock(&dbs_data->mutex);
mutex_destroy(&cpu_cdbs->timer_mutex);
- dbs_data->enable--;
- if (!dbs_data->enable) {
- struct cs_ops *ops = dbs_data->gov_ops;
+ if (policy->governor->initialized == 1) {
sysfs_remove_group(cpufreq_global_kobject,
dbs_data->attr_group);
if (dbs_data->governor == GOV_CONSERVATIVE)
- cpufreq_unregister_notifier(ops->notifier_block,
+ cpufreq_unregister_notifier(cs_ops->notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
}
mutex_unlock(&dbs_data->mutex);
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index f6616540c53d..d2ac91150600 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -82,6 +82,7 @@ struct cpu_dbs_common_info {
* the governor or limits.
*/
struct mutex timer_mutex;
+ ktime_t time_stamp;
};
struct od_cpu_dbs_info_s {
@@ -108,7 +109,7 @@ struct od_dbs_tuners {
unsigned int sampling_rate;
unsigned int sampling_down_factor;
unsigned int up_threshold;
- unsigned int down_differential;
+ unsigned int adj_up_threshold;
unsigned int powersave_bias;
unsigned int io_is_busy;
};
@@ -129,7 +130,6 @@ struct dbs_data {
#define GOV_CONSERVATIVE 1
int governor;
unsigned int min_sampling_rate;
- unsigned int enable; /* number of CPUs using this policy */
struct attribute_group *attr_group;
void *tuners;
@@ -171,6 +171,8 @@ static inline int delay_for_sampling_rate(unsigned int sampling_rate)
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall);
void dbs_check_cpu(struct dbs_data *dbs_data, int cpu);
+bool need_load_eval(struct cpu_dbs_common_info *cdbs,
+ unsigned int sampling_rate);
int cpufreq_governor_dbs(struct dbs_data *dbs_data,
struct cpufreq_policy *policy, unsigned int event);
#endif /* _CPUFREQ_GOVERNER_H */
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 7731f7c7e79a..f3eb26cd848f 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -26,7 +26,7 @@
#include "cpufreq_governor.h"
-/* On-demand governor macors */
+/* On-demand governor macros */
#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10)
#define DEF_FREQUENCY_UP_THRESHOLD (80)
#define DEF_SAMPLING_DOWN_FACTOR (1)
@@ -47,7 +47,8 @@ static struct cpufreq_governor cpufreq_gov_ondemand;
static struct od_dbs_tuners od_tuners = {
.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
- .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
+ .adj_up_threshold = DEF_FREQUENCY_UP_THRESHOLD -
+ DEF_FREQUENCY_DOWN_DIFFERENTIAL,
.ignore_nice = 0,
.powersave_bias = 0,
};
@@ -65,7 +66,7 @@ static void ondemand_powersave_bias_init_cpu(int cpu)
* efficient idling at a higher frequency/voltage is.
* Pavel Machek says this is not so for various generations of AMD and old
* Intel systems.
- * Mike Chan (androidlcom) calis this is also not true for ARM.
+ * Mike Chan (android.com) claims this is also not true for ARM.
* Because of this, whitelist specific known (series) of CPUs by default, and
* leave all others up to the user.
*/
@@ -73,7 +74,7 @@ static int should_io_be_busy(void)
{
#if defined(CONFIG_X86)
/*
- * For Intel, Core 2 (model 15) andl later have an efficient idle.
+ * For Intel, Core 2 (model 15) and later have an efficient idle.
*/
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
boot_cpu_data.x86 == 6 &&
@@ -158,8 +159,8 @@ static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
/*
* Every sampling_rate, we check, if current idle time is less than 20%
- * (default), then we try to increase frequency Every sampling_rate, we look for
- * a the lowest frequency which can sustain the load while keeping idle time
+ * (default), then we try to increase frequency. Every sampling_rate, we look
+ * for the lowest frequency which can sustain the load while keeping idle time
* over 30%. If such a frequency exist, we try to decrease to this frequency.
*
* Any frequency increase takes it to the maximum frequency. Frequency reduction
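A hedged pseudocode rendering of the policy just described, simplified from od_check_cpu() below ('load' stands for the busiest CPU's load percentage over the last sample; illustration only):

unsigned int load_freq = load * policy->cur;

if (load > od_tuners.up_threshold) {
	/* any increase goes straight to the maximum frequency */
	dbs_freq_increase(policy, policy->max);
} else if (load_freq < od_tuners.adj_up_threshold * policy->cur) {
	/* pick the lowest frequency that sustains the load with margin */
	unsigned int freq_next = load_freq / od_tuners.adj_up_threshold;

	__cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);
}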
@@ -192,11 +193,9 @@ static void od_check_cpu(int cpu, unsigned int load_freq)
* support the current CPU usage without triggering the up policy. To be
* safe, we focus 10 points under the threshold.
*/
- if (load_freq < (od_tuners.up_threshold - od_tuners.down_differential) *
- policy->cur) {
+ if (load_freq < od_tuners.adj_up_threshold * policy->cur) {
unsigned int freq_next;
- freq_next = load_freq / (od_tuners.up_threshold -
- od_tuners.down_differential);
+ freq_next = load_freq / od_tuners.adj_up_threshold;
/* No longer fully busy, reset rate_mult */
dbs_info->rate_mult = 1;
@@ -218,33 +217,42 @@ static void od_check_cpu(int cpu, unsigned int load_freq)
static void od_dbs_timer(struct work_struct *work)
{
+ struct delayed_work *dw = to_delayed_work(work);
struct od_cpu_dbs_info_s *dbs_info =
container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
- unsigned int cpu = dbs_info->cdbs.cpu;
- int delay, sample_type = dbs_info->sample_type;
+ unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
+ struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(od_cpu_dbs_info,
+ cpu);
+ int delay, sample_type = core_dbs_info->sample_type;
+ bool eval_load;
- mutex_lock(&dbs_info->cdbs.timer_mutex);
+ mutex_lock(&core_dbs_info->cdbs.timer_mutex);
+ eval_load = need_load_eval(&core_dbs_info->cdbs,
+ od_tuners.sampling_rate);
/* Common NORMAL_SAMPLE setup */
- dbs_info->sample_type = OD_NORMAL_SAMPLE;
+ core_dbs_info->sample_type = OD_NORMAL_SAMPLE;
if (sample_type == OD_SUB_SAMPLE) {
- delay = dbs_info->freq_lo_jiffies;
- __cpufreq_driver_target(dbs_info->cdbs.cur_policy,
- dbs_info->freq_lo, CPUFREQ_RELATION_H);
+ delay = core_dbs_info->freq_lo_jiffies;
+ if (eval_load)
+ __cpufreq_driver_target(core_dbs_info->cdbs.cur_policy,
+ core_dbs_info->freq_lo,
+ CPUFREQ_RELATION_H);
} else {
- dbs_check_cpu(&od_dbs_data, cpu);
- if (dbs_info->freq_lo) {
+ if (eval_load)
+ dbs_check_cpu(&od_dbs_data, cpu);
+ if (core_dbs_info->freq_lo) {
/* Setup timer for SUB_SAMPLE */
- dbs_info->sample_type = OD_SUB_SAMPLE;
- delay = dbs_info->freq_hi_jiffies;
+ core_dbs_info->sample_type = OD_SUB_SAMPLE;
+ delay = core_dbs_info->freq_hi_jiffies;
} else {
delay = delay_for_sampling_rate(od_tuners.sampling_rate
- * dbs_info->rate_mult);
+ * core_dbs_info->rate_mult);
}
}
- schedule_delayed_work_on(cpu, &dbs_info->cdbs.work, delay);
- mutex_unlock(&dbs_info->cdbs.timer_mutex);
+ schedule_delayed_work_on(smp_processor_id(), dw, delay);
+ mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
}
/************************** sysfs interface ************************/
@@ -259,7 +267,7 @@ static ssize_t show_sampling_rate_min(struct kobject *kobj,
* update_sampling_rate - update sampling rate effective immediately if needed.
* @new_rate: new sampling rate
*
- * If new rate is smaller than the old, simply updaing
+ * If new rate is smaller than the old, simply updating
* dbs_tuners_int.sampling_rate might not be appropriate. For example, if the
* original sampling_rate was 1 second and the requested new sampling rate is 10
* ms because the user needs immediate reaction from ondemand governor, but not
@@ -287,7 +295,7 @@ static void update_sampling_rate(unsigned int new_rate)
cpufreq_cpu_put(policy);
continue;
}
- dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu);
+ dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
cpufreq_cpu_put(policy);
mutex_lock(&dbs_info->cdbs.timer_mutex);
@@ -306,8 +314,7 @@ static void update_sampling_rate(unsigned int new_rate)
cancel_delayed_work_sync(&dbs_info->cdbs.work);
mutex_lock(&dbs_info->cdbs.timer_mutex);
- schedule_delayed_work_on(dbs_info->cdbs.cpu,
- &dbs_info->cdbs.work,
+ schedule_delayed_work_on(cpu, &dbs_info->cdbs.work,
usecs_to_jiffies(new_rate));
}
@@ -351,6 +358,10 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
input < MIN_FREQUENCY_UP_THRESHOLD) {
return -EINVAL;
}
+ /* Calculate the new adj_up_threshold */
+ od_tuners.adj_up_threshold += input;
+ od_tuners.adj_up_threshold -= od_tuners.up_threshold;
+
od_tuners.up_threshold = input;
return count;
}
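A worked example of the update above, using the defaults: up_threshold = 80 and down_differential = 10 give adj_up_threshold = 70. Writing 95 to up_threshold yields adj_up_threshold = 70 + 95 - 80 = 85, preserving the 10-point gap between the up and down decision thresholds without storing down_differential separately.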
@@ -507,7 +518,8 @@ static int __init cpufreq_gov_dbs_init(void)
if (idle_time != -1ULL) {
/* Idle micro accounting is supported. Use finer thresholds */
od_tuners.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
- od_tuners.down_differential = MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
+ od_tuners.adj_up_threshold = MICRO_FREQUENCY_UP_THRESHOLD -
+ MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
/*
* In nohz/micro accounting case we set the minimum frequency
* not depending on HZ, but fixed (very low). The deferred
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 9d7732b81044..2fd779eb1ed1 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -24,12 +24,6 @@
static spinlock_t cpufreq_stats_lock;
-#define CPUFREQ_STATDEVICE_ATTR(_name, _mode, _show) \
-static struct freq_attr _attr_##_name = {\
- .attr = {.name = __stringify(_name), .mode = _mode, }, \
- .show = _show,\
-};
-
struct cpufreq_stats {
unsigned int cpu;
unsigned int total_trans;
@@ -136,17 +130,17 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
return PAGE_SIZE;
return len;
}
-CPUFREQ_STATDEVICE_ATTR(trans_table, 0444, show_trans_table);
+cpufreq_freq_attr_ro(trans_table);
#endif
-CPUFREQ_STATDEVICE_ATTR(total_trans, 0444, show_total_trans);
-CPUFREQ_STATDEVICE_ATTR(time_in_state, 0444, show_time_in_state);
+cpufreq_freq_attr_ro(total_trans);
+cpufreq_freq_attr_ro(time_in_state);
static struct attribute *default_attrs[] = {
- &_attr_total_trans.attr,
- &_attr_time_in_state.attr,
+ &total_trans.attr,
+ &time_in_state.attr,
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
- &_attr_trans_table.attr,
+ &trans_table.attr,
#endif
NULL
};
@@ -170,11 +164,13 @@ static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
static void cpufreq_stats_free_table(unsigned int cpu)
{
struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu);
+
if (stat) {
+ pr_debug("%s: Free stat table\n", __func__);
kfree(stat->time_in_state);
kfree(stat);
+ per_cpu(cpufreq_stats_table, cpu) = NULL;
}
- per_cpu(cpufreq_stats_table, cpu) = NULL;
}
/* must be called early in the CPU removal sequence (before
@@ -183,8 +179,14 @@ static void cpufreq_stats_free_table(unsigned int cpu)
static void cpufreq_stats_free_sysfs(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- if (policy && policy->cpu == cpu)
+
+ if (!cpufreq_frequency_get_table(cpu))
+ return;
+
+ if (policy && !policy_is_shared(policy)) {
+ pr_debug("%s: Free sysfs stat\n", __func__);
sysfs_remove_group(&policy->kobj, &stats_attr_group);
+ }
if (policy)
cpufreq_cpu_put(policy);
}
@@ -262,6 +264,19 @@ error_get_fail:
return ret;
}
+static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy)
+{
+ struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table,
+ policy->last_cpu);
+
+ pr_debug("Updating stats_table for new_cpu %u from last_cpu %u\n",
+ policy->cpu, policy->last_cpu);
+ per_cpu(cpufreq_stats_table, policy->cpu) = per_cpu(cpufreq_stats_table,
+ policy->last_cpu);
+ per_cpu(cpufreq_stats_table, policy->last_cpu) = NULL;
+ stat->cpu = policy->cpu;
+}
+
static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
unsigned long val, void *data)
{
@@ -269,6 +284,12 @@ static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
struct cpufreq_policy *policy = data;
struct cpufreq_frequency_table *table;
unsigned int cpu = policy->cpu;
+
+ if (val == CPUFREQ_UPDATE_POLICY_CPU) {
+ cpufreq_stats_update_policy_cpu(policy);
+ return 0;
+ }
+
if (val != CPUFREQ_NOTIFY)
return 0;
table = cpufreq_frequency_get_table(cpu);
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index c8c3d293cc57..bbeb9c0720a6 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -118,8 +118,6 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
switch (event) {
case CPUFREQ_GOV_START:
- if (!cpu_online(cpu))
- return -EINVAL;
BUG_ON(!policy->cur);
mutex_lock(&userspace_mutex);
diff --git a/drivers/cpufreq/db8500-cpufreq.c b/drivers/cpufreq/dbx500-cpufreq.c
index 4f154bc0ebe4..72f0c3efa76e 100644
--- a/drivers/cpufreq/db8500-cpufreq.c
+++ b/drivers/cpufreq/dbx500-cpufreq.c
@@ -1,13 +1,13 @@
/*
* Copyright (C) STMicroelectronics 2009
- * Copyright (C) ST-Ericsson SA 2010
+ * Copyright (C) ST-Ericsson SA 2010-2012
*
* License Terms: GNU General Public License v2
* Author: Sundar Iyer <sundar.iyer@stericsson.com>
* Author: Martin Persson <martin.persson@stericsson.com>
* Author: Jonas Aaberg <jonas.aberg@stericsson.com>
- *
*/
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cpufreq.h>
@@ -15,27 +15,27 @@
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
-#include <mach/id.h>
static struct cpufreq_frequency_table *freq_table;
static struct clk *armss_clk;
-static struct freq_attr *db8500_cpufreq_attr[] = {
+static struct freq_attr *dbx500_cpufreq_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
-static int db8500_cpufreq_verify_speed(struct cpufreq_policy *policy)
+static int dbx500_cpufreq_verify_speed(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy, freq_table);
}
-static int db8500_cpufreq_target(struct cpufreq_policy *policy,
+static int dbx500_cpufreq_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
struct cpufreq_freqs freqs;
unsigned int idx;
+ int ret;
/* scale the target frequency to one of the extremes supported */
if (target_freq < policy->cpuinfo.min_freq)
@@ -44,10 +44,9 @@ static int db8500_cpufreq_target(struct cpufreq_policy *policy,
target_freq = policy->cpuinfo.max_freq;
/* Lookup the next frequency */
- if (cpufreq_frequency_table_target
- (policy, freq_table, target_freq, relation, &idx)) {
+ if (cpufreq_frequency_table_target(policy, freq_table, target_freq,
+ relation, &idx))
return -EINVAL;
- }
freqs.old = policy->cur;
freqs.new = freq_table[idx].frequency;
@@ -60,9 +59,12 @@ static int db8500_cpufreq_target(struct cpufreq_policy *policy,
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
/* update armss clk frequency */
- if (clk_set_rate(armss_clk, freq_table[idx].frequency * 1000)) {
- pr_err("db8500-cpufreq: Failed to update armss clk\n");
- return -EINVAL;
+ ret = clk_set_rate(armss_clk, freqs.new * 1000);
+
+ if (ret) {
+ pr_err("dbx500-cpufreq: Failed to set armss_clk to %d Hz: error %d\n",
+ freqs.new * 1000, ret);
+ return ret;
}
/* post change notification */
@@ -72,7 +74,7 @@ static int db8500_cpufreq_target(struct cpufreq_policy *policy,
return 0;
}
-static unsigned int db8500_cpufreq_getspeed(unsigned int cpu)
+static unsigned int dbx500_cpufreq_getspeed(unsigned int cpu)
{
int i = 0;
unsigned long freq = clk_get_rate(armss_clk) / 1000;
@@ -84,40 +86,26 @@ static unsigned int db8500_cpufreq_getspeed(unsigned int cpu)
}
/* We could not find a corresponding frequency. */
- pr_err("db8500-cpufreq: Failed to find cpufreq speed\n");
+ pr_err("dbx500-cpufreq: Failed to find cpufreq speed\n");
return 0;
}
-static int __cpuinit db8500_cpufreq_init(struct cpufreq_policy *policy)
+static int __cpuinit dbx500_cpufreq_init(struct cpufreq_policy *policy)
{
- int i = 0;
int res;
- armss_clk = clk_get(NULL, "armss");
- if (IS_ERR(armss_clk)) {
- pr_err("db8500-cpufreq : Failed to get armss clk\n");
- return PTR_ERR(armss_clk);
- }
-
- pr_info("db8500-cpufreq : Available frequencies:\n");
- while (freq_table[i].frequency != CPUFREQ_TABLE_END) {
- pr_info(" %d Mhz\n", freq_table[i].frequency/1000);
- i++;
- }
-
/* get policy fields based on the table */
res = cpufreq_frequency_table_cpuinfo(policy, freq_table);
if (!res)
cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
else {
- pr_err("db8500-cpufreq : Failed to read policy table\n");
- clk_put(armss_clk);
+ pr_err("dbx500-cpufreq: Failed to read policy table\n");
return res;
}
policy->min = policy->cpuinfo.min_freq;
policy->max = policy->cpuinfo.max_freq;
- policy->cur = db8500_cpufreq_getspeed(policy->cpu);
+ policy->cur = dbx500_cpufreq_getspeed(policy->cpu);
policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
/*
@@ -128,52 +116,59 @@ static int __cpuinit db8500_cpufreq_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency = 20 * 1000; /* in ns */
/* policy sharing between dual CPUs */
- cpumask_copy(policy->cpus, cpu_present_mask);
-
- policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
+ cpumask_setall(policy->cpus);
return 0;
}
-static struct cpufreq_driver db8500_cpufreq_driver = {
- .flags = CPUFREQ_STICKY,
- .verify = db8500_cpufreq_verify_speed,
- .target = db8500_cpufreq_target,
- .get = db8500_cpufreq_getspeed,
- .init = db8500_cpufreq_init,
- .name = "DB8500",
- .attr = db8500_cpufreq_attr,
+static struct cpufreq_driver dbx500_cpufreq_driver = {
+ .flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS,
+ .verify = dbx500_cpufreq_verify_speed,
+ .target = dbx500_cpufreq_target,
+ .get = dbx500_cpufreq_getspeed,
+ .init = dbx500_cpufreq_init,
+ .name = "DBX500",
+ .attr = dbx500_cpufreq_attr,
};
-static int db8500_cpufreq_probe(struct platform_device *pdev)
+static int dbx500_cpufreq_probe(struct platform_device *pdev)
{
- freq_table = dev_get_platdata(&pdev->dev);
+ int i = 0;
+ freq_table = dev_get_platdata(&pdev->dev);
if (!freq_table) {
- pr_err("db8500-cpufreq: Failed to fetch cpufreq table\n");
+ pr_err("dbx500-cpufreq: Failed to fetch cpufreq table\n");
return -ENODEV;
}
- return cpufreq_register_driver(&db8500_cpufreq_driver);
+ armss_clk = clk_get(&pdev->dev, "armss");
+ if (IS_ERR(armss_clk)) {
+ pr_err("dbx500-cpufreq: Failed to get armss clk\n");
+ return PTR_ERR(armss_clk);
+ }
+
+ pr_info("dbx500-cpufreq: Available frequencies:\n");
+ while (freq_table[i].frequency != CPUFREQ_TABLE_END) {
+ pr_info(" %d MHz\n", freq_table[i].frequency/1000);
+ i++;
+ }
+
+ return cpufreq_register_driver(&dbx500_cpufreq_driver);
}
-static struct platform_driver db8500_cpufreq_plat_driver = {
+static struct platform_driver dbx500_cpufreq_plat_driver = {
.driver = {
- .name = "cpufreq-u8500",
+ .name = "cpufreq-ux500",
.owner = THIS_MODULE,
},
- .probe = db8500_cpufreq_probe,
+ .probe = dbx500_cpufreq_probe,
};
-static int __init db8500_cpufreq_register(void)
+static int __init dbx500_cpufreq_register(void)
{
- if (!cpu_is_u8500_family())
- return -ENODEV;
-
- pr_info("cpufreq for DB8500 started\n");
- return platform_driver_register(&db8500_cpufreq_plat_driver);
+ return platform_driver_register(&dbx500_cpufreq_plat_driver);
}
-device_initcall(db8500_cpufreq_register);
+device_initcall(dbx500_cpufreq_register);
MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("cpufreq driver for DB8500");
+MODULE_DESCRIPTION("cpufreq driver for DBX500");
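For reference, the reworked dbx500_cpufreq_target() above follows the standard three-step cpufreq pattern: map the request onto a table entry, send the PRECHANGE notification, change the clock (now propagating the clk core's error code instead of a blanket -EINVAL), then send POSTCHANGE. A condensed sketch, assuming the freq_table and armss_clk that the probe path above sets up:

/* Minimal sketch of the pattern used by dbx500_cpufreq_target() */
static int dbx500_target_sketch(struct cpufreq_policy *policy,
				unsigned int target_freq,
				unsigned int relation)
{
	struct cpufreq_freqs freqs;
	unsigned int idx;
	int ret;

	/* Clamp and map the request onto a frequency table entry */
	if (cpufreq_frequency_table_target(policy, freq_table, target_freq,
					   relation, &idx))
		return -EINVAL;

	freqs.old = policy->cur;
	freqs.new = freq_table[idx].frequency;		/* kHz */

	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);

	/* The clk API works in Hz; hand the real error code back */
	ret = clk_set_rate(armss_clk, freqs.new * 1000);
	if (ret)
		return ret;

	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
	return 0;
}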
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
index 7012ea8bf1e7..78057a357ddb 100644
--- a/drivers/cpufreq/exynos-cpufreq.c
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -18,10 +18,10 @@
#include <linux/cpufreq.h>
#include <linux/suspend.h>
-#include <mach/cpufreq.h>
-
#include <plat/cpu.h>
+#include "exynos-cpufreq.h"
+
static struct exynos_dvfs_info *exynos_info;
static struct regulator *arm_regulator;
@@ -42,51 +42,56 @@ static unsigned int exynos_getspeed(unsigned int cpu)
return clk_get_rate(exynos_info->cpu_clk) / 1000;
}
-static int exynos_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
+static int exynos_cpufreq_get_index(unsigned int freq)
+{
+ struct cpufreq_frequency_table *freq_table = exynos_info->freq_table;
+ int index;
+
+ for (index = 0;
+ freq_table[index].frequency != CPUFREQ_TABLE_END; index++)
+ if (freq_table[index].frequency == freq)
+ break;
+
+ if (freq_table[index].frequency == CPUFREQ_TABLE_END)
+ return -EINVAL;
+
+ return index;
+}
+
+static int exynos_cpufreq_scale(unsigned int target_freq)
{
- unsigned int index, old_index;
- unsigned int arm_volt, safe_arm_volt = 0;
- int ret = 0;
struct cpufreq_frequency_table *freq_table = exynos_info->freq_table;
unsigned int *volt_table = exynos_info->volt_table;
+ struct cpufreq_policy *policy = cpufreq_cpu_get(0);
+ unsigned int arm_volt, safe_arm_volt = 0;
unsigned int mpll_freq_khz = exynos_info->mpll_freq_khz;
-
- mutex_lock(&cpufreq_lock);
+ int index, old_index;
+ int ret = 0;
freqs.old = policy->cur;
+ freqs.new = target_freq;
+ freqs.cpu = policy->cpu;
- if (frequency_locked && target_freq != locking_frequency) {
- ret = -EAGAIN;
+ if (freqs.new == freqs.old)
goto out;
- }
/*
* The policy max may have been changed so that we cannot get a proper
* old_index with cpufreq_frequency_table_target(). Thus, ignore the
* policy and get the index from the raw frequency table.
*/
- for (old_index = 0;
- freq_table[old_index].frequency != CPUFREQ_TABLE_END;
- old_index++)
- if (freq_table[old_index].frequency == freqs.old)
- break;
-
- if (freq_table[old_index].frequency == CPUFREQ_TABLE_END) {
- ret = -EINVAL;
+ old_index = exynos_cpufreq_get_index(freqs.old);
+ if (old_index < 0) {
+ ret = old_index;
goto out;
}
- if (cpufreq_frequency_table_target(policy, freq_table,
- target_freq, relation, &index)) {
- ret = -EINVAL;
+ index = exynos_cpufreq_get_index(target_freq);
+ if (index < 0) {
+ ret = index;
goto out;
}
- freqs.new = freq_table[index].frequency;
- freqs.cpu = policy->cpu;
-
/*
* The ARM clock source will be changed from APLL to MPLL temporarily.
* To support this level, we need to control the regulator for
@@ -106,15 +111,25 @@ static int exynos_target(struct cpufreq_policy *policy,
/* When the new frequency is higher than current frequency */
if ((freqs.new > freqs.old) && !safe_arm_volt) {
/* Firstly, voltage up to increase frequency */
- regulator_set_voltage(arm_regulator, arm_volt,
- arm_volt);
+ ret = regulator_set_voltage(arm_regulator, arm_volt, arm_volt);
+ if (ret) {
+ pr_err("%s: failed to set cpu voltage to %d\n",
+ __func__, arm_volt);
+ goto out;
+ }
}
- if (safe_arm_volt)
- regulator_set_voltage(arm_regulator, safe_arm_volt,
+ if (safe_arm_volt) {
+ ret = regulator_set_voltage(arm_regulator, safe_arm_volt,
safe_arm_volt);
- if (freqs.new != freqs.old)
- exynos_info->set_freq(old_index, index);
+ if (ret) {
+ pr_err("%s: failed to set cpu voltage to %d\n",
+ __func__, safe_arm_volt);
+ goto out;
+ }
+ }
+
+ exynos_info->set_freq(old_index, index);
for_each_cpu(freqs.cpu, policy->cpus)
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
@@ -125,8 +140,44 @@ static int exynos_target(struct cpufreq_policy *policy,
/* down the voltage after frequency change */
regulator_set_voltage(arm_regulator, arm_volt,
arm_volt);
+ if (ret) {
+ pr_err("%s: failed to set cpu voltage to %d\n",
+ __func__, arm_volt);
+ goto out;
+ }
+ }
+
+out:
+
+ cpufreq_cpu_put(policy);
+
+ return ret;
+}
+
+static int exynos_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ struct cpufreq_frequency_table *freq_table = exynos_info->freq_table;
+ unsigned int index;
+ unsigned int new_freq;
+ int ret = 0;
+
+ mutex_lock(&cpufreq_lock);
+
+ if (frequency_locked)
+ goto out;
+
+ if (cpufreq_frequency_table_target(policy, freq_table,
+ target_freq, relation, &index)) {
+ ret = -EINVAL;
+ goto out;
}
+ new_freq = freq_table[index].frequency;
+
+ ret = exynos_cpufreq_scale(new_freq);
+
out:
mutex_unlock(&cpufreq_lock);
@@ -163,51 +214,26 @@ static int exynos_cpufreq_resume(struct cpufreq_policy *policy)
static int exynos_cpufreq_pm_notifier(struct notifier_block *notifier,
unsigned long pm_event, void *v)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(0); /* boot CPU */
- static unsigned int saved_frequency;
- unsigned int temp;
+ int ret;
- mutex_lock(&cpufreq_lock);
switch (pm_event) {
case PM_SUSPEND_PREPARE:
- if (frequency_locked)
- goto out;
-
+ mutex_lock(&cpufreq_lock);
frequency_locked = true;
+ mutex_unlock(&cpufreq_lock);
- if (locking_frequency) {
- saved_frequency = exynos_getspeed(0);
+ ret = exynos_cpufreq_scale(locking_frequency);
+ if (ret < 0)
+ return NOTIFY_BAD;
- mutex_unlock(&cpufreq_lock);
- exynos_target(policy, locking_frequency,
- CPUFREQ_RELATION_H);
- mutex_lock(&cpufreq_lock);
- }
break;
case PM_POST_SUSPEND:
- if (saved_frequency) {
- /*
- * While frequency_locked, only locking_frequency
- * is valid for target(). In order to use
- * saved_frequency while keeping frequency_locked,
- * we temporarly overwrite locking_frequency.
- */
- temp = locking_frequency;
- locking_frequency = saved_frequency;
-
- mutex_unlock(&cpufreq_lock);
- exynos_target(policy, locking_frequency,
- CPUFREQ_RELATION_H);
- mutex_lock(&cpufreq_lock);
-
- locking_frequency = temp;
- }
+ mutex_lock(&cpufreq_lock);
frequency_locked = false;
+ mutex_unlock(&cpufreq_lock);
break;
}
-out:
- mutex_unlock(&cpufreq_lock);
return NOTIFY_OK;
}
@@ -222,35 +248,34 @@ static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
cpufreq_frequency_table_get_attr(exynos_info->freq_table, policy->cpu);
- locking_frequency = exynos_getspeed(0);
-
/* set the transition latency value */
policy->cpuinfo.transition_latency = 100000;
- /*
- * EXYNOS4 multi-core processors has 2 cores
- * that the frequency cannot be set independently.
- * Each cpu is bound to the same speed.
- * So the affected cpu is all of the cpus.
- */
- if (num_online_cpus() == 1) {
- cpumask_copy(policy->related_cpus, cpu_possible_mask);
- cpumask_copy(policy->cpus, cpu_online_mask);
- } else {
- policy->shared_type = CPUFREQ_SHARED_TYPE_ANY;
- cpumask_setall(policy->cpus);
- }
+ cpumask_setall(policy->cpus);
return cpufreq_frequency_table_cpuinfo(policy, exynos_info->freq_table);
}
+static int exynos_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+ cpufreq_frequency_table_put_attr(policy->cpu);
+ return 0;
+}
+
+static struct freq_attr *exynos_cpufreq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
static struct cpufreq_driver exynos_driver = {
.flags = CPUFREQ_STICKY,
.verify = exynos_verify_speed,
.target = exynos_target,
.get = exynos_getspeed,
.init = exynos_cpufreq_cpu_init,
+ .exit = exynos_cpufreq_cpu_exit,
.name = "exynos_cpufreq",
+ .attr = exynos_cpufreq_attr,
#ifdef CONFIG_PM
.suspend = exynos_cpufreq_suspend,
.resume = exynos_cpufreq_resume,
@@ -288,6 +313,8 @@ static int __init exynos_cpufreq_init(void)
goto err_vdd_arm;
}
+ locking_frequency = exynos_getspeed(0);
+
register_pm_notifier(&exynos_cpufreq_nb);
if (cpufreq_register_driver(&exynos_driver)) {
@@ -299,8 +326,7 @@ static int __init exynos_cpufreq_init(void)
err_cpufreq:
unregister_pm_notifier(&exynos_cpufreq_nb);
- if (!IS_ERR(arm_regulator))
- regulator_put(arm_regulator);
+ regulator_put(arm_regulator);
err_vdd_arm:
kfree(exynos_info);
pr_debug("%s: failed initialization\n", __func__);
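The notifier rework above is the heart of this change: the mutex now guards only the frequency_locked flag, and the actual transition goes through exynos_cpufreq_scale(), the same path a normal target() call takes. A condensed sketch of the resulting suspend/resume flow, assuming the cpufreq_lock, frequency_locked and locking_frequency declarations from this driver:

/* Sketch of the suspend/resume flow after the rework */
static int exynos_pm_notifier_sketch(struct notifier_block *nb,
				     unsigned long pm_event, void *v)
{
	switch (pm_event) {
	case PM_SUSPEND_PREPARE:
		/* Block further target() calls ... */
		mutex_lock(&cpufreq_lock);
		frequency_locked = true;
		mutex_unlock(&cpufreq_lock);

		/* ... then pin the frequency sampled at init time */
		if (exynos_cpufreq_scale(locking_frequency) < 0)
			return NOTIFY_BAD;
		break;
	case PM_POST_SUSPEND:
		/* Unlock; the governor will pick the next target itself */
		mutex_lock(&cpufreq_lock);
		frequency_locked = false;
		mutex_unlock(&cpufreq_lock);
		break;
	}
	return NOTIFY_OK;
}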
diff --git a/drivers/cpufreq/exynos-cpufreq.h b/drivers/cpufreq/exynos-cpufreq.h
new file mode 100644
index 000000000000..92b852ee5ddc
--- /dev/null
+++ b/drivers/cpufreq/exynos-cpufreq.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * EXYNOS - CPUFreq support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+enum cpufreq_level_index {
+ L0, L1, L2, L3, L4,
+ L5, L6, L7, L8, L9,
+ L10, L11, L12, L13, L14,
+ L15, L16, L17, L18, L19,
+ L20,
+};
+
+#define APLL_FREQ(f, a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, m, p, s) \
+ { \
+ .freq = (f) * 1000, \
+ .clk_div_cpu0 = ((a0) | (a1) << 4 | (a2) << 8 | (a3) << 12 | \
+ (a4) << 16 | (a5) << 20 | (a6) << 24 | (a7) << 28), \
+ .clk_div_cpu1 = (b0 << 0 | b1 << 4 | b2 << 8), \
+ .mps = ((m) << 16 | (p) << 8 | (s)), \
+ }
+
+struct apll_freq {
+ unsigned int freq;
+ u32 clk_div_cpu0;
+ u32 clk_div_cpu1;
+ u32 mps;
+};
+
+struct exynos_dvfs_info {
+ unsigned long mpll_freq_khz;
+ unsigned int pll_safe_idx;
+ struct clk *cpu_clk;
+ unsigned int *volt_table;
+ struct cpufreq_frequency_table *freq_table;
+ void (*set_freq)(unsigned int, unsigned int);
+ bool (*need_apll_change)(unsigned int, unsigned int);
+};
+
+extern int exynos4210_cpufreq_init(struct exynos_dvfs_info *);
+extern int exynos4x12_cpufreq_init(struct exynos_dvfs_info *);
+extern int exynos5250_cpufreq_init(struct exynos_dvfs_info *);
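To make the packing concrete, here is one 4210 entry from the table further below expanded by hand. The FOUT line assumes a 24 MHz FIN and the 2^(S-1) post-divider used on this SoC generation, so treat it as illustrative rather than authoritative:

/*
 * APLL_FREQ(800, 0, 3, 7, 3, 3, 1, 7, 0, 3, 0, 0, 200, 6, 1) expands to:
 *
 *   .freq         = 800 * 1000                     = 800000 (kHz)
 *   .clk_div_cpu0 = 0 | 3<<4 | 7<<8 | 3<<12 |
 *                   3<<16 | 1<<20 | 7<<24 | 0<<28  = 0x07133730
 *   .clk_div_cpu1 = 3<<0 | 0<<4 | 0<<8             = 0x00000003
 *   .mps          = 200<<16 | 6<<8 | 1             = 0x00c80601
 *
 * i.e. M = 200, P = 6, S = 1; with FIN = 24 MHz and
 * FOUT = M * FIN / (P * 2^(S-1)) that gives 200 * 24 / 6 = 800 MHz,
 * matching the .freq field.
 */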
diff --git a/drivers/cpufreq/exynos4210-cpufreq.c b/drivers/cpufreq/exynos4210-cpufreq.c
index fb148fa27678..add7fbec4fc9 100644
--- a/drivers/cpufreq/exynos4210-cpufreq.c
+++ b/drivers/cpufreq/exynos4210-cpufreq.c
@@ -18,99 +18,40 @@
#include <linux/cpufreq.h>
#include <mach/regs-clock.h>
-#include <mach/cpufreq.h>
-#define CPUFREQ_LEVEL_END L5
-
-static int max_support_idx = L0;
-static int min_support_idx = (CPUFREQ_LEVEL_END - 1);
+#include "exynos-cpufreq.h"
static struct clk *cpu_clk;
static struct clk *moutcore;
static struct clk *mout_mpll;
static struct clk *mout_apll;
-struct cpufreq_clkdiv {
- unsigned int index;
- unsigned int clkdiv;
-};
-
-static unsigned int exynos4210_volt_table[CPUFREQ_LEVEL_END] = {
+static unsigned int exynos4210_volt_table[] = {
1250000, 1150000, 1050000, 975000, 950000,
};
-
-static struct cpufreq_clkdiv exynos4210_clkdiv_table[CPUFREQ_LEVEL_END];
-
static struct cpufreq_frequency_table exynos4210_freq_table[] = {
- {L0, 1200*1000},
- {L1, 1000*1000},
- {L2, 800*1000},
- {L3, 500*1000},
- {L4, 200*1000},
+ {L0, 1200 * 1000},
+ {L1, 1000 * 1000},
+ {L2, 800 * 1000},
+ {L3, 500 * 1000},
+ {L4, 200 * 1000},
{0, CPUFREQ_TABLE_END},
};
-static unsigned int clkdiv_cpu0[CPUFREQ_LEVEL_END][7] = {
+static struct apll_freq apll_freq_4210[] = {
/*
- * Clock divider value for following
- * { DIVCORE, DIVCOREM0, DIVCOREM1, DIVPERIPH,
- * DIVATB, DIVPCLK_DBG, DIVAPLL }
+ * values:
+ * freq
+ * clock divider for CORE, COREM0, COREM1, PERIPH, ATB, PCLK_DBG, APLL, RESERVED
+ * clock divider for COPY, HPM, RESERVED
+ * PLL M, P, S
*/
-
- /* ARM L0: 1200MHz */
- { 0, 3, 7, 3, 4, 1, 7 },
-
- /* ARM L1: 1000MHz */
- { 0, 3, 7, 3, 4, 1, 7 },
-
- /* ARM L2: 800MHz */
- { 0, 3, 7, 3, 3, 1, 7 },
-
- /* ARM L3: 500MHz */
- { 0, 3, 7, 3, 3, 1, 7 },
-
- /* ARM L4: 200MHz */
- { 0, 1, 3, 1, 3, 1, 0 },
-};
-
-static unsigned int clkdiv_cpu1[CPUFREQ_LEVEL_END][2] = {
- /*
- * Clock divider value for following
- * { DIVCOPY, DIVHPM }
- */
-
- /* ARM L0: 1200MHz */
- { 5, 0 },
-
- /* ARM L1: 1000MHz */
- { 4, 0 },
-
- /* ARM L2: 800MHz */
- { 3, 0 },
-
- /* ARM L3: 500MHz */
- { 3, 0 },
-
- /* ARM L4: 200MHz */
- { 3, 0 },
-};
-
-static unsigned int exynos4210_apll_pms_table[CPUFREQ_LEVEL_END] = {
- /* APLL FOUT L0: 1200MHz */
- ((150 << 16) | (3 << 8) | 1),
-
- /* APLL FOUT L1: 1000MHz */
- ((250 << 16) | (6 << 8) | 1),
-
- /* APLL FOUT L2: 800MHz */
- ((200 << 16) | (6 << 8) | 1),
-
- /* APLL FOUT L3: 500MHz */
- ((250 << 16) | (6 << 8) | 2),
-
- /* APLL FOUT L4: 200MHz */
- ((200 << 16) | (6 << 8) | 3),
+ APLL_FREQ(1200, 0, 3, 7, 3, 4, 1, 7, 0, 5, 0, 0, 150, 3, 1),
+ APLL_FREQ(1000, 0, 3, 7, 3, 4, 1, 7, 0, 4, 0, 0, 250, 6, 1),
+ APLL_FREQ(800, 0, 3, 7, 3, 3, 1, 7, 0, 3, 0, 0, 200, 6, 1),
+ APLL_FREQ(500, 0, 3, 7, 3, 3, 1, 7, 0, 3, 0, 0, 250, 6, 2),
+ APLL_FREQ(200, 0, 1, 3, 1, 3, 1, 0, 0, 3, 0, 0, 200, 6, 3),
};
static void exynos4210_set_clkdiv(unsigned int div_index)
@@ -119,7 +60,7 @@ static void exynos4210_set_clkdiv(unsigned int div_index)
/* Change Divider - CPU0 */
- tmp = exynos4210_clkdiv_table[div_index].clkdiv;
+ tmp = apll_freq_4210[div_index].clk_div_cpu0;
__raw_writel(tmp, EXYNOS4_CLKDIV_CPU);
@@ -129,12 +70,7 @@ static void exynos4210_set_clkdiv(unsigned int div_index)
/* Change Divider - CPU1 */
- tmp = __raw_readl(EXYNOS4_CLKDIV_CPU1);
-
- tmp &= ~((0x7 << 4) | 0x7);
-
- tmp |= ((clkdiv_cpu1[div_index][0] << 4) |
- (clkdiv_cpu1[div_index][1] << 0));
+ tmp = apll_freq_4210[div_index].clk_div_cpu1;
__raw_writel(tmp, EXYNOS4_CLKDIV_CPU1);
@@ -162,7 +98,7 @@ static void exynos4210_set_apll(unsigned int index)
/* 3. Change PLL PMS values */
tmp = __raw_readl(EXYNOS4_APLL_CON0);
tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0));
- tmp |= exynos4210_apll_pms_table[index];
+ tmp |= apll_freq_4210[index].mps;
__raw_writel(tmp, EXYNOS4_APLL_CON0);
/* 4. wait_lock_time */
@@ -179,10 +115,10 @@ static void exynos4210_set_apll(unsigned int index)
} while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT));
}
-bool exynos4210_pms_change(unsigned int old_index, unsigned int new_index)
+static bool exynos4210_pms_change(unsigned int old_index, unsigned int new_index)
{
- unsigned int old_pm = (exynos4210_apll_pms_table[old_index] >> 8);
- unsigned int new_pm = (exynos4210_apll_pms_table[new_index] >> 8);
+ unsigned int old_pm = apll_freq_4210[old_index].mps >> 8;
+ unsigned int new_pm = apll_freq_4210[new_index].mps >> 8;
return (old_pm == new_pm) ? 0 : 1;
}
@@ -200,7 +136,7 @@ static void exynos4210_set_frequency(unsigned int old_index,
/* 2. Change just s value in apll m,p,s value */
tmp = __raw_readl(EXYNOS4_APLL_CON0);
tmp &= ~(0x7 << 0);
- tmp |= (exynos4210_apll_pms_table[new_index] & 0x7);
+ tmp |= apll_freq_4210[new_index].mps & 0x7;
__raw_writel(tmp, EXYNOS4_APLL_CON0);
} else {
/* Clock Configuration Procedure */
@@ -214,7 +150,7 @@ static void exynos4210_set_frequency(unsigned int old_index,
/* 1. Change just s value in apll m,p,s value */
tmp = __raw_readl(EXYNOS4_APLL_CON0);
tmp &= ~(0x7 << 0);
- tmp |= (exynos4210_apll_pms_table[new_index] & 0x7);
+ tmp |= apll_freq_4210[new_index].mps & 0x7;
__raw_writel(tmp, EXYNOS4_APLL_CON0);
/* 2. Change the system clock divider values */
@@ -231,8 +167,6 @@ static void exynos4210_set_frequency(unsigned int old_index,
int exynos4210_cpufreq_init(struct exynos_dvfs_info *info)
{
- int i;
- unsigned int tmp;
unsigned long rate;
cpu_clk = clk_get(NULL, "armclk");
@@ -253,33 +187,9 @@ int exynos4210_cpufreq_init(struct exynos_dvfs_info *info)
if (IS_ERR(mout_apll))
goto err_mout_apll;
- tmp = __raw_readl(EXYNOS4_CLKDIV_CPU);
-
- for (i = L0; i < CPUFREQ_LEVEL_END; i++) {
- tmp &= ~(EXYNOS4_CLKDIV_CPU0_CORE_MASK |
- EXYNOS4_CLKDIV_CPU0_COREM0_MASK |
- EXYNOS4_CLKDIV_CPU0_COREM1_MASK |
- EXYNOS4_CLKDIV_CPU0_PERIPH_MASK |
- EXYNOS4_CLKDIV_CPU0_ATB_MASK |
- EXYNOS4_CLKDIV_CPU0_PCLKDBG_MASK |
- EXYNOS4_CLKDIV_CPU0_APLL_MASK);
-
- tmp |= ((clkdiv_cpu0[i][0] << EXYNOS4_CLKDIV_CPU0_CORE_SHIFT) |
- (clkdiv_cpu0[i][1] << EXYNOS4_CLKDIV_CPU0_COREM0_SHIFT) |
- (clkdiv_cpu0[i][2] << EXYNOS4_CLKDIV_CPU0_COREM1_SHIFT) |
- (clkdiv_cpu0[i][3] << EXYNOS4_CLKDIV_CPU0_PERIPH_SHIFT) |
- (clkdiv_cpu0[i][4] << EXYNOS4_CLKDIV_CPU0_ATB_SHIFT) |
- (clkdiv_cpu0[i][5] << EXYNOS4_CLKDIV_CPU0_PCLKDBG_SHIFT) |
- (clkdiv_cpu0[i][6] << EXYNOS4_CLKDIV_CPU0_APLL_SHIFT));
-
- exynos4210_clkdiv_table[i].clkdiv = tmp;
- }
-
info->mpll_freq_khz = rate;
- info->pm_lock_idx = L2;
+ /* 800 MHz */
info->pll_safe_idx = L2;
- info->max_support_idx = max_support_idx;
- info->min_support_idx = min_support_idx;
info->cpu_clk = cpu_clk;
info->volt_table = exynos4210_volt_table;
info->freq_table = exynos4210_freq_table;
@@ -289,14 +199,11 @@ int exynos4210_cpufreq_init(struct exynos_dvfs_info *info)
return 0;
err_mout_apll:
- if (!IS_ERR(mout_mpll))
- clk_put(mout_mpll);
+ clk_put(mout_mpll);
err_mout_mpll:
- if (!IS_ERR(moutcore))
- clk_put(moutcore);
+ clk_put(moutcore);
err_moutcore:
- if (!IS_ERR(cpu_clk))
- clk_put(cpu_clk);
+ clk_put(cpu_clk);
pr_debug("%s: failed initialization\n", __func__);
return -EINVAL;
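One detail worth spelling out: exynos4210_pms_change() shifts mps right by 8, which discards only the 3-bit S field, so two levels compare equal exactly when M and P match. set_frequency() uses that to choose between a cheap in-place S rewrite and the full APLL relock. A sketch of the decision (divider ordering omitted), assuming the apll_freq_4210 table above:

/* Illustrative: S-only update vs. full PMS reprogramming */
static void exynos4210_set_frequency_sketch(unsigned int old_index,
					    unsigned int new_index)
{
	unsigned int tmp;

	if (!exynos4210_pms_change(old_index, new_index)) {
		/* Same M and P: rewrite just the low 3 bits (S) */
		tmp = __raw_readl(EXYNOS4_APLL_CON0);
		tmp &= ~0x7;
		tmp |= apll_freq_4210[new_index].mps & 0x7;
		__raw_writel(tmp, EXYNOS4_APLL_CON0);
	} else {
		/* M or P changed: reparent to MPLL, relock, switch back */
		exynos4210_set_apll(new_index);
	}
}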
diff --git a/drivers/cpufreq/exynos4x12-cpufreq.c b/drivers/cpufreq/exynos4x12-cpufreq.c
index 8c5a7afa5b0b..08b7477b0aa2 100644
--- a/drivers/cpufreq/exynos4x12-cpufreq.c
+++ b/drivers/cpufreq/exynos4x12-cpufreq.c
@@ -18,28 +18,21 @@
#include <linux/cpufreq.h>
#include <mach/regs-clock.h>
-#include <mach/cpufreq.h>
-#define CPUFREQ_LEVEL_END (L13 + 1)
-
-static int max_support_idx;
-static int min_support_idx = (CPUFREQ_LEVEL_END - 1);
+#include "exynos-cpufreq.h"
static struct clk *cpu_clk;
static struct clk *moutcore;
static struct clk *mout_mpll;
static struct clk *mout_apll;
-struct cpufreq_clkdiv {
- unsigned int index;
- unsigned int clkdiv;
- unsigned int clkdiv1;
+static unsigned int exynos4x12_volt_table[] = {
+ 1350000, 1287500, 1250000, 1187500, 1137500, 1087500, 1037500,
+ 1000000, 987500, 975000, 950000, 925000, 900000, 900000
};
-static unsigned int exynos4x12_volt_table[CPUFREQ_LEVEL_END];
-
static struct cpufreq_frequency_table exynos4x12_freq_table[] = {
- {L0, 1500 * 1000},
+ {L0, CPUFREQ_ENTRY_INVALID},
{L1, 1400 * 1000},
{L2, 1300 * 1000},
{L3, 1200 * 1000},
@@ -56,247 +49,54 @@ static struct cpufreq_frequency_table exynos4x12_freq_table[] = {
{0, CPUFREQ_TABLE_END},
};
-static struct cpufreq_clkdiv exynos4x12_clkdiv_table[CPUFREQ_LEVEL_END];
+static struct apll_freq *apll_freq_4x12;
-static unsigned int clkdiv_cpu0_4212[CPUFREQ_LEVEL_END][8] = {
+static struct apll_freq apll_freq_4212[] = {
/*
- * Clock divider value for following
- * { DIVCORE, DIVCOREM0, DIVCOREM1, DIVPERIPH,
- * DIVATB, DIVPCLK_DBG, DIVAPLL, DIVCORE2 }
+ * values:
+ * freq
+ * clock divider for CORE, COREM0, COREM1, PERIPH, ATB, PCLK_DBG, APLL, CORE2
+ * clock divider for COPY, HPM, RESERVED
+ * PLL M, P, S
*/
- /* ARM L0: 1500 MHz */
- { 0, 3, 7, 0, 6, 1, 2, 0 },
-
- /* ARM L1: 1400 MHz */
- { 0, 3, 7, 0, 6, 1, 2, 0 },
-
- /* ARM L2: 1300 MHz */
- { 0, 3, 7, 0, 5, 1, 2, 0 },
-
- /* ARM L3: 1200 MHz */
- { 0, 3, 7, 0, 5, 1, 2, 0 },
-
- /* ARM L4: 1100 MHz */
- { 0, 3, 6, 0, 4, 1, 2, 0 },
-
- /* ARM L5: 1000 MHz */
- { 0, 2, 5, 0, 4, 1, 1, 0 },
-
- /* ARM L6: 900 MHz */
- { 0, 2, 5, 0, 3, 1, 1, 0 },
-
- /* ARM L7: 800 MHz */
- { 0, 2, 5, 0, 3, 1, 1, 0 },
-
- /* ARM L8: 700 MHz */
- { 0, 2, 4, 0, 3, 1, 1, 0 },
-
- /* ARM L9: 600 MHz */
- { 0, 2, 4, 0, 3, 1, 1, 0 },
-
- /* ARM L10: 500 MHz */
- { 0, 2, 4, 0, 3, 1, 1, 0 },
-
- /* ARM L11: 400 MHz */
- { 0, 2, 4, 0, 3, 1, 1, 0 },
-
- /* ARM L12: 300 MHz */
- { 0, 2, 4, 0, 2, 1, 1, 0 },
-
- /* ARM L13: 200 MHz */
- { 0, 1, 3, 0, 1, 1, 1, 0 },
+ APLL_FREQ(1500, 0, 3, 7, 0, 6, 1, 2, 0, 6, 2, 0, 250, 4, 0),
+ APLL_FREQ(1400, 0, 3, 7, 0, 6, 1, 2, 0, 6, 2, 0, 175, 3, 0),
+ APLL_FREQ(1300, 0, 3, 7, 0, 5, 1, 2, 0, 5, 2, 0, 325, 6, 0),
+ APLL_FREQ(1200, 0, 3, 7, 0, 5, 1, 2, 0, 5, 2, 0, 200, 4, 0),
+ APLL_FREQ(1100, 0, 3, 6, 0, 4, 1, 2, 0, 4, 2, 0, 275, 6, 0),
+ APLL_FREQ(1000, 0, 2, 5, 0, 4, 1, 1, 0, 4, 2, 0, 125, 3, 0),
+ APLL_FREQ(900, 0, 2, 5, 0, 3, 1, 1, 0, 3, 2, 0, 150, 4, 0),
+ APLL_FREQ(800, 0, 2, 5, 0, 3, 1, 1, 0, 3, 2, 0, 100, 3, 0),
+ APLL_FREQ(700, 0, 2, 4, 0, 3, 1, 1, 0, 3, 2, 0, 175, 3, 1),
+ APLL_FREQ(600, 0, 2, 4, 0, 3, 1, 1, 0, 3, 2, 0, 200, 4, 1),
+ APLL_FREQ(500, 0, 2, 4, 0, 3, 1, 1, 0, 3, 2, 0, 125, 3, 1),
+ APLL_FREQ(400, 0, 2, 4, 0, 3, 1, 1, 0, 3, 2, 0, 100, 3, 1),
+ APLL_FREQ(300, 0, 2, 4, 0, 2, 1, 1, 0, 3, 2, 0, 200, 4, 2),
+ APLL_FREQ(200, 0, 1, 3, 0, 1, 1, 1, 0, 3, 2, 0, 100, 3, 2),
};
-static unsigned int clkdiv_cpu0_4412[CPUFREQ_LEVEL_END][8] = {
+static struct apll_freq apll_freq_4412[] = {
/*
- * Clock divider value for following
- * { DIVCORE, DIVCOREM0, DIVCOREM1, DIVPERIPH,
- * DIVATB, DIVPCLK_DBG, DIVAPLL, DIVCORE2 }
- */
- /* ARM L0: 1500 MHz */
- { 0, 3, 7, 0, 6, 1, 2, 0 },
-
- /* ARM L1: 1400 MHz */
- { 0, 3, 7, 0, 6, 1, 2, 0 },
-
- /* ARM L2: 1300 MHz */
- { 0, 3, 7, 0, 5, 1, 2, 0 },
-
- /* ARM L3: 1200 MHz */
- { 0, 3, 7, 0, 5, 1, 2, 0 },
-
- /* ARM L4: 1100 MHz */
- { 0, 3, 6, 0, 4, 1, 2, 0 },
-
- /* ARM L5: 1000 MHz */
- { 0, 2, 5, 0, 4, 1, 1, 0 },
-
- /* ARM L6: 900 MHz */
- { 0, 2, 5, 0, 3, 1, 1, 0 },
-
- /* ARM L7: 800 MHz */
- { 0, 2, 5, 0, 3, 1, 1, 0 },
-
- /* ARM L8: 700 MHz */
- { 0, 2, 4, 0, 3, 1, 1, 0 },
-
- /* ARM L9: 600 MHz */
- { 0, 2, 4, 0, 3, 1, 1, 0 },
-
- /* ARM L10: 500 MHz */
- { 0, 2, 4, 0, 3, 1, 1, 0 },
-
- /* ARM L11: 400 MHz */
- { 0, 2, 4, 0, 3, 1, 1, 0 },
-
- /* ARM L12: 300 MHz */
- { 0, 2, 4, 0, 2, 1, 1, 0 },
-
- /* ARM L13: 200 MHz */
- { 0, 1, 3, 0, 1, 1, 1, 0 },
-};
-
-static unsigned int clkdiv_cpu1_4212[CPUFREQ_LEVEL_END][2] = {
- /* Clock divider value for following
- * { DIVCOPY, DIVHPM }
+ * values:
+ * freq
+ * clock divider for CORE, COREM0, COREM1, PERIPH, ATB, PCLK_DBG, APLL, CORE2
+ * clock divider for COPY, HPM, CORES
+ * PLL M, P, S
*/
- /* ARM L0: 1500 MHz */
- { 6, 0 },
-
- /* ARM L1: 1400 MHz */
- { 6, 0 },
-
- /* ARM L2: 1300 MHz */
- { 5, 0 },
-
- /* ARM L3: 1200 MHz */
- { 5, 0 },
-
- /* ARM L4: 1100 MHz */
- { 4, 0 },
-
- /* ARM L5: 1000 MHz */
- { 4, 0 },
-
- /* ARM L6: 900 MHz */
- { 3, 0 },
-
- /* ARM L7: 800 MHz */
- { 3, 0 },
-
- /* ARM L8: 700 MHz */
- { 3, 0 },
-
- /* ARM L9: 600 MHz */
- { 3, 0 },
-
- /* ARM L10: 500 MHz */
- { 3, 0 },
-
- /* ARM L11: 400 MHz */
- { 3, 0 },
-
- /* ARM L12: 300 MHz */
- { 3, 0 },
-
- /* ARM L13: 200 MHz */
- { 3, 0 },
-};
-
-static unsigned int clkdiv_cpu1_4412[CPUFREQ_LEVEL_END][3] = {
- /* Clock divider value for following
- * { DIVCOPY, DIVHPM, DIVCORES }
- */
- /* ARM L0: 1500 MHz */
- { 6, 0, 7 },
-
- /* ARM L1: 1400 MHz */
- { 6, 0, 6 },
-
- /* ARM L2: 1300 MHz */
- { 5, 0, 6 },
-
- /* ARM L3: 1200 MHz */
- { 5, 0, 5 },
-
- /* ARM L4: 1100 MHz */
- { 4, 0, 5 },
-
- /* ARM L5: 1000 MHz */
- { 4, 0, 4 },
-
- /* ARM L6: 900 MHz */
- { 3, 0, 4 },
-
- /* ARM L7: 800 MHz */
- { 3, 0, 3 },
-
- /* ARM L8: 700 MHz */
- { 3, 0, 3 },
-
- /* ARM L9: 600 MHz */
- { 3, 0, 2 },
-
- /* ARM L10: 500 MHz */
- { 3, 0, 2 },
-
- /* ARM L11: 400 MHz */
- { 3, 0, 1 },
-
- /* ARM L12: 300 MHz */
- { 3, 0, 1 },
-
- /* ARM L13: 200 MHz */
- { 3, 0, 0 },
-};
-
-static unsigned int exynos4x12_apll_pms_table[CPUFREQ_LEVEL_END] = {
- /* APLL FOUT L0: 1500 MHz */
- ((250 << 16) | (4 << 8) | (0x0)),
-
- /* APLL FOUT L1: 1400 MHz */
- ((175 << 16) | (3 << 8) | (0x0)),
-
- /* APLL FOUT L2: 1300 MHz */
- ((325 << 16) | (6 << 8) | (0x0)),
-
- /* APLL FOUT L3: 1200 MHz */
- ((200 << 16) | (4 << 8) | (0x0)),
-
- /* APLL FOUT L4: 1100 MHz */
- ((275 << 16) | (6 << 8) | (0x0)),
-
- /* APLL FOUT L5: 1000 MHz */
- ((125 << 16) | (3 << 8) | (0x0)),
-
- /* APLL FOUT L6: 900 MHz */
- ((150 << 16) | (4 << 8) | (0x0)),
-
- /* APLL FOUT L7: 800 MHz */
- ((100 << 16) | (3 << 8) | (0x0)),
-
- /* APLL FOUT L8: 700 MHz */
- ((175 << 16) | (3 << 8) | (0x1)),
-
- /* APLL FOUT L9: 600 MHz */
- ((200 << 16) | (4 << 8) | (0x1)),
-
- /* APLL FOUT L10: 500 MHz */
- ((125 << 16) | (3 << 8) | (0x1)),
-
- /* APLL FOUT L11 400 MHz */
- ((100 << 16) | (3 << 8) | (0x1)),
-
- /* APLL FOUT L12: 300 MHz */
- ((200 << 16) | (4 << 8) | (0x2)),
-
- /* APLL FOUT L13: 200 MHz */
- ((100 << 16) | (3 << 8) | (0x2)),
-};
-
-static const unsigned int asv_voltage_4x12[CPUFREQ_LEVEL_END] = {
- 1350000, 1287500, 1250000, 1187500, 1137500, 1087500, 1037500,
- 1000000, 987500, 975000, 950000, 925000, 900000, 900000
+ APLL_FREQ(1500, 0, 3, 7, 0, 6, 1, 2, 0, 6, 0, 7, 250, 4, 0),
+ APLL_FREQ(1400, 0, 3, 7, 0, 6, 1, 2, 0, 6, 0, 6, 175, 3, 0),
+ APLL_FREQ(1300, 0, 3, 7, 0, 5, 1, 2, 0, 5, 0, 6, 325, 6, 0),
+ APLL_FREQ(1200, 0, 3, 7, 0, 5, 1, 2, 0, 5, 0, 5, 200, 4, 0),
+ APLL_FREQ(1100, 0, 3, 6, 0, 4, 1, 2, 0, 4, 0, 5, 275, 6, 0),
+ APLL_FREQ(1000, 0, 2, 5, 0, 4, 1, 1, 0, 4, 0, 4, 125, 3, 0),
+ APLL_FREQ(900, 0, 2, 5, 0, 3, 1, 1, 0, 3, 0, 4, 150, 4, 0),
+ APLL_FREQ(800, 0, 2, 5, 0, 3, 1, 1, 0, 3, 0, 3, 100, 3, 0),
+ APLL_FREQ(700, 0, 2, 4, 0, 3, 1, 1, 0, 3, 0, 3, 175, 3, 1),
+ APLL_FREQ(600, 0, 2, 4, 0, 3, 1, 1, 0, 3, 0, 2, 200, 4, 1),
+ APLL_FREQ(500, 0, 2, 4, 0, 3, 1, 1, 0, 3, 0, 2, 125, 3, 1),
+ APLL_FREQ(400, 0, 2, 4, 0, 3, 1, 1, 0, 3, 0, 1, 100, 3, 1),
+ APLL_FREQ(300, 0, 2, 4, 0, 2, 1, 1, 0, 3, 0, 1, 200, 4, 2),
+ APLL_FREQ(200, 0, 1, 3, 0, 1, 1, 1, 0, 3, 0, 0, 100, 3, 2),
};
static void exynos4x12_set_clkdiv(unsigned int div_index)
@@ -306,7 +106,7 @@ static void exynos4x12_set_clkdiv(unsigned int div_index)
/* Change Divider - CPU0 */
- tmp = exynos4x12_clkdiv_table[div_index].clkdiv;
+ tmp = apll_freq_4x12[div_index].clk_div_cpu0;
__raw_writel(tmp, EXYNOS4_CLKDIV_CPU);
@@ -314,7 +114,7 @@ static void exynos4x12_set_clkdiv(unsigned int div_index)
cpu_relax();
/* Change Divider - CPU1 */
- tmp = exynos4x12_clkdiv_table[div_index].clkdiv1;
+ tmp = apll_freq_4x12[div_index].clk_div_cpu1;
__raw_writel(tmp, EXYNOS4_CLKDIV_CPU1);
if (soc_is_exynos4212())
@@ -341,14 +141,14 @@ static void exynos4x12_set_apll(unsigned int index)
} while (tmp != 0x2);
/* 2. Set APLL Lock time */
- pdiv = ((exynos4x12_apll_pms_table[index] >> 8) & 0x3f);
+ pdiv = ((apll_freq_4x12[index].mps >> 8) & 0x3f);
__raw_writel((pdiv * 250), EXYNOS4_APLL_LOCK);
/* 3. Change PLL PMS values */
tmp = __raw_readl(EXYNOS4_APLL_CON0);
tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0));
- tmp |= exynos4x12_apll_pms_table[index];
+ tmp |= apll_freq_4x12[index].mps;
__raw_writel(tmp, EXYNOS4_APLL_CON0);
/* 4. wait_lock_time */
@@ -367,10 +167,10 @@ static void exynos4x12_set_apll(unsigned int index)
} while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT));
}
-bool exynos4x12_pms_change(unsigned int old_index, unsigned int new_index)
+static bool exynos4x12_pms_change(unsigned int old_index, unsigned int new_index)
{
- unsigned int old_pm = exynos4x12_apll_pms_table[old_index] >> 8;
- unsigned int new_pm = exynos4x12_apll_pms_table[new_index] >> 8;
+ unsigned int old_pm = apll_freq_4x12[old_index].mps >> 8;
+ unsigned int new_pm = apll_freq_4x12[new_index].mps >> 8;
return (old_pm == new_pm) ? 0 : 1;
}
@@ -387,7 +187,7 @@ static void exynos4x12_set_frequency(unsigned int old_index,
/* 2. Change just s value in apll m,p,s value */
tmp = __raw_readl(EXYNOS4_APLL_CON0);
tmp &= ~(0x7 << 0);
- tmp |= (exynos4x12_apll_pms_table[new_index] & 0x7);
+ tmp |= apll_freq_4x12[new_index].mps & 0x7;
__raw_writel(tmp, EXYNOS4_APLL_CON0);
} else {
@@ -402,7 +202,7 @@ static void exynos4x12_set_frequency(unsigned int old_index,
/* 1. Change just s value in apll m,p,s value */
tmp = __raw_readl(EXYNOS4_APLL_CON0);
tmp &= ~(0x7 << 0);
- tmp |= (exynos4x12_apll_pms_table[new_index] & 0x7);
+ tmp |= apll_freq_4x12[new_index].mps & 0x7;
__raw_writel(tmp, EXYNOS4_APLL_CON0);
/* 2. Change the system clock divider values */
exynos4x12_set_clkdiv(new_index);
@@ -416,27 +216,10 @@ static void exynos4x12_set_frequency(unsigned int old_index,
}
}
-static void __init set_volt_table(void)
-{
- unsigned int i;
-
- max_support_idx = L1;
-
- /* Not supported */
- exynos4x12_freq_table[L0].frequency = CPUFREQ_ENTRY_INVALID;
-
- for (i = 0 ; i < CPUFREQ_LEVEL_END ; i++)
- exynos4x12_volt_table[i] = asv_voltage_4x12[i];
-}
-
int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info)
{
- int i;
- unsigned int tmp;
unsigned long rate;
- set_volt_table();
-
cpu_clk = clk_get(NULL, "armclk");
if (IS_ERR(cpu_clk))
return PTR_ERR(cpu_clk);
@@ -455,66 +238,14 @@ int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info)
if (IS_ERR(mout_apll))
goto err_mout_apll;
- for (i = L0; i < CPUFREQ_LEVEL_END; i++) {
-
- exynos4x12_clkdiv_table[i].index = i;
-
- tmp = __raw_readl(EXYNOS4_CLKDIV_CPU);
-
- tmp &= ~(EXYNOS4_CLKDIV_CPU0_CORE_MASK |
- EXYNOS4_CLKDIV_CPU0_COREM0_MASK |
- EXYNOS4_CLKDIV_CPU0_COREM1_MASK |
- EXYNOS4_CLKDIV_CPU0_PERIPH_MASK |
- EXYNOS4_CLKDIV_CPU0_ATB_MASK |
- EXYNOS4_CLKDIV_CPU0_PCLKDBG_MASK |
- EXYNOS4_CLKDIV_CPU0_APLL_MASK);
-
- if (soc_is_exynos4212()) {
- tmp |= ((clkdiv_cpu0_4212[i][0] << EXYNOS4_CLKDIV_CPU0_CORE_SHIFT) |
- (clkdiv_cpu0_4212[i][1] << EXYNOS4_CLKDIV_CPU0_COREM0_SHIFT) |
- (clkdiv_cpu0_4212[i][2] << EXYNOS4_CLKDIV_CPU0_COREM1_SHIFT) |
- (clkdiv_cpu0_4212[i][3] << EXYNOS4_CLKDIV_CPU0_PERIPH_SHIFT) |
- (clkdiv_cpu0_4212[i][4] << EXYNOS4_CLKDIV_CPU0_ATB_SHIFT) |
- (clkdiv_cpu0_4212[i][5] << EXYNOS4_CLKDIV_CPU0_PCLKDBG_SHIFT) |
- (clkdiv_cpu0_4212[i][6] << EXYNOS4_CLKDIV_CPU0_APLL_SHIFT));
- } else {
- tmp &= ~EXYNOS4_CLKDIV_CPU0_CORE2_MASK;
-
- tmp |= ((clkdiv_cpu0_4412[i][0] << EXYNOS4_CLKDIV_CPU0_CORE_SHIFT) |
- (clkdiv_cpu0_4412[i][1] << EXYNOS4_CLKDIV_CPU0_COREM0_SHIFT) |
- (clkdiv_cpu0_4412[i][2] << EXYNOS4_CLKDIV_CPU0_COREM1_SHIFT) |
- (clkdiv_cpu0_4412[i][3] << EXYNOS4_CLKDIV_CPU0_PERIPH_SHIFT) |
- (clkdiv_cpu0_4412[i][4] << EXYNOS4_CLKDIV_CPU0_ATB_SHIFT) |
- (clkdiv_cpu0_4412[i][5] << EXYNOS4_CLKDIV_CPU0_PCLKDBG_SHIFT) |
- (clkdiv_cpu0_4412[i][6] << EXYNOS4_CLKDIV_CPU0_APLL_SHIFT) |
- (clkdiv_cpu0_4412[i][7] << EXYNOS4_CLKDIV_CPU0_CORE2_SHIFT));
- }
-
- exynos4x12_clkdiv_table[i].clkdiv = tmp;
-
- tmp = __raw_readl(EXYNOS4_CLKDIV_CPU1);
-
- if (soc_is_exynos4212()) {
- tmp &= ~(EXYNOS4_CLKDIV_CPU1_COPY_MASK |
- EXYNOS4_CLKDIV_CPU1_HPM_MASK);
- tmp |= ((clkdiv_cpu1_4212[i][0] << EXYNOS4_CLKDIV_CPU1_COPY_SHIFT) |
- (clkdiv_cpu1_4212[i][1] << EXYNOS4_CLKDIV_CPU1_HPM_SHIFT));
- } else {
- tmp &= ~(EXYNOS4_CLKDIV_CPU1_COPY_MASK |
- EXYNOS4_CLKDIV_CPU1_HPM_MASK |
- EXYNOS4_CLKDIV_CPU1_CORES_MASK);
- tmp |= ((clkdiv_cpu1_4412[i][0] << EXYNOS4_CLKDIV_CPU1_COPY_SHIFT) |
- (clkdiv_cpu1_4412[i][1] << EXYNOS4_CLKDIV_CPU1_HPM_SHIFT) |
- (clkdiv_cpu1_4412[i][2] << EXYNOS4_CLKDIV_CPU1_CORES_SHIFT));
- }
- exynos4x12_clkdiv_table[i].clkdiv1 = tmp;
- }
+ if (soc_is_exynos4212())
+ apll_freq_4x12 = apll_freq_4212;
+ else
+ apll_freq_4x12 = apll_freq_4412;
info->mpll_freq_khz = rate;
- info->pm_lock_idx = L5;
+ /* 800 MHz */
info->pll_safe_idx = L7;
- info->max_support_idx = max_support_idx;
- info->min_support_idx = min_support_idx;
info->cpu_clk = cpu_clk;
info->volt_table = exynos4x12_volt_table;
info->freq_table = exynos4x12_freq_table;
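On 4x12 the unsupported 1500 MHz level is now marked CPUFREQ_ENTRY_INVALID directly in the table rather than being patched in by set_volt_table() at init time; the generic freq_table helpers skip such entries when deriving policy limits. A sketch of that skip, mirroring what cpufreq_frequency_table_cpuinfo() does:

/* Mirrors how the generic helpers derive limits past invalid entries */
static void table_limits_sketch(struct cpufreq_frequency_table *table,
				unsigned int *min, unsigned int *max)
{
	int i;

	*min = ~0;
	*max = 0;
	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned int freq = table[i].frequency;

		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;	/* e.g. the 4x12 L0 (1500 MHz) entry */
		if (freq < *min)
			*min = freq;
		if (freq > *max)
			*max = freq;
	}
}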
diff --git a/drivers/cpufreq/exynos5250-cpufreq.c b/drivers/cpufreq/exynos5250-cpufreq.c
index e64c253cb169..9fae466d7746 100644
--- a/drivers/cpufreq/exynos5250-cpufreq.c
+++ b/drivers/cpufreq/exynos5250-cpufreq.c
@@ -19,25 +19,21 @@
#include <mach/map.h>
#include <mach/regs-clock.h>
-#include <mach/cpufreq.h>
-#define CPUFREQ_LEVEL_END (L15 + 1)
+#include "exynos-cpufreq.h"
-static int max_support_idx;
-static int min_support_idx = (CPUFREQ_LEVEL_END - 1);
static struct clk *cpu_clk;
static struct clk *moutcore;
static struct clk *mout_mpll;
static struct clk *mout_apll;
-struct cpufreq_clkdiv {
- unsigned int index;
- unsigned int clkdiv;
- unsigned int clkdiv1;
+static unsigned int exynos5250_volt_table[] = {
+ 1300000, 1250000, 1225000, 1200000, 1150000,
+ 1125000, 1100000, 1075000, 1050000, 1025000,
+ 1012500, 1000000, 975000, 950000, 937500,
+ 925000
};
-static unsigned int exynos5250_volt_table[CPUFREQ_LEVEL_END];
-
static struct cpufreq_frequency_table exynos5250_freq_table[] = {
{L0, 1700 * 1000},
{L1, 1600 * 1000},
@@ -47,8 +43,8 @@ static struct cpufreq_frequency_table exynos5250_freq_table[] = {
{L5, 1200 * 1000},
{L6, 1100 * 1000},
{L7, 1000 * 1000},
- {L8, 900 * 1000},
- {L9, 800 * 1000},
+ {L8, 900 * 1000},
+ {L9, 800 * 1000},
{L10, 700 * 1000},
{L11, 600 * 1000},
{L12, 500 * 1000},
@@ -58,78 +54,30 @@ static struct cpufreq_frequency_table exynos5250_freq_table[] = {
{0, CPUFREQ_TABLE_END},
};
-static struct cpufreq_clkdiv exynos5250_clkdiv_table[CPUFREQ_LEVEL_END];
-
-static unsigned int clkdiv_cpu0_5250[CPUFREQ_LEVEL_END][8] = {
+static struct apll_freq apll_freq_5250[] = {
/*
- * Clock divider value for following
- * { ARM, CPUD, ACP, PERIPH, ATB, PCLK_DBG, APLL, ARM2 }
- */
- { 0, 3, 7, 7, 7, 3, 5, 0 }, /* 1700 MHz */
- { 0, 3, 7, 7, 7, 1, 4, 0 }, /* 1600 MHz */
- { 0, 2, 7, 7, 7, 1, 4, 0 }, /* 1500 MHz */
- { 0, 2, 7, 7, 6, 1, 4, 0 }, /* 1400 MHz */
- { 0, 2, 7, 7, 6, 1, 3, 0 }, /* 1300 MHz */
- { 0, 2, 7, 7, 5, 1, 3, 0 }, /* 1200 MHz */
- { 0, 3, 7, 7, 5, 1, 3, 0 }, /* 1100 MHz */
- { 0, 1, 7, 7, 4, 1, 2, 0 }, /* 1000 MHz */
- { 0, 1, 7, 7, 4, 1, 2, 0 }, /* 900 MHz */
- { 0, 1, 7, 7, 4, 1, 2, 0 }, /* 800 MHz */
- { 0, 1, 7, 7, 3, 1, 1, 0 }, /* 700 MHz */
- { 0, 1, 7, 7, 3, 1, 1, 0 }, /* 600 MHz */
- { 0, 1, 7, 7, 2, 1, 1, 0 }, /* 500 MHz */
- { 0, 1, 7, 7, 2, 1, 1, 0 }, /* 400 MHz */
- { 0, 1, 7, 7, 1, 1, 1, 0 }, /* 300 MHz */
- { 0, 1, 7, 7, 1, 1, 1, 0 }, /* 200 MHz */
-};
-
-static unsigned int clkdiv_cpu1_5250[CPUFREQ_LEVEL_END][2] = {
- /* Clock divider value for following
- * { COPY, HPM }
+ * values:
+ * freq
+ * clock divider for ARM, CPUD, ACP, PERIPH, ATB, PCLK_DBG, APLL, ARM2
+ * clock divider for COPY, HPM, RESERVED
+ * PLL M, P, S
*/
- { 0, 2 }, /* 1700 MHz */
- { 0, 2 }, /* 1600 MHz */
- { 0, 2 }, /* 1500 MHz */
- { 0, 2 }, /* 1400 MHz */
- { 0, 2 }, /* 1300 MHz */
- { 0, 2 }, /* 1200 MHz */
- { 0, 2 }, /* 1100 MHz */
- { 0, 2 }, /* 1000 MHz */
- { 0, 2 }, /* 900 MHz */
- { 0, 2 }, /* 800 MHz */
- { 0, 2 }, /* 700 MHz */
- { 0, 2 }, /* 600 MHz */
- { 0, 2 }, /* 500 MHz */
- { 0, 2 }, /* 400 MHz */
- { 0, 2 }, /* 300 MHz */
- { 0, 2 }, /* 200 MHz */
-};
-
-static unsigned int exynos5_apll_pms_table[CPUFREQ_LEVEL_END] = {
- ((425 << 16) | (6 << 8) | 0), /* 1700 MHz */
- ((200 << 16) | (3 << 8) | 0), /* 1600 MHz */
- ((250 << 16) | (4 << 8) | 0), /* 1500 MHz */
- ((175 << 16) | (3 << 8) | 0), /* 1400 MHz */
- ((325 << 16) | (6 << 8) | 0), /* 1300 MHz */
- ((200 << 16) | (4 << 8) | 0), /* 1200 MHz */
- ((275 << 16) | (6 << 8) | 0), /* 1100 MHz */
- ((125 << 16) | (3 << 8) | 0), /* 1000 MHz */
- ((150 << 16) | (4 << 8) | 0), /* 900 MHz */
- ((100 << 16) | (3 << 8) | 0), /* 800 MHz */
- ((175 << 16) | (3 << 8) | 1), /* 700 MHz */
- ((200 << 16) | (4 << 8) | 1), /* 600 MHz */
- ((125 << 16) | (3 << 8) | 1), /* 500 MHz */
- ((100 << 16) | (3 << 8) | 1), /* 400 MHz */
- ((200 << 16) | (4 << 8) | 2), /* 300 MHz */
- ((100 << 16) | (3 << 8) | 2), /* 200 MHz */
-};
-
-/* ASV group voltage table */
-static const unsigned int asv_voltage_5250[CPUFREQ_LEVEL_END] = {
- 1300000, 1250000, 1225000, 1200000, 1150000,
- 1125000, 1100000, 1075000, 1050000, 1025000,
- 1012500, 1000000, 975000, 950000, 937500,
- 925000
+ APLL_FREQ(1700, 0, 3, 7, 7, 7, 3, 5, 0, 0, 2, 0, 425, 6, 0),
+ APLL_FREQ(1600, 0, 3, 7, 7, 7, 1, 4, 0, 0, 2, 0, 200, 3, 0),
+ APLL_FREQ(1500, 0, 2, 7, 7, 7, 1, 4, 0, 0, 2, 0, 250, 4, 0),
+ APLL_FREQ(1400, 0, 2, 7, 7, 6, 1, 4, 0, 0, 2, 0, 175, 3, 0),
+ APLL_FREQ(1300, 0, 2, 7, 7, 6, 1, 3, 0, 0, 2, 0, 325, 6, 0),
+ APLL_FREQ(1200, 0, 2, 7, 7, 5, 1, 3, 0, 0, 2, 0, 200, 4, 0),
+ APLL_FREQ(1100, 0, 3, 7, 7, 5, 1, 3, 0, 0, 2, 0, 275, 6, 0),
+ APLL_FREQ(1000, 0, 1, 7, 7, 4, 1, 2, 0, 0, 2, 0, 125, 3, 0),
+ APLL_FREQ(900, 0, 1, 7, 7, 4, 1, 2, 0, 0, 2, 0, 150, 4, 0),
+ APLL_FREQ(800, 0, 1, 7, 7, 4, 1, 2, 0, 0, 2, 0, 100, 3, 0),
+ APLL_FREQ(700, 0, 1, 7, 7, 3, 1, 1, 0, 0, 2, 0, 175, 3, 1),
+ APLL_FREQ(600, 0, 1, 7, 7, 3, 1, 1, 0, 0, 2, 0, 200, 4, 1),
+ APLL_FREQ(500, 0, 1, 7, 7, 2, 1, 1, 0, 0, 2, 0, 125, 3, 1),
+ APLL_FREQ(400, 0, 1, 7, 7, 2, 1, 1, 0, 0, 2, 0, 100, 3, 1),
+ APLL_FREQ(300, 0, 1, 7, 7, 1, 1, 1, 0, 0, 2, 0, 200, 4, 2),
+ APLL_FREQ(200, 0, 1, 7, 7, 1, 1, 1, 0, 0, 2, 0, 100, 3, 2),
};
static void set_clkdiv(unsigned int div_index)
@@ -138,7 +86,7 @@ static void set_clkdiv(unsigned int div_index)
/* Change Divider - CPU0 */
- tmp = exynos5250_clkdiv_table[div_index].clkdiv;
+ tmp = apll_freq_5250[div_index].clk_div_cpu0;
__raw_writel(tmp, EXYNOS5_CLKDIV_CPU0);
@@ -146,7 +94,7 @@ static void set_clkdiv(unsigned int div_index)
cpu_relax();
/* Change Divider - CPU1 */
- tmp = exynos5250_clkdiv_table[div_index].clkdiv1;
+ tmp = apll_freq_5250[div_index].clk_div_cpu1;
__raw_writel(tmp, EXYNOS5_CLKDIV_CPU1);
@@ -169,14 +117,14 @@ static void set_apll(unsigned int new_index,
} while (tmp != 0x2);
/* 2. Set APLL Lock time */
- pdiv = ((exynos5_apll_pms_table[new_index] >> 8) & 0x3f);
+ pdiv = ((apll_freq_5250[new_index].mps >> 8) & 0x3f);
__raw_writel((pdiv * 250), EXYNOS5_APLL_LOCK);
/* 3. Change PLL PMS values */
tmp = __raw_readl(EXYNOS5_APLL_CON0);
tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0));
- tmp |= exynos5_apll_pms_table[new_index];
+ tmp |= apll_freq_5250[new_index].mps;
__raw_writel(tmp, EXYNOS5_APLL_CON0);
/* 4. wait_lock_time */
@@ -196,10 +144,10 @@ static void set_apll(unsigned int new_index,
}
-bool exynos5250_pms_change(unsigned int old_index, unsigned int new_index)
+static bool exynos5250_pms_change(unsigned int old_index, unsigned int new_index)
{
- unsigned int old_pm = (exynos5_apll_pms_table[old_index] >> 8);
- unsigned int new_pm = (exynos5_apll_pms_table[new_index] >> 8);
+ unsigned int old_pm = apll_freq_5250[old_index].mps >> 8;
+ unsigned int new_pm = apll_freq_5250[new_index].mps >> 8;
return (old_pm == new_pm) ? 0 : 1;
}
@@ -216,7 +164,7 @@ static void exynos5250_set_frequency(unsigned int old_index,
/* 2. Change just s value in apll m,p,s value */
tmp = __raw_readl(EXYNOS5_APLL_CON0);
tmp &= ~(0x7 << 0);
- tmp |= (exynos5_apll_pms_table[new_index] & 0x7);
+ tmp |= apll_freq_5250[new_index].mps & 0x7;
__raw_writel(tmp, EXYNOS5_APLL_CON0);
} else {
@@ -231,7 +179,7 @@ static void exynos5250_set_frequency(unsigned int old_index,
/* 1. Change just s value in apll m,p,s value */
tmp = __raw_readl(EXYNOS5_APLL_CON0);
tmp &= ~(0x7 << 0);
- tmp |= (exynos5_apll_pms_table[new_index] & 0x7);
+ tmp |= apll_freq_5250[new_index].mps & 0x7;
__raw_writel(tmp, EXYNOS5_APLL_CON0);
/* 2. Change the system clock divider values */
set_clkdiv(new_index);
@@ -245,24 +193,10 @@ static void exynos5250_set_frequency(unsigned int old_index,
}
}
-static void __init set_volt_table(void)
-{
- unsigned int i;
-
- max_support_idx = L0;
-
- for (i = 0 ; i < CPUFREQ_LEVEL_END ; i++)
- exynos5250_volt_table[i] = asv_voltage_5250[i];
-}
-
int exynos5250_cpufreq_init(struct exynos_dvfs_info *info)
{
- int i;
- unsigned int tmp;
unsigned long rate;
- set_volt_table();
-
cpu_clk = clk_get(NULL, "armclk");
if (IS_ERR(cpu_clk))
return PTR_ERR(cpu_clk);
@@ -281,44 +215,9 @@ int exynos5250_cpufreq_init(struct exynos_dvfs_info *info)
if (IS_ERR(mout_apll))
goto err_mout_apll;
- for (i = L0; i < CPUFREQ_LEVEL_END; i++) {
-
- exynos5250_clkdiv_table[i].index = i;
-
- tmp = __raw_readl(EXYNOS5_CLKDIV_CPU0);
-
- tmp &= ~((0x7 << 0) | (0x7 << 4) | (0x7 << 8) |
- (0x7 << 12) | (0x7 << 16) | (0x7 << 20) |
- (0x7 << 24) | (0x7 << 28));
-
- tmp |= ((clkdiv_cpu0_5250[i][0] << 0) |
- (clkdiv_cpu0_5250[i][1] << 4) |
- (clkdiv_cpu0_5250[i][2] << 8) |
- (clkdiv_cpu0_5250[i][3] << 12) |
- (clkdiv_cpu0_5250[i][4] << 16) |
- (clkdiv_cpu0_5250[i][5] << 20) |
- (clkdiv_cpu0_5250[i][6] << 24) |
- (clkdiv_cpu0_5250[i][7] << 28));
-
- exynos5250_clkdiv_table[i].clkdiv = tmp;
-
- tmp = __raw_readl(EXYNOS5_CLKDIV_CPU1);
-
- tmp &= ~((0x7 << 0) | (0x7 << 4));
-
- tmp |= ((clkdiv_cpu1_5250[i][0] << 0) |
- (clkdiv_cpu1_5250[i][1] << 4));
-
- exynos5250_clkdiv_table[i].clkdiv1 = tmp;
- }
-
info->mpll_freq_khz = rate;
- /* 1000Mhz */
- info->pm_lock_idx = L7;
/* 800 MHz */
info->pll_safe_idx = L9;
- info->max_support_idx = max_support_idx;
- info->min_support_idx = min_support_idx;
info->cpu_clk = cpu_clk;
info->volt_table = exynos5250_volt_table;
info->freq_table = exynos5250_freq_table;
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 49cda256efb2..d7a79662e24c 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -63,9 +63,6 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n",
policy->min, policy->max, policy->cpu);
- if (!cpu_online(policy->cpu))
- return -EINVAL;
-
cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
policy->cpuinfo.max_freq);
@@ -121,9 +118,6 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
break;
}
- if (!cpu_online(policy->cpu))
- return -EINVAL;
-
for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
unsigned int freq = table[i].frequency;
if (freq == CPUFREQ_ENTRY_INVALID)
@@ -227,6 +221,15 @@ void cpufreq_frequency_table_put_attr(unsigned int cpu)
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr);
+void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy)
+{
+ pr_debug("Updating show_table for new_cpu %u from last_cpu %u\n",
+ policy->cpu, policy->last_cpu);
+ per_cpu(cpufreq_show_table, policy->cpu) = per_cpu(cpufreq_show_table,
+ policy->last_cpu);
+ per_cpu(cpufreq_show_table, policy->last_cpu) = NULL;
+}
+
struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
{
return per_cpu(cpufreq_show_table, cpu);
diff --git a/drivers/cpufreq/highbank-cpufreq.c b/drivers/cpufreq/highbank-cpufreq.c
new file mode 100644
index 000000000000..66e3a71b81a3
--- /dev/null
+++ b/drivers/cpufreq/highbank-cpufreq.c
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2012 Calxeda, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This driver provides the clk notifier callbacks that are used when
+ * the cpufreq-cpu0 driver changes to frequency to alert the highbank
+ * EnergyCore Management Engine (ECME) about the need to change
+ * voltage. The ECME interfaces with the actual voltage regulators.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/mailbox.h>
+#include <linux/platform_device.h>
+
+#define HB_CPUFREQ_CHANGE_NOTE 0x80000001
+#define HB_CPUFREQ_IPC_LEN 7
+#define HB_CPUFREQ_VOLT_RETRIES 15
+
+static int hb_voltage_change(unsigned int freq)
+{
+ int i;
+ u32 msg[HB_CPUFREQ_IPC_LEN];
+
+ msg[0] = HB_CPUFREQ_CHANGE_NOTE;
+ msg[1] = freq / 1000000;
+ for (i = 2; i < HB_CPUFREQ_IPC_LEN; i++)
+ msg[i] = 0;
+
+ return pl320_ipc_transmit(msg);
+}
+
+static int hb_cpufreq_clk_notify(struct notifier_block *nb,
+ unsigned long action, void *hclk)
+{
+ struct clk_notifier_data *clk_data = hclk;
+ int i = 0;
+
+ if (action == PRE_RATE_CHANGE) {
+ if (clk_data->new_rate > clk_data->old_rate)
+ while (hb_voltage_change(clk_data->new_rate))
+ if (i++ > HB_CPUFREQ_VOLT_RETRIES)
+ return NOTIFY_BAD;
+ } else if (action == POST_RATE_CHANGE) {
+ if (clk_data->new_rate < clk_data->old_rate)
+ while (hb_voltage_change(clk_data->new_rate))
+ if (i++ > HB_CPUFREQ_VOLT_RETRIES)
+ return NOTIFY_BAD;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block hb_cpufreq_clk_nb = {
+ .notifier_call = hb_cpufreq_clk_notify,
+};
+
+static int hb_cpufreq_driver_init(void)
+{
+ struct platform_device_info devinfo = { .name = "cpufreq-cpu0", };
+ struct device *cpu_dev;
+ struct clk *cpu_clk;
+ struct device_node *np;
+ int ret;
+
+ if (!of_machine_is_compatible("calxeda,highbank"))
+ return -ENODEV;
+
+ for_each_child_of_node(of_find_node_by_path("/cpus"), np)
+ if (of_get_property(np, "operating-points", NULL))
+ break;
+
+ if (!np) {
+ pr_err("failed to find highbank cpufreq node\n");
+ return -ENOENT;
+ }
+
+ cpu_dev = get_cpu_device(0);
+ if (!cpu_dev) {
+ pr_err("failed to get highbank cpufreq device\n");
+ ret = -ENODEV;
+ goto out_put_node;
+ }
+
+ cpu_dev->of_node = np;
+
+ cpu_clk = clk_get(cpu_dev, NULL);
+ if (IS_ERR(cpu_clk)) {
+ ret = PTR_ERR(cpu_clk);
+ pr_err("failed to get cpu0 clock: %d\n", ret);
+ goto out_put_node;
+ }
+
+ ret = clk_notifier_register(cpu_clk, &hb_cpufreq_clk_nb);
+ if (ret) {
+ pr_err("failed to register clk notifier: %d\n", ret);
+ goto out_put_node;
+ }
+
+ /* Instantiate cpufreq-cpu0 */
+ platform_device_register_full(&devinfo);
+
+out_put_node:
+ of_node_put(np);
+ return ret;
+}
+module_init(hb_cpufreq_driver_init);
+
+MODULE_AUTHOR("Mark Langsdorf <mark.langsdorf@calxeda.com>");
+MODULE_DESCRIPTION("Calxeda Highbank cpufreq driver");
+MODULE_LICENSE("GPL");
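The notifier above encodes the standard DVFS safety rule: raise the voltage before a frequency increase (PRE_RATE_CHANGE) and lower it only after a decrease has completed (POST_RATE_CHANGE), retrying the mailbox call a bounded number of times. Stripped of the retry loop, the ordering looks like this; set_voltage_for() stands in for hb_voltage_change() and is hypothetical:

/* Sketch: voltage leads frequency going up, trails it going down */
static int dvfs_clk_notify_sketch(struct notifier_block *nb,
				  unsigned long action, void *data)
{
	struct clk_notifier_data *cnd = data;

	if (action == PRE_RATE_CHANGE && cnd->new_rate > cnd->old_rate)
		return set_voltage_for(cnd->new_rate) ? NOTIFY_BAD : NOTIFY_OK;

	if (action == POST_RATE_CHANGE && cnd->new_rate < cnd->old_rate)
		return set_voltage_for(cnd->new_rate) ? NOTIFY_BAD : NOTIFY_OK;

	return NOTIFY_DONE;
}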
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
new file mode 100644
index 000000000000..54e336de373b
--- /dev/null
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -0,0 +1,336 @@
+/*
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/opp.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#define PU_SOC_VOLTAGE_NORMAL 1250000
+#define PU_SOC_VOLTAGE_HIGH 1275000
+#define FREQ_1P2_GHZ 1200000000
+
+static struct regulator *arm_reg;
+static struct regulator *pu_reg;
+static struct regulator *soc_reg;
+
+static struct clk *arm_clk;
+static struct clk *pll1_sys_clk;
+static struct clk *pll1_sw_clk;
+static struct clk *step_clk;
+static struct clk *pll2_pfd2_396m_clk;
+
+static struct device *cpu_dev;
+static struct cpufreq_frequency_table *freq_table;
+static unsigned int transition_latency;
+
+static int imx6q_verify_speed(struct cpufreq_policy *policy)
+{
+ return cpufreq_frequency_table_verify(policy, freq_table);
+}
+
+static unsigned int imx6q_get_speed(unsigned int cpu)
+{
+ return clk_get_rate(arm_clk) / 1000;
+}
+
+static int imx6q_set_target(struct cpufreq_policy *policy,
+ unsigned int target_freq, unsigned int relation)
+{
+ struct cpufreq_freqs freqs;
+ struct opp *opp;
+ unsigned long freq_hz, volt, volt_old;
+ unsigned int index, cpu;
+ int ret;
+
+ ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
+ relation, &index);
+ if (ret) {
+ dev_err(cpu_dev, "failed to match target frequency %d: %d\n",
+ target_freq, ret);
+ return ret;
+ }
+
+ freqs.new = freq_table[index].frequency;
+ freq_hz = freqs.new * 1000;
+ freqs.old = clk_get_rate(arm_clk) / 1000;
+
+ if (freqs.old == freqs.new)
+ return 0;
+
+ for_each_online_cpu(cpu) {
+ freqs.cpu = cpu;
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ }
+
+ rcu_read_lock();
+ opp = opp_find_freq_ceil(cpu_dev, &freq_hz);
+ if (IS_ERR(opp)) {
+ rcu_read_unlock();
+ dev_err(cpu_dev, "failed to find OPP for %ld\n", freq_hz);
+ return PTR_ERR(opp);
+ }
+
+ volt = opp_get_voltage(opp);
+ rcu_read_unlock();
+ volt_old = regulator_get_voltage(arm_reg);
+
+ dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n",
+ freqs.old / 1000, volt_old / 1000,
+ freqs.new / 1000, volt / 1000);
+
+ /* scaling up? scale voltage before frequency */
+ if (freqs.new > freqs.old) {
+ ret = regulator_set_voltage_tol(arm_reg, volt, 0);
+ if (ret) {
+ dev_err(cpu_dev,
+ "failed to scale vddarm up: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Need to increase vddpu and vddsoc for safety
+ * if we are about to run at 1.2 GHz.
+ */
+ if (freqs.new == FREQ_1P2_GHZ / 1000) {
+ regulator_set_voltage_tol(pu_reg,
+ PU_SOC_VOLTAGE_HIGH, 0);
+ regulator_set_voltage_tol(soc_reg,
+ PU_SOC_VOLTAGE_HIGH, 0);
+ }
+ }
+
+ /*
+ * The setpoints are selected per PLL/PFD frequencies, so we need to
+ * reprogram PLL for frequency scaling. The procedure of reprogramming
+ * PLL1 is as below.
+ *
+ * - Enable pll2_pfd2_396m_clk and reparent pll1_sw_clk to it
+ * - Reprogram pll1_sys_clk and reparent pll1_sw_clk back to it
+ * - Disable pll2_pfd2_396m_clk
+ */
+ clk_prepare_enable(pll2_pfd2_396m_clk);
+ clk_set_parent(step_clk, pll2_pfd2_396m_clk);
+ clk_set_parent(pll1_sw_clk, step_clk);
+ if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk)) {
+ clk_set_rate(pll1_sys_clk, freqs.new * 1000);
+ /*
+ * If we are leaving 396 MHz set-point, we need to enable
+ * pll1_sys_clk and disable pll2_pfd2_396m_clk to keep
+ * their use count correct.
+ */
+ if (freqs.old * 1000 <= clk_get_rate(pll2_pfd2_396m_clk)) {
+ clk_prepare_enable(pll1_sys_clk);
+ clk_disable_unprepare(pll2_pfd2_396m_clk);
+ }
+ clk_set_parent(pll1_sw_clk, pll1_sys_clk);
+ clk_disable_unprepare(pll2_pfd2_396m_clk);
+ } else {
+ /*
+ * Disable pll1_sys_clk if pll2_pfd2_396m_clk is sufficient
+ * to provide the frequency.
+ */
+ clk_disable_unprepare(pll1_sys_clk);
+ }
+
+ /* Ensure the arm clock divider is what we expect */
+ ret = clk_set_rate(arm_clk, freqs.new * 1000);
+ if (ret) {
+ dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
+ regulator_set_voltage_tol(arm_reg, volt_old, 0);
+ return ret;
+ }
+
+ /* scaling down? scale voltage after frequency */
+ if (freqs.new < freqs.old) {
+ ret = regulator_set_voltage_tol(arm_reg, volt, 0);
+ if (ret)
+ dev_warn(cpu_dev,
+ "failed to scale vddarm down: %d\n", ret);
+
+ if (freqs.old == FREQ_1P2_GHZ / 1000) {
+ regulator_set_voltage_tol(pu_reg,
+ PU_SOC_VOLTAGE_NORMAL, 0);
+ regulator_set_voltage_tol(soc_reg,
+ PU_SOC_VOLTAGE_NORMAL, 0);
+ }
+ }
+
+ for_each_online_cpu(cpu) {
+ freqs.cpu = cpu;
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ }
+
+ return 0;
+}
+
+static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
+{
+ int ret;
+
+ ret = cpufreq_frequency_table_cpuinfo(policy, freq_table);
+ if (ret) {
+ dev_err(cpu_dev, "invalid frequency table: %d\n", ret);
+ return ret;
+ }
+
+ policy->cpuinfo.transition_latency = transition_latency;
+ policy->cur = clk_get_rate(arm_clk) / 1000;
+ cpumask_setall(policy->cpus);
+ cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
+
+ return 0;
+}
+
+static int imx6q_cpufreq_exit(struct cpufreq_policy *policy)
+{
+ cpufreq_frequency_table_put_attr(policy->cpu);
+ return 0;
+}
+
+static struct freq_attr *imx6q_cpufreq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
+static struct cpufreq_driver imx6q_cpufreq_driver = {
+ .verify = imx6q_verify_speed,
+ .target = imx6q_set_target,
+ .get = imx6q_get_speed,
+ .init = imx6q_cpufreq_init,
+ .exit = imx6q_cpufreq_exit,
+ .name = "imx6q-cpufreq",
+ .attr = imx6q_cpufreq_attr,
+};
+
+static int imx6q_cpufreq_probe(struct platform_device *pdev)
+{
+ struct device_node *np;
+ struct opp *opp;
+ unsigned long min_volt, max_volt;
+ int num, ret;
+
+ cpu_dev = &pdev->dev;
+
+ np = of_find_node_by_path("/cpus/cpu@0");
+ if (!np) {
+ dev_err(cpu_dev, "failed to find cpu0 node\n");
+ return -ENOENT;
+ }
+
+ cpu_dev->of_node = np;
+
+ arm_clk = devm_clk_get(cpu_dev, "arm");
+ pll1_sys_clk = devm_clk_get(cpu_dev, "pll1_sys");
+ pll1_sw_clk = devm_clk_get(cpu_dev, "pll1_sw");
+ step_clk = devm_clk_get(cpu_dev, "step");
+ pll2_pfd2_396m_clk = devm_clk_get(cpu_dev, "pll2_pfd2_396m");
+ if (IS_ERR(arm_clk) || IS_ERR(pll1_sys_clk) || IS_ERR(pll1_sw_clk) ||
+ IS_ERR(step_clk) || IS_ERR(pll2_pfd2_396m_clk)) {
+ dev_err(cpu_dev, "failed to get clocks\n");
+ ret = -ENOENT;
+ goto put_node;
+ }
+
+ arm_reg = devm_regulator_get(cpu_dev, "arm");
+ pu_reg = devm_regulator_get(cpu_dev, "pu");
+ soc_reg = devm_regulator_get(cpu_dev, "soc");
+ if (IS_ERR(arm_reg) || IS_ERR(pu_reg) || IS_ERR(soc_reg)) {
+ dev_err(cpu_dev, "failed to get regulators\n");
+ ret = -ENOENT;
+ goto put_node;
+ }
+
+ /* We expect an OPP table supplied by platform */
+ num = opp_get_opp_count(cpu_dev);
+ if (num < 0) {
+ ret = num;
+ dev_err(cpu_dev, "no OPP table is found: %d\n", ret);
+ goto put_node;
+ }
+
+ ret = opp_init_cpufreq_table(cpu_dev, &freq_table);
+ if (ret) {
+ dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
+ goto put_node;
+ }
+
+ if (of_property_read_u32(np, "clock-latency", &transition_latency))
+ transition_latency = CPUFREQ_ETERNAL;
+
+ /*
+ * OPP is maintained in order of increasing frequency, and
+ * freq_table initialised from OPP is therefore sorted in the
+ * same order.
+ */
+ rcu_read_lock();
+ opp = opp_find_freq_exact(cpu_dev,
+ freq_table[0].frequency * 1000, true);
+ min_volt = opp_get_voltage(opp);
+ opp = opp_find_freq_exact(cpu_dev,
+ freq_table[--num].frequency * 1000, true);
+ max_volt = opp_get_voltage(opp);
+ rcu_read_unlock();
+ ret = regulator_set_voltage_time(arm_reg, min_volt, max_volt);
+ if (ret > 0)
+ transition_latency += ret * 1000;
+
+ /* Count vddpu and vddsoc latency in for 1.2 GHz support */
+ if (freq_table[num].frequency == FREQ_1P2_GHZ / 1000) {
+ ret = regulator_set_voltage_time(pu_reg, PU_SOC_VOLTAGE_NORMAL,
+ PU_SOC_VOLTAGE_HIGH);
+ if (ret > 0)
+ transition_latency += ret * 1000;
+ ret = regulator_set_voltage_time(soc_reg, PU_SOC_VOLTAGE_NORMAL,
+ PU_SOC_VOLTAGE_HIGH);
+ if (ret > 0)
+ transition_latency += ret * 1000;
+ }
+
+ ret = cpufreq_register_driver(&imx6q_cpufreq_driver);
+ if (ret) {
+ dev_err(cpu_dev, "failed to register driver: %d\n", ret);
+ goto free_freq_table;
+ }
+
+ of_node_put(np);
+ return 0;
+
+free_freq_table:
+ opp_free_cpufreq_table(cpu_dev, &freq_table);
+put_node:
+ of_node_put(np);
+ return ret;
+}
+
+static int imx6q_cpufreq_remove(struct platform_device *pdev)
+{
+ cpufreq_unregister_driver(&imx6q_cpufreq_driver);
+ opp_free_cpufreq_table(cpu_dev, &freq_table);
+
+ return 0;
+}
+
+static struct platform_driver imx6q_cpufreq_platdrv = {
+ .driver = {
+ .name = "imx6q-cpufreq",
+ .owner = THIS_MODULE,
+ },
+ .probe = imx6q_cpufreq_probe,
+ .remove = imx6q_cpufreq_remove,
+};
+module_platform_driver(imx6q_cpufreq_platdrv);
+
+MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
+MODULE_DESCRIPTION("Freescale i.MX6Q cpufreq driver");
+MODULE_LICENSE("GPL");
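One subtlety in the probe path above: regulator_set_voltage_time() reports microseconds while transition_latency is in nanoseconds, hence the ret * 1000 additions. A worked example with illustrative numbers:

/*
 * All figures illustrative, not taken from real hardware:
 *
 *   clock-latency from DT            :  61036 ns
 *   vddarm ramp, min -> max voltage  :    125 us  ->  +125000 ns
 *   vddpu ramp, 1.25 V -> 1.275 V    :      2 us  ->    +2000 ns
 *   vddsoc ramp, 1.25 V -> 1.275 V   :      2 us  ->    +2000 ns
 *   ------------------------------------------------------------
 *   transition_latency               ~ 190036 ns
 *
 * The vddpu/vddsoc terms are only added when the table tops out at
 * the 1.2 GHz set-point, since that is the only level that moves them.
 */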
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
new file mode 100644
index 000000000000..096fde0ebcb5
--- /dev/null
+++ b/drivers/cpufreq/intel_pstate.c
@@ -0,0 +1,823 @@
+/*
+ * intel_pstate.c: Native P state management for Intel processors
+ *
+ * (C) Copyright 2012 Intel Corporation
+ * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kernel_stat.h>
+#include <linux/module.h>
+#include <linux/ktime.h>
+#include <linux/hrtimer.h>
+#include <linux/tick.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <trace/events/power.h>
+
+#include <asm/div64.h>
+#include <asm/msr.h>
+#include <asm/cpu_device_id.h>
+
+#define SAMPLE_COUNT 3
+
+#define FRAC_BITS 8
+#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
+#define fp_toint(X) ((X) >> FRAC_BITS)
+
+static inline int32_t mul_fp(int32_t x, int32_t y)
+{
+ return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
+}
+
+static inline int32_t div_fp(int32_t x, int32_t y)
+{
+ return div_s64((int64_t)x << FRAC_BITS, (int64_t)y);
+}
+
+struct sample {
+ ktime_t start_time;
+ ktime_t end_time;
+ int core_pct_busy;
+ int pstate_pct_busy;
+ u64 duration_us;
+ u64 idletime_us;
+ u64 aperf;
+ u64 mperf;
+ int freq;
+};
+
+struct pstate_data {
+ int current_pstate;
+ int min_pstate;
+ int max_pstate;
+ int turbo_pstate;
+};
+
+struct _pid {
+ int setpoint;
+ int32_t integral;
+ int32_t p_gain;
+ int32_t i_gain;
+ int32_t d_gain;
+ int deadband;
+ int last_err;
+};
+
+struct cpudata {
+ int cpu;
+
+ char name[64];
+
+ struct timer_list timer;
+
+ struct pstate_adjust_policy *pstate_policy;
+ struct pstate_data pstate;
+ struct _pid pid;
+ struct _pid idle_pid;
+
+ int min_pstate_count;
+ int idle_mode;
+
+ ktime_t prev_sample;
+ u64 prev_idle_time_us;
+ u64 prev_aperf;
+ u64 prev_mperf;
+ int sample_ptr;
+ struct sample samples[SAMPLE_COUNT];
+};
+
+static struct cpudata **all_cpu_data;
+struct pstate_adjust_policy {
+ int sample_rate_ms;
+ int deadband;
+ int setpoint;
+ int p_gain_pct;
+ int d_gain_pct;
+ int i_gain_pct;
+};
+
+static struct pstate_adjust_policy default_policy = {
+ .sample_rate_ms = 10,
+ .deadband = 0,
+ .setpoint = 109,
+ .p_gain_pct = 17,
+ .d_gain_pct = 0,
+ .i_gain_pct = 4,
+};
+
+struct perf_limits {
+ int no_turbo;
+ int max_perf_pct;
+ int min_perf_pct;
+ int32_t max_perf;
+ int32_t min_perf;
+};
+
+static struct perf_limits limits = {
+ .no_turbo = 0,
+ .max_perf_pct = 100,
+ .max_perf = int_tofp(1),
+ .min_perf_pct = 0,
+ .min_perf = 0,
+};
+
+static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
+ int deadband, int integral) {
+ pid->setpoint = setpoint;
+ pid->deadband = deadband;
+ pid->integral = int_tofp(integral);
+ pid->last_err = setpoint - busy;
+}
+
+static inline void pid_p_gain_set(struct _pid *pid, int percent)
+{
+ pid->p_gain = div_fp(int_tofp(percent), int_tofp(100));
+}
+
+static inline void pid_i_gain_set(struct _pid *pid, int percent)
+{
+ pid->i_gain = div_fp(int_tofp(percent), int_tofp(100));
+}
+
+static inline void pid_d_gain_set(struct _pid *pid, int percent)
+{
+ pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
+}
+
+static signed int pid_calc(struct _pid *pid, int busy)
+{
+ signed int err, result;
+ int32_t pterm, dterm, fp_error;
+ int32_t integral_limit;
+
+ err = pid->setpoint - busy;
+ fp_error = int_tofp(err);
+
+ if (abs(err) <= pid->deadband)
+ return 0;
+
+ pterm = mul_fp(pid->p_gain, fp_error);
+
+ pid->integral += fp_error;
+
+ /* limit the integral term */
+ integral_limit = int_tofp(30);
+ if (pid->integral > integral_limit)
+ pid->integral = integral_limit;
+ if (pid->integral < -integral_limit)
+ pid->integral = -integral_limit;
+
+ dterm = mul_fp(pid->d_gain, (err - pid->last_err));
+ pid->last_err = err;
+
+ result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
+
+ return (signed int)fp_toint(result);
+}
+
+static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
+{
+ pid_p_gain_set(&cpu->pid, cpu->pstate_policy->p_gain_pct);
+ pid_d_gain_set(&cpu->pid, cpu->pstate_policy->d_gain_pct);
+ pid_i_gain_set(&cpu->pid, cpu->pstate_policy->i_gain_pct);
+
+ pid_reset(&cpu->pid,
+ cpu->pstate_policy->setpoint,
+ 100,
+ cpu->pstate_policy->deadband,
+ 0);
+}
+
+static inline void intel_pstate_idle_pid_reset(struct cpudata *cpu)
+{
+ pid_p_gain_set(&cpu->idle_pid, cpu->pstate_policy->p_gain_pct);
+ pid_d_gain_set(&cpu->idle_pid, cpu->pstate_policy->d_gain_pct);
+ pid_i_gain_set(&cpu->idle_pid, cpu->pstate_policy->i_gain_pct);
+
+ pid_reset(&cpu->idle_pid,
+ 75,
+ 50,
+ cpu->pstate_policy->deadband,
+ 0);
+}
+
+static inline void intel_pstate_reset_all_pid(void)
+{
+ unsigned int cpu;
+ for_each_online_cpu(cpu) {
+ if (all_cpu_data[cpu])
+ intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
+ }
+}
+
+/************************** debugfs begin ************************/
+static int pid_param_set(void *data, u64 val)
+{
+ *(u32 *)data = val;
+ intel_pstate_reset_all_pid();
+ return 0;
+}
+static int pid_param_get(void *data, u64 *val)
+{
+ *val = *(u32 *)data;
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get,
+ pid_param_set, "%llu\n");
+
+struct pid_param {
+ char *name;
+ void *value;
+};
+
+static struct pid_param pid_files[] = {
+ {"sample_rate_ms", &default_policy.sample_rate_ms},
+ {"d_gain_pct", &default_policy.d_gain_pct},
+ {"i_gain_pct", &default_policy.i_gain_pct},
+ {"deadband", &default_policy.deadband},
+ {"setpoint", &default_policy.setpoint},
+ {"p_gain_pct", &default_policy.p_gain_pct},
+ {NULL, NULL}
+};
+
+static struct dentry *debugfs_parent;
+static void intel_pstate_debug_expose_params(void)
+{
+ int i = 0;
+
+ debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
+ if (IS_ERR_OR_NULL(debugfs_parent))
+ return;
+ while (pid_files[i].name) {
+ debugfs_create_file(pid_files[i].name, 0660,
+ debugfs_parent, pid_files[i].value,
+ &fops_pid_param);
+ i++;
+ }
+}
+
+/************************** debugfs end ************************/
+
+/************************** sysfs begin ************************/
+#define show_one(file_name, object) \
+ static ssize_t show_##file_name \
+ (struct kobject *kobj, struct attribute *attr, char *buf) \
+ { \
+ return sprintf(buf, "%u\n", limits.object); \
+ }
+
+static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+ ret = sscanf(buf, "%u", &input);
+ if (ret != 1)
+ return -EINVAL;
+ limits.no_turbo = clamp_t(int, input, 0, 1);
+
+ return count;
+}
+
+static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+ ret = sscanf(buf, "%u", &input);
+ if (ret != 1)
+ return -EINVAL;
+
+ limits.max_perf_pct = clamp_t(int, input, 0, 100);
+ limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
+ return count;
+}
+
+static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+ ret = sscanf(buf, "%u", &input);
+ if (ret != 1)
+ return -EINVAL;
+ limits.min_perf_pct = clamp_t(int, input, 0, 100);
+ limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
+
+ return count;
+}
+
+show_one(no_turbo, no_turbo);
+show_one(max_perf_pct, max_perf_pct);
+show_one(min_perf_pct, min_perf_pct);
+
+define_one_global_rw(no_turbo);
+define_one_global_rw(max_perf_pct);
+define_one_global_rw(min_perf_pct);
+
+static struct attribute *intel_pstate_attributes[] = {
+ &no_turbo.attr,
+ &max_perf_pct.attr,
+ &min_perf_pct.attr,
+ NULL
+};
+
+static struct attribute_group intel_pstate_attr_group = {
+ .attrs = intel_pstate_attributes,
+};
+static struct kobject *intel_pstate_kobject;
+
+static void intel_pstate_sysfs_expose_params(void)
+{
+ int rc;
+
+ intel_pstate_kobject = kobject_create_and_add("intel_pstate",
+ &cpu_subsys.dev_root->kobj);
+ BUG_ON(!intel_pstate_kobject);
+ rc = sysfs_create_group(intel_pstate_kobject,
+ &intel_pstate_attr_group);
+ BUG_ON(rc);
+}
+
+/************************** sysfs end ************************/
+
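+/*
+ * MSR 0xCE is MSR_PLATFORM_INFO (bits 15:8 hold the maximum non-turbo
+ * ratio, bits 47:40 the maximum efficiency ratio); MSR 0x1AD is
+ * MSR_TURBO_RATIO_LIMIT (bits 7:0 hold the 1-core maximum turbo ratio).
+ */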
+static int intel_pstate_min_pstate(void)
+{
+ u64 value;
+ rdmsrl(0xCE, value);
+ return (value >> 40) & 0xFF;
+}
+
+static int intel_pstate_max_pstate(void)
+{
+ u64 value;
+ rdmsrl(0xCE, value);
+ return (value >> 8) & 0xFF;
+}
+
+static int intel_pstate_turbo_pstate(void)
+{
+ u64 value;
+ int nont, ret;
+ rdmsrl(0x1AD, value);
+ nont = intel_pstate_max_pstate();
+ ret = value & 0xFF;
+ if (ret <= nont)
+ ret = nont;
+ return ret;
+}
+
+static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
+{
+ int max_perf = cpu->pstate.turbo_pstate;
+ int min_perf;
+ if (limits.no_turbo)
+ max_perf = cpu->pstate.max_pstate;
+
+ max_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
+ *max = clamp_t(int, max_perf,
+ cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
+
+ min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
+ *min = clamp_t(int, min_perf,
+ cpu->pstate.min_pstate, max_perf);
+}
+
+static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
+{
+ int max_perf, min_perf;
+
+ intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
+
+ pstate = clamp_t(int, pstate, min_perf, max_perf);
+
+ if (pstate == cpu->pstate.current_pstate)
+ return;
+
+#ifndef MODULE
+ trace_cpu_frequency(pstate * 100000, cpu->cpu);
+#endif
+ cpu->pstate.current_pstate = pstate;
+ wrmsrl(MSR_IA32_PERF_CTL, pstate << 8);
+}
+
+static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
+{
+ int target;
+ target = cpu->pstate.current_pstate + steps;
+
+ intel_pstate_set_pstate(cpu, target);
+}
+
+static inline void intel_pstate_pstate_decrease(struct cpudata *cpu, int steps)
+{
+ int target;
+ target = cpu->pstate.current_pstate - steps;
+ intel_pstate_set_pstate(cpu, target);
+}
+
+static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
+{
+ sprintf(cpu->name, "Intel 2nd generation core");
+
+ cpu->pstate.min_pstate = intel_pstate_min_pstate();
+ cpu->pstate.max_pstate = intel_pstate_max_pstate();
+ cpu->pstate.turbo_pstate = intel_pstate_turbo_pstate();
+
+ /*
+ * Go to the max P-state so we don't slow down boot if we are
+ * built in; if we are a module, we will take care of it during
+ * normal operation.
+ */
+ intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
+}
+
+static inline void intel_pstate_calc_busy(struct cpudata *cpu,
+ struct sample *sample)
+{
+ u64 core_pct;
+ sample->pstate_pct_busy = 100 - div64_u64(
+ sample->idletime_us * 100,
+ sample->duration_us);
+ core_pct = div64_u64(sample->aperf * 100, sample->mperf);
+ sample->freq = cpu->pstate.turbo_pstate * core_pct * 1000;
+
+ sample->core_pct_busy = div_s64((sample->pstate_pct_busy * core_pct),
+ 100);
+}
+
+static inline void intel_pstate_sample(struct cpudata *cpu)
+{
+ ktime_t now;
+ u64 idle_time_us;
+ u64 aperf, mperf;
+
+ now = ktime_get();
+ idle_time_us = get_cpu_idle_time_us(cpu->cpu, NULL);
+
+ rdmsrl(MSR_IA32_APERF, aperf);
+ rdmsrl(MSR_IA32_MPERF, mperf);
+ /*
+ * For the first sample, don't actually record a sample; just
+ * set the baseline.
+ */
+ if (cpu->prev_idle_time_us > 0) {
+ cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
+ cpu->samples[cpu->sample_ptr].start_time = cpu->prev_sample;
+ cpu->samples[cpu->sample_ptr].end_time = now;
+ cpu->samples[cpu->sample_ptr].duration_us =
+ ktime_us_delta(now, cpu->prev_sample);
+ cpu->samples[cpu->sample_ptr].idletime_us =
+ idle_time_us - cpu->prev_idle_time_us;
+
+ cpu->samples[cpu->sample_ptr].aperf = aperf;
+ cpu->samples[cpu->sample_ptr].mperf = mperf;
+ cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
+ cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;
+
+ intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);
+ }
+
+ cpu->prev_sample = now;
+ cpu->prev_idle_time_us = idle_time_us;
+ cpu->prev_aperf = aperf;
+ cpu->prev_mperf = mperf;
+}
+
+static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
+{
+ int sample_time, delay;
+
+ sample_time = cpu->pstate_policy->sample_rate_ms;
+ delay = msecs_to_jiffies(sample_time);
+ delay -= jiffies % delay;
+ mod_timer_pinned(&cpu->timer, jiffies + delay);
+}
+
+static inline void intel_pstate_idle_mode(struct cpudata *cpu)
+{
+ cpu->idle_mode = 1;
+}
+
+static inline void intel_pstate_normal_mode(struct cpudata *cpu)
+{
+ cpu->idle_mode = 0;
+}
+
+static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu)
+{
+ int32_t busy_scaled;
+ int32_t core_busy, turbo_pstate, current_pstate;
+
+ core_busy = int_tofp(cpu->samples[cpu->sample_ptr].core_pct_busy);
+ turbo_pstate = int_tofp(cpu->pstate.turbo_pstate);
+ current_pstate = int_tofp(cpu->pstate.current_pstate);
+ busy_scaled = mul_fp(core_busy, div_fp(turbo_pstate, current_pstate));
+
+ return fp_toint(busy_scaled);
+}
+
+static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
+{
+ int busy_scaled;
+ struct _pid *pid;
+ signed int ctl = 0;
+ int steps;
+
+ pid = &cpu->pid;
+ busy_scaled = intel_pstate_get_scaled_busy(cpu);
+
+ ctl = pid_calc(pid, busy_scaled);
+
+ steps = abs(ctl);
+ if (ctl < 0)
+ intel_pstate_pstate_increase(cpu, steps);
+ else
+ intel_pstate_pstate_decrease(cpu, steps);
+}
+
+static inline void intel_pstate_adjust_idle_pstate(struct cpudata *cpu)
+{
+ int busy_scaled;
+ struct _pid *pid;
+ int ctl = 0;
+ int steps;
+
+ pid = &cpu->idle_pid;
+
+ busy_scaled = intel_pstate_get_scaled_busy(cpu);
+
+ ctl = pid_calc(pid, 100 - busy_scaled);
+
+ steps = abs(ctl);
+ if (ctl < 0)
+ intel_pstate_pstate_decrease(cpu, steps);
+ else
+ intel_pstate_pstate_increase(cpu, steps);
+
+ if (cpu->pstate.current_pstate == cpu->pstate.min_pstate)
+ intel_pstate_normal_mode(cpu);
+}
+
+static void intel_pstate_timer_func(unsigned long __data)
+{
+ struct cpudata *cpu = (struct cpudata *) __data;
+
+ intel_pstate_sample(cpu);
+
+ if (!cpu->idle_mode)
+ intel_pstate_adjust_busy_pstate(cpu);
+ else
+ intel_pstate_adjust_idle_pstate(cpu);
+
+#if defined(XPERF_FIX)
+ if (cpu->pstate.current_pstate == cpu->pstate.min_pstate) {
+ cpu->min_pstate_count++;
+ if (!(cpu->min_pstate_count % 5)) {
+ intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
+ intel_pstate_idle_mode(cpu);
+ }
+ } else
+ cpu->min_pstate_count = 0;
+#endif
+ intel_pstate_set_sample_time(cpu);
+}
+
+#define ICPU(model, policy) \
+ { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&policy }
+
+static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
+ ICPU(0x2a, default_policy),
+ ICPU(0x2d, default_policy),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
+
+static int intel_pstate_init_cpu(unsigned int cpunum)
+{
+ const struct x86_cpu_id *id;
+ struct cpudata *cpu;
+
+ id = x86_match_cpu(intel_pstate_cpu_ids);
+ if (!id)
+ return -ENODEV;
+
+ all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata), GFP_KERNEL);
+ if (!all_cpu_data[cpunum])
+ return -ENOMEM;
+
+ cpu = all_cpu_data[cpunum];
+
+ intel_pstate_get_cpu_pstates(cpu);
+
+ cpu->cpu = cpunum;
+ cpu->pstate_policy =
+ (struct pstate_adjust_policy *)id->driver_data;
+ init_timer_deferrable(&cpu->timer);
+ cpu->timer.function = intel_pstate_timer_func;
+ cpu->timer.data = (unsigned long)cpu;
+ cpu->timer.expires = jiffies + HZ/100;
+ intel_pstate_busy_pid_reset(cpu);
+ intel_pstate_idle_pid_reset(cpu);
+ intel_pstate_sample(cpu);
+ intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
+
+ add_timer_on(&cpu->timer, cpunum);
+
+ pr_info("Intel pstate controlling: cpu %d\n", cpunum);
+
+ return 0;
+}
+
+static unsigned int intel_pstate_get(unsigned int cpu_num)
+{
+ struct sample *sample;
+ struct cpudata *cpu;
+
+ cpu = all_cpu_data[cpu_num];
+ if (!cpu)
+ return 0;
+ sample = &cpu->samples[cpu->sample_ptr];
+ return sample->freq;
+}
+
+static int intel_pstate_set_policy(struct cpufreq_policy *policy)
+{
+ struct cpudata *cpu;
+ int min, max;
+
+ cpu = all_cpu_data[policy->cpu];
+
+ intel_pstate_get_min_max(cpu, &min, &max);
+
+ limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
+ limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0, 100);
+ limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
+
+ limits.max_perf_pct = policy->max * 100 / policy->cpuinfo.max_freq;
+ limits.max_perf_pct = clamp_t(int, limits.max_perf_pct, 0, 100);
+ limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
+
+ if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ limits.min_perf_pct = 100;
+ limits.min_perf = int_tofp(1);
+ limits.max_perf_pct = 100;
+ limits.max_perf = int_tofp(1);
+ limits.no_turbo = 0;
+ }
+
+ return 0;
+}
+
+static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
+{
+ cpufreq_verify_within_limits(policy,
+ policy->cpuinfo.min_freq,
+ policy->cpuinfo.max_freq);
+
+ if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
+ (policy->policy != CPUFREQ_POLICY_PERFORMANCE))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int __cpuinit intel_pstate_cpu_exit(struct cpufreq_policy *policy)
+{
+ int cpu = policy->cpu;
+
+ del_timer(&all_cpu_data[cpu]->timer);
+ kfree(all_cpu_data[cpu]);
+ all_cpu_data[cpu] = NULL;
+ return 0;
+}
+
+static int __cpuinit intel_pstate_cpu_init(struct cpufreq_policy *policy)
+{
+ int rc, min_pstate, max_pstate;
+ struct cpudata *cpu;
+
+ rc = intel_pstate_init_cpu(policy->cpu);
+ if (rc)
+ return rc;
+
+ cpu = all_cpu_data[policy->cpu];
+
+ if (!limits.no_turbo &&
+ limits.min_perf_pct == 100 && limits.max_perf_pct == 100)
+ policy->policy = CPUFREQ_POLICY_PERFORMANCE;
+ else
+ policy->policy = CPUFREQ_POLICY_POWERSAVE;
+
+ intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate);
+ policy->min = min_pstate * 100000;
+ policy->max = max_pstate * 100000;
+
+ /* cpuinfo and default policy values */
+ policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000;
+ policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate * 100000;
+ policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+ cpumask_set_cpu(policy->cpu, policy->cpus);
+
+ return 0;
+}
+
+static struct cpufreq_driver intel_pstate_driver = {
+ .flags = CPUFREQ_CONST_LOOPS,
+ .verify = intel_pstate_verify_policy,
+ .setpolicy = intel_pstate_set_policy,
+ .get = intel_pstate_get,
+ .init = intel_pstate_cpu_init,
+ .exit = intel_pstate_cpu_exit,
+ .name = "intel_pstate",
+ .owner = THIS_MODULE,
+};
+
+static void intel_pstate_exit(void)
+{
+ int cpu;
+
+ sysfs_remove_group(intel_pstate_kobject,
+ &intel_pstate_attr_group);
+ debugfs_remove_recursive(debugfs_parent);
+
+ cpufreq_unregister_driver(&intel_pstate_driver);
+
+ if (!all_cpu_data)
+ return;
+
+ get_online_cpus();
+ for_each_online_cpu(cpu) {
+ if (all_cpu_data[cpu]) {
+ del_timer_sync(&all_cpu_data[cpu]->timer);
+ kfree(all_cpu_data[cpu]);
+ }
+ }
+
+ put_online_cpus();
+ vfree(all_cpu_data);
+}
+module_exit(intel_pstate_exit);
+
+static int __initdata no_load;
+
+static int __init intel_pstate_init(void)
+{
+ int cpu, rc = 0;
+ const struct x86_cpu_id *id;
+
+ if (no_load)
+ return -ENODEV;
+
+ id = x86_match_cpu(intel_pstate_cpu_ids);
+ if (!id)
+ return -ENODEV;
+
+ pr_info("Intel P-state driver initializing.\n");
+
+ all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
+ if (!all_cpu_data)
+ return -ENOMEM;
+
+ rc = cpufreq_register_driver(&intel_pstate_driver);
+ if (rc)
+ goto out;
+
+ intel_pstate_debug_expose_params();
+ intel_pstate_sysfs_expose_params();
+ return rc;
+out:
+ get_online_cpus();
+ for_each_online_cpu(cpu) {
+ if (all_cpu_data[cpu]) {
+ del_timer_sync(&all_cpu_data[cpu]->timer);
+ kfree(all_cpu_data[cpu]);
+ }
+ }
+ put_online_cpus();
+ vfree(all_cpu_data);
+ return -ENODEV;
+}
+device_initcall(intel_pstate_init);
+
+static int __init intel_pstate_setup(char *str)
+{
+ if (!str)
+ return -EINVAL;
+
+ if (!strcmp(str, "disable"))
+ no_load = 1;
+ return 0;
+}
+early_param("intel_pstate", intel_pstate_setup);
+
+MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
+MODULE_DESCRIPTION("'intel_pstate' - P state driver Intel Core processors");
+MODULE_LICENSE("GPL");
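The control loop above works in 8-fractional-bit fixed point: int_tofp() and
fp_toint() shift by FRAC_BITS, while mul_fp() and div_fp() keep intermediate
products in 64 bits. A hedged userspace sketch of one P/I step of pid_calc()
using the default_policy gains (setpoint 109, P 17%, I 4%, D 0%); the busy
input is a made-up value, and plain division stands in for div_s64():

	#include <stdio.h>
	#include <stdint.h>

	#define FRAC_BITS 8
	#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
	#define fp_toint(X) ((X) >> FRAC_BITS)

	static int32_t mul_fp(int32_t x, int32_t y)
	{
		return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
	}

	static int32_t div_fp(int32_t x, int32_t y)
	{
		return ((int64_t)x << FRAC_BITS) / y;
	}

	int main(void)
	{
		/* gains scaled from percent, as pid_*_gain_set() does */
		int32_t p_gain = div_fp(int_tofp(17), int_tofp(100));
		int32_t i_gain = div_fp(int_tofp(4), int_tofp(100));
		int32_t integral = 0;
		int setpoint = 109, busy = 85; /* hypothetical busy input */

		int err = setpoint - busy;
		integral += int_tofp(err);
		int32_t result = mul_fp(p_gain, int_tofp(err)) +
				 mul_fp(integral, i_gain);

		/* positive output: the core is less busy than the setpoint,
		 * so the driver steps the P-state down */
		printf("control output: %d steps\n", (int)fp_toint(result));
		return 0;
	}

The same setpoint and gains are what the driver exposes for tuning under
debugfs in the pstate_snb directory.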
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
new file mode 100644
index 000000000000..0e83e3c24f5b
--- /dev/null
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -0,0 +1,259 @@
+/*
+ * kirkwood-cpufreq.c: cpufreq driver for the Marvell Kirkwood
+ *
+ * Copyright (C) 2013 Andrew Lunn <andrew@lunn.ch>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/cpufreq.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <asm/proc-fns.h>
+
+#define CPU_SW_INT_BLK BIT(28)
+
+static struct priv {
+ struct clk *cpu_clk;
+ struct clk *ddr_clk;
+ struct clk *powersave_clk;
+ struct device *dev;
+ void __iomem *base;
+} priv;
+
+#define STATE_CPU_FREQ 0x01
+#define STATE_DDR_FREQ 0x02
+
+/*
+ * Kirkwood can swap the clock to the CPU between two clocks:
+ *
+ * - cpu clk
+ * - ddr clk
+ *
+ * The frequencies are set at runtime before registering this
+ * table.
+ */
+static struct cpufreq_frequency_table kirkwood_freq_table[] = {
+ {STATE_CPU_FREQ, 0}, /* CPU uses cpuclk */
+ {STATE_DDR_FREQ, 0}, /* CPU uses ddrclk */
+ {0, CPUFREQ_TABLE_END},
+};
+
+static unsigned int kirkwood_cpufreq_get_cpu_frequency(unsigned int cpu)
+{
+ if (__clk_is_enabled(priv.powersave_clk))
+ return kirkwood_freq_table[1].frequency;
+ return kirkwood_freq_table[0].frequency;
+}
+
+static void kirkwood_cpufreq_set_cpu_state(unsigned int index)
+{
+ struct cpufreq_freqs freqs;
+ unsigned int state = kirkwood_freq_table[index].index;
+ unsigned long reg;
+
+ freqs.old = kirkwood_cpufreq_get_cpu_frequency(0);
+ freqs.new = kirkwood_freq_table[index].frequency;
+ freqs.cpu = 0; /* Kirkwood is UP */
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+ dev_dbg(priv.dev, "Attempting to set frequency to %i KHz\n",
+ kirkwood_freq_table[index].frequency);
+ dev_dbg(priv.dev, "old frequency was %i KHz\n",
+ kirkwood_cpufreq_get_cpu_frequency(0));
+
+ if (freqs.old != freqs.new) {
+ local_irq_disable();
+
+ /* Disable interrupts to the CPU */
+ reg = readl_relaxed(priv.base);
+ reg |= CPU_SW_INT_BLK;
+ writel_relaxed(reg, priv.base);
+
+ switch (state) {
+ case STATE_CPU_FREQ:
+ clk_disable(priv.powersave_clk);
+ break;
+ case STATE_DDR_FREQ:
+ clk_enable(priv.powersave_clk);
+ break;
+ }
+
+ /* Wait-for-Interrupt, while the hardware changes frequency */
+ cpu_do_idle();
+
+ /* Enable interrupts to the CPU */
+ reg = readl_relaxed(priv.base);
+ reg &= ~CPU_SW_INT_BLK;
+ writel_relaxed(reg, priv.base);
+
+ local_irq_enable();
+ }
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+}
+
+static int kirkwood_cpufreq_verify(struct cpufreq_policy *policy)
+{
+ return cpufreq_frequency_table_verify(policy, kirkwood_freq_table);
+}
+
+static int kirkwood_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ unsigned int index = 0;
+
+ if (cpufreq_frequency_table_target(policy, kirkwood_freq_table,
+ target_freq, relation, &index))
+ return -EINVAL;
+
+ kirkwood_cpufreq_set_cpu_state(index);
+
+ return 0;
+}
+
+/* Module init and exit code */
+static int kirkwood_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ int result;
+
+ /* cpuinfo and default policy values */
+ policy->cpuinfo.transition_latency = 5000; /* 5 us */
+ policy->cur = kirkwood_cpufreq_get_cpu_frequency(0);
+
+ result = cpufreq_frequency_table_cpuinfo(policy, kirkwood_freq_table);
+ if (result)
+ return result;
+
+ cpufreq_frequency_table_get_attr(kirkwood_freq_table, policy->cpu);
+
+ return 0;
+}
+
+static int kirkwood_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+ cpufreq_frequency_table_put_attr(policy->cpu);
+ return 0;
+}
+
+static struct freq_attr *kirkwood_cpufreq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
+static struct cpufreq_driver kirkwood_cpufreq_driver = {
+ .get = kirkwood_cpufreq_get_cpu_frequency,
+ .verify = kirkwood_cpufreq_verify,
+ .target = kirkwood_cpufreq_target,
+ .init = kirkwood_cpufreq_cpu_init,
+ .exit = kirkwood_cpufreq_cpu_exit,
+ .name = "kirkwood-cpufreq",
+ .owner = THIS_MODULE,
+ .attr = kirkwood_cpufreq_attr,
+};
+
+static int kirkwood_cpufreq_probe(struct platform_device *pdev)
+{
+ struct device_node *np;
+ struct resource *res;
+ int err;
+
+ priv.dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Cannot get memory resource\n");
+ return -ENODEV;
+ }
+ priv.base = devm_request_and_ioremap(&pdev->dev, res);
+ if (!priv.base) {
+ dev_err(&pdev->dev, "Cannot ioremap\n");
+ return -EADDRNOTAVAIL;
+ }
+
+ np = of_find_node_by_path("/cpus/cpu@0");
+ if (!np)
+ return -ENODEV;
+
+ priv.cpu_clk = of_clk_get_by_name(np, "cpu_clk");
+ if (IS_ERR(priv.cpu_clk)) {
+ dev_err(priv.dev, "Unable to get cpuclk");
+ return PTR_ERR(priv.cpu_clk);
+ }
+
+ clk_prepare_enable(priv.cpu_clk);
+ kirkwood_freq_table[0].frequency = clk_get_rate(priv.cpu_clk) / 1000;
+
+ priv.ddr_clk = of_clk_get_by_name(np, "ddrclk");
+ if (IS_ERR(priv.ddr_clk)) {
+ dev_err(priv.dev, "Unable to get ddrclk");
+ err = PTR_ERR(priv.ddr_clk);
+ goto out_cpu;
+ }
+
+ clk_prepare_enable(priv.ddr_clk);
+ kirkwood_freq_table[1].frequency = clk_get_rate(priv.ddr_clk) / 1000;
+
+ priv.powersave_clk = of_clk_get_by_name(np, "powersave");
+ if (IS_ERR(priv.powersave_clk)) {
+ dev_err(priv.dev, "Unable to get powersave");
+ err = PTR_ERR(priv.powersave_clk);
+ goto out_ddr;
+ }
+ clk_prepare(priv.powersave_clk);
+
+ of_node_put(np);
+ np = NULL;
+
+ err = cpufreq_register_driver(&kirkwood_cpufreq_driver);
+ if (!err)
+ return 0;
+
+ dev_err(priv.dev, "Failed to register cpufreq driver");
+
+ clk_disable_unprepare(priv.powersave_clk);
+out_ddr:
+ clk_disable_unprepare(priv.ddr_clk);
+out_cpu:
+ clk_disable_unprepare(priv.cpu_clk);
+ of_node_put(np);
+
+ return err;
+}
+
+static int kirkwood_cpufreq_remove(struct platform_device *pdev)
+{
+ cpufreq_unregister_driver(&kirkwood_cpufreq_driver);
+
+ clk_disable_unprepare(priv.powersave_clk);
+ clk_disable_unprepare(priv.ddr_clk);
+ clk_disable_unprepare(priv.cpu_clk);
+
+ return 0;
+}
+
+static struct platform_driver kirkwood_cpufreq_platform_driver = {
+ .probe = kirkwood_cpufreq_probe,
+ .remove = kirkwood_cpufreq_remove,
+ .driver = {
+ .name = "kirkwood-cpufreq",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(kirkwood_cpufreq_platform_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Andrew Lunn <andrew@lunn.ch");
+MODULE_DESCRIPTION("cpufreq driver for Marvell's kirkwood CPU");
+MODULE_ALIAS("platform:kirkwood-cpufreq");
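The actual switch happens with interrupts to the CPU blocked via bit 28 of
the clock-control register, followed by a WFI while the hardware swaps the
clock. A minimal sketch of that read-modify-write window, with a plain
variable standing in for the memory-mapped register at priv.base:

	#include <stdint.h>
	#include <stdio.h>

	#define CPU_SW_INT_BLK (1u << 28)

	int main(void)
	{
		/* hypothetical clock-control register image */
		uint32_t reg = 0x00000005;

		reg |= CPU_SW_INT_BLK;  /* block interrupts to the CPU */
		/* ... gate the powersave clock and execute WFI here ... */
		reg &= ~CPU_SW_INT_BLK; /* unblock interrupts */

		printf("register image after switch: 0x%08x\n", reg);
		return 0;
	}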
diff --git a/drivers/cpufreq/maple-cpufreq.c b/drivers/cpufreq/maple-cpufreq.c
index 89b178a3f849..d4c4989823dc 100644
--- a/drivers/cpufreq/maple-cpufreq.c
+++ b/drivers/cpufreq/maple-cpufreq.c
@@ -181,7 +181,7 @@ static int maple_cpufreq_cpu_init(struct cpufreq_policy *policy)
/* secondary CPUs are tied to the primary one by the
* cpufreq core if in the secondary policy we tell it that
* it actually must be one policy together with all others. */
- cpumask_copy(policy->cpus, cpu_online_mask);
+ cpumask_setall(policy->cpus);
cpufreq_frequency_table_get_attr(maple_cpu_freqs, policy->cpu);
return cpufreq_frequency_table_cpuinfo(policy,
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index 97102b05843f..9128c07bafba 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -214,10 +214,8 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy)
* interface to handle this scenario. Additional is_smp() check
* is to keep SMP_ON_UP build working.
*/
- if (is_smp()) {
- policy->shared_type = CPUFREQ_SHARED_TYPE_ANY;
+ if (is_smp())
cpumask_setall(policy->cpus);
- }
/* FIXME: what's the actual transition time? */
policy->cpuinfo.transition_latency = 300 * 1000;
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 056faf6af1a9..d13a13678b5f 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -1249,39 +1249,59 @@ static struct cpufreq_driver cpufreq_amd64_driver = {
.attr = powernow_k8_attr,
};
+static void __request_acpi_cpufreq(void)
+{
+ const char *cur_drv, *drv = "acpi-cpufreq";
+
+ cur_drv = cpufreq_get_current_driver();
+ if (!cur_drv)
+ goto request;
+
+ if (strncmp(cur_drv, drv, min_t(size_t, strlen(cur_drv), strlen(drv))))
+ pr_warn(PFX "WTF driver: %s\n", cur_drv);
+
+ return;
+
+ request:
+ pr_warn(PFX "This CPU is not supported anymore, using acpi-cpufreq instead.\n");
+ request_module(drv);
+}
+
/* driver entry point for init */
static int __cpuinit powernowk8_init(void)
{
unsigned int i, supported_cpus = 0;
- int rv;
+ int ret;
if (static_cpu_has(X86_FEATURE_HW_PSTATE)) {
- pr_warn(PFX "this CPU is not supported anymore, using acpi-cpufreq instead.\n");
- request_module("acpi-cpufreq");
+ __request_acpi_cpufreq();
return -ENODEV;
}
if (!x86_match_cpu(powernow_k8_ids))
return -ENODEV;
+ get_online_cpus();
for_each_online_cpu(i) {
- int rc;
- smp_call_function_single(i, check_supported_cpu, &rc, 1);
- if (rc == 0)
+ smp_call_function_single(i, check_supported_cpu, &ret, 1);
+ if (!ret)
supported_cpus++;
}
- if (supported_cpus != num_online_cpus())
+ if (supported_cpus != num_online_cpus()) {
+ put_online_cpus();
return -ENODEV;
+ }
+ put_online_cpus();
- rv = cpufreq_register_driver(&cpufreq_amd64_driver);
+ ret = cpufreq_register_driver(&cpufreq_amd64_driver);
+ if (ret)
+ return ret;
- if (!rv)
- pr_info(PFX "Found %d %s (%d cpu cores) (" VERSION ")\n",
- num_online_nodes(), boot_cpu_data.x86_model_id,
- supported_cpus);
+ pr_info(PFX "Found %d %s (%d cpu cores) (" VERSION ")\n",
+ num_online_nodes(), boot_cpu_data.x86_model_id, supported_cpus);
- return rv;
+ return ret;
}
/* driver entry point for term */
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index 4575cfe41755..7e4d77327957 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -30,7 +30,7 @@ static struct {
u32 cnt;
} spear_cpufreq;
-int spear_cpufreq_verify(struct cpufreq_policy *policy)
+static int spear_cpufreq_verify(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy, spear_cpufreq.freq_tbl);
}
@@ -157,7 +157,9 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy,
freqs.new = newfreq / 1000;
freqs.new /= mult;
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+ for_each_cpu(freqs.cpu, policy->cpus)
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
if (mult == 2)
ret = spear1340_set_cpu_rate(srcclk, newfreq);
@@ -170,7 +172,8 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy,
freqs.new = clk_get_rate(spear_cpufreq.clk) / 1000;
}
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ for_each_cpu(freqs.cpu, policy->cpus)
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
return ret;
}
@@ -188,8 +191,7 @@ static int spear_cpufreq_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency = spear_cpufreq.transition_latency;
policy->cur = spear_cpufreq_get(0);
- cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
- cpumask_copy(policy->related_cpus, policy->cpus);
+ cpumask_setall(policy->cpus);
return 0;
}
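The SPEAr hunks above post the PRE/POSTCHANGE notifications once per CPU in
the policy rather than once overall, rewriting freqs.cpu on each pass. A
hedged userspace sketch of that pattern, with a plain bitmask standing in
for policy->cpus and printf() for cpufreq_notify_transition():

	#include <stdint.h>
	#include <stdio.h>

	struct freqs { unsigned int cpu, old_khz, new_khz; };

	static void notify(const char *phase, const struct freqs *f)
	{
		printf("%s: cpu%u %u -> %u kHz\n",
		       phase, f->cpu, f->old_khz, f->new_khz);
	}

	int main(void)
	{
		uint32_t policy_cpus = 0x3; /* hypothetical two-CPU policy */
		struct freqs f = { 0, 500000, 332000 };
		unsigned int cpu;

		for (cpu = 0; cpu < 32; cpu++)
			if (policy_cpus & (1u << cpu)) {
				f.cpu = cpu;
				notify("PRECHANGE", &f);
			}

		/* ... the clock rate actually changes here ... */

		for (cpu = 0; cpu < 32; cpu++)
			if (policy_cpus & (1u << cpu)) {
				f.cpu = cpu;
				notify("POSTCHANGE", &f);
			}
		return 0;
	}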