path: root/arch
Diffstat (limited to 'arch')
-rw-r--r--  arch/blackfin/Kconfig                 1
-rw-r--r--  arch/blackfin/include/asm/time.h      7
-rw-r--r--  arch/blackfin/kernel/time-ts.c        4
-rw-r--r--  arch/blackfin/mach-common/cpufreq.c 155
4 files changed, 101 insertions, 66 deletions
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index ed339b83f5af..970df5b5c525 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -1237,7 +1237,6 @@ config PM_BFIN_WAKE_GP
endmenu
menu "CPU Frequency scaling"
- depends on !SMP
source "drivers/cpufreq/Kconfig"
diff --git a/arch/blackfin/include/asm/time.h b/arch/blackfin/include/asm/time.h
index 767b938ccf8c..9ca7db844d10 100644
--- a/arch/blackfin/include/asm/time.h
+++ b/arch/blackfin/include/asm/time.h
@@ -23,9 +23,7 @@
*/
#ifndef CONFIG_CPU_FREQ
-#define TIME_SCALE 1
-#define __bfin_cycles_off (0)
-#define __bfin_cycles_mod (0)
+# define TIME_SCALE 1
#else
/*
 * Blackfin CPU frequency scaling supports max Core Clock 1, 1/2 and 1/4.
@@ -33,8 +31,11 @@
 * adjust the Core Timer Prescale Register. This way we don't lose time.
*/
#define TIME_SCALE 4
+
+# ifdef CONFIG_CYCLES_CLOCKSOURCE
extern unsigned long long __bfin_cycles_off;
extern unsigned int __bfin_cycles_mod;
+# endif
#endif
#if defined(CONFIG_TICKSOURCE_CORETMR)
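As an aside, here is a minimal user-space sketch (not part of the patch) of why scaling the core-timer prescaler together with the CCLK divider keeps the timer rate constant; the 600 MHz CCLK value and the helper name sketch_tscale() are illustrative assumptions only:

#include <stdio.h>

#define TIME_SCALE 4	/* matches the CONFIG_CPU_FREQ value above */

/* csel selects a CCLK divider of 1 << csel (full, 1/2, 1/4) */
static unsigned int sketch_tscale(unsigned int csel)
{
	/* The core timer counts once every (TSCALE + 1) CCLK cycles, so
	 * dividing CCLK by 1 << csel while dividing (TSCALE + 1) by the
	 * same factor leaves the timer rate unchanged. */
	return (TIME_SCALE / (1u << csel)) - 1;
}

int main(void)
{
	unsigned long cclk = 600000000UL;	/* assumed 600 MHz max CCLK */
	unsigned int csel;

	for (csel = 0; csel <= 2; csel++)
		printf("cclk=%lu Hz tscale=%u timer=%lu Hz\n",
		       cclk >> csel, sketch_tscale(csel),
		       (cclk >> csel) / (sketch_tscale(csel) + 1));
	return 0;
}

With these assumed values the timer input stays at 150 MHz for all three operating points, which is the "we don't lose time" property the comment above refers to.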
diff --git a/arch/blackfin/kernel/time-ts.c b/arch/blackfin/kernel/time-ts.c
index 41a907596c70..cb7a01d4f009 100644
--- a/arch/blackfin/kernel/time-ts.c
+++ b/arch/blackfin/kernel/time-ts.c
@@ -51,7 +51,11 @@
static notrace cycle_t bfin_read_cycles(struct clocksource *cs)
{
+#ifdef CONFIG_CPU_FREQ
return __bfin_cycles_off + (get_cycles() << __bfin_cycles_mod);
+#else
+ return get_cycles();
+#endif
}
static struct clocksource bfin_cs_cycles = {
diff --git a/arch/blackfin/mach-common/cpufreq.c b/arch/blackfin/mach-common/cpufreq.c
index 777582897253..5d7f8ab5509a 100644
--- a/arch/blackfin/mach-common/cpufreq.c
+++ b/arch/blackfin/mach-common/cpufreq.c
@@ -15,6 +15,8 @@
#include <asm/time.h>
#include <asm/dpmc.h>
+#define CPUFREQ_CPU 0
+
/* this is the table of CCLK frequencies, in Hz */
/* .index is the entry in the auxiliary dpm_state_table[] */
static struct cpufreq_frequency_table bfin_freq_table[] = {
@@ -41,64 +43,114 @@ static struct bfin_dpm_state {
unsigned int tscale; /* change the divider on the core timer interrupt */
} dpm_state_table[3];
+#if defined(CONFIG_CYCLES_CLOCKSOURCE)
/*
- normalized to maximum frequncy offset for CYCLES,
- used in time-ts cycles clock source, but could be used
- somewhere also.
+ * normalized to the maximum frequency offset for CYCLES,
+ * used in the time-ts cycles clock source, but could be used
+ * elsewhere as well.
*/
unsigned long long __bfin_cycles_off;
unsigned int __bfin_cycles_mod;
+#endif
/**************************************************************************/
+static void __init bfin_init_tables(unsigned long cclk, unsigned long sclk)
+{
-static unsigned int bfin_getfreq_khz(unsigned int cpu)
+ unsigned long csel, min_cclk;
+ int index;
+
+ /* Anomaly 273 seems to still exist on non-BF54x w/dcache turned on */
+#if ANOMALY_05000273 || ANOMALY_05000274 || \
+ (!defined(CONFIG_BF54x) && defined(CONFIG_BFIN_EXTMEM_DCACHEABLE))
+ min_cclk = sclk * 2;
+#else
+ min_cclk = sclk;
+#endif
+ csel = ((bfin_read_PLL_DIV() & CSEL) >> 4);
+
+ for (index = 0; (cclk >> index) >= min_cclk && csel <= 3; index++, csel++) {
+ bfin_freq_table[index].frequency = cclk >> index;
+ dpm_state_table[index].csel = csel << 4; /* Shift now into PLL_DIV bitpos */
+ dpm_state_table[index].tscale = (TIME_SCALE / (1 << csel)) - 1;
+
+ pr_debug("cpufreq: freq:%d csel:0x%x tscale:%d\n",
+ bfin_freq_table[index].frequency,
+ dpm_state_table[index].csel,
+ dpm_state_table[index].tscale);
+ }
+ return;
+}
+
+static void bfin_adjust_core_timer(void *info)
{
- /* The driver only support single cpu */
- if (cpu != 0)
- return -1;
+ unsigned int tscale;
+ unsigned int index = *(unsigned int *)info;
+
+ /* we have to adjust the core timer, because it is using cclk */
+ tscale = dpm_state_table[index].tscale;
+ bfin_write_TSCALE(tscale);
+ return;
+}
+static unsigned int bfin_getfreq_khz(unsigned int cpu)
+{
+ /* Both CoreA/B have the same core clock */
return get_cclk() / 1000;
}
-static int bfin_target(struct cpufreq_policy *policy,
+static int bfin_target(struct cpufreq_policy *poli,
unsigned int target_freq, unsigned int relation)
{
- unsigned int index, plldiv, tscale;
+ unsigned int index, plldiv, cpu;
unsigned long flags, cclk_hz;
struct cpufreq_freqs freqs;
+#if defined(CONFIG_CYCLES_CLOCKSOURCE)
cycles_t cycles;
+#endif
- if (cpufreq_frequency_table_target(policy, bfin_freq_table,
- target_freq, relation, &index))
- return -EINVAL;
-
- cclk_hz = bfin_freq_table[index].frequency;
-
- freqs.old = bfin_getfreq_khz(0);
- freqs.new = cclk_hz;
- freqs.cpu = 0;
-
- pr_debug("cpufreq: changing cclk to %lu; target = %u, oldfreq = %u\n",
- cclk_hz, target_freq, freqs.old);
-
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
- local_irq_save_hw(flags);
- plldiv = (bfin_read_PLL_DIV() & SSEL) | dpm_state_table[index].csel;
- tscale = dpm_state_table[index].tscale;
- bfin_write_PLL_DIV(plldiv);
- /* we have to adjust the core timer, because it is using cclk */
- bfin_write_TSCALE(tscale);
- cycles = get_cycles();
- SSYNC();
- cycles += 10; /* ~10 cycles we lose after get_cycles() */
- __bfin_cycles_off += (cycles << __bfin_cycles_mod) - (cycles << index);
- __bfin_cycles_mod = index;
- local_irq_restore_hw(flags);
- /* TODO: just test case for cycles clock source, remove later */
- pr_debug("cpufreq: done\n");
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ for_each_online_cpu(cpu) {
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+
+ if (!policy)
+ continue;
+
+ if (cpufreq_frequency_table_target(policy, bfin_freq_table,
+ target_freq, relation, &index))
+ return -EINVAL;
+
+ cclk_hz = bfin_freq_table[index].frequency;
+
+ freqs.old = bfin_getfreq_khz(0);
+ freqs.new = cclk_hz;
+ freqs.cpu = cpu;
+
+ pr_debug("cpufreq: changing cclk to %lu; target = %u, oldfreq = %u\n",
+ cclk_hz, target_freq, freqs.old);
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ if (cpu == CPUFREQ_CPU) {
+ local_irq_save_hw(flags);
+ plldiv = (bfin_read_PLL_DIV() & SSEL) |
+ dpm_state_table[index].csel;
+ bfin_write_PLL_DIV(plldiv);
+ on_each_cpu(bfin_adjust_core_timer, &index, 1);
+#if defined(CONFIG_CYCLES_CLOCKSOURCE)
+ cycles = get_cycles();
+ SSYNC();
+ cycles += 10; /* ~10 cycles we lose after get_cycles() */
+ __bfin_cycles_off +=
+ (cycles << __bfin_cycles_mod) - (cycles << index);
+ __bfin_cycles_mod = index;
+#endif
+ local_irq_restore_hw(flags);
+ }
+ /* TODO: just test case for cycles clock source, remove later */
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ }
+ pr_debug("cpufreq: done\n");
return 0;
}
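For clarity, a freestanding sketch of the __bfin_cycles_off / __bfin_cycles_mod bookkeeping above, showing that the clocksource value stays continuous across a CCLK change. The helper names read_cycles()/switch_index() and the counter value are illustrative, and the ~10-cycle fudge from the patch is omitted:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* stand-ins for the driver's globals */
static uint64_t off;		/* __bfin_cycles_off */
static unsigned int mod;	/* __bfin_cycles_mod == current table index */

/* what bfin_read_cycles() computes under CONFIG_CPU_FREQ */
static uint64_t read_cycles(uint64_t hw)
{
	return off + (hw << mod);
}

/* what bfin_target() does when moving to table index 'index',
 * with 'hw' standing in for get_cycles() at the switch point */
static void switch_index(uint64_t hw, unsigned int index)
{
	off += (hw << mod) - (hw << index);
	mod = index;
}

int main(void)
{
	uint64_t hw = 1000000;		/* assumed counter value at the switch */
	uint64_t before, after;

	before = read_cycles(hw);	/* running at full CCLK, index 0 */
	switch_index(hw, 2);		/* drop to CCLK/4 */
	after = read_cycles(hw);	/* same hardware count, new scaling */

	/* the reported clocksource value is continuous across the switch */
	assert(before == after);
	printf("before=%llu after=%llu\n",
	       (unsigned long long)before, (unsigned long long)after);
	return 0;
}

At the lower operating point the hardware counter advances 1/4 as fast, but the left shift by the table index restores the full-CCLK rate, so only the offset has to be patched up at the moment of the switch.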
@@ -110,37 +162,16 @@ static int bfin_verify_speed(struct cpufreq_policy *policy)
static int __init __bfin_cpu_init(struct cpufreq_policy *policy)
{
- unsigned long cclk, sclk, csel, min_cclk;
- int index;
-
- if (policy->cpu != 0)
- return -EINVAL;
+ unsigned long cclk, sclk;
cclk = get_cclk() / 1000;
sclk = get_sclk() / 1000;
-#if ANOMALY_05000273 || ANOMALY_05000274 || \
- (!defined(CONFIG_BF54x) && defined(CONFIG_BFIN_EXTMEM_DCACHEABLE))
- min_cclk = sclk * 2;
-#else
- min_cclk = sclk;
-#endif
- csel = ((bfin_read_PLL_DIV() & CSEL) >> 4);
-
- for (index = 0; (cclk >> index) >= min_cclk && csel <= 3; index++, csel++) {
- bfin_freq_table[index].frequency = cclk >> index;
- dpm_state_table[index].csel = csel << 4; /* Shift now into PLL_DIV bitpos */
- dpm_state_table[index].tscale = (TIME_SCALE / (1 << csel)) - 1;
-
- pr_debug("cpufreq: freq:%d csel:0x%x tscale:%d\n",
- bfin_freq_table[index].frequency,
- dpm_state_table[index].csel,
- dpm_state_table[index].tscale);
- }
+ if (policy->cpu == CPUFREQ_CPU)
+ bfin_init_tables(cclk, sclk);
policy->cpuinfo.transition_latency = 50000; /* 50us assumed */
- /*Now ,only support one cpu */
policy->cur = cclk;
cpufreq_frequency_table_get_attr(bfin_freq_table, policy->cpu);
return cpufreq_frequency_table_cpuinfo(policy, bfin_freq_table);
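To illustrate what bfin_init_tables() above ends up with, here is a small stand-alone sketch that only mirrors its loop; the 500 MHz CCLK, 125 MHz SCLK, current CSEL of 0, and the absence of the anomaly workaround are assumed values, not driver requirements:

#include <stdio.h>

int main(void)
{
	/* illustrative values only, in kHz as in the driver */
	unsigned long cclk = 500000, sclk = 125000;
	unsigned long min_cclk = sclk;	/* no anomaly workaround assumed */
	unsigned long csel = 0;		/* assumed current CSEL */
	int index;

	for (index = 0; (cclk >> index) >= min_cclk && csel <= 3; index++, csel++)
		printf("index=%d  freq=%lu kHz  csel(PLL_DIV bits)=0x%02lx\n",
		       index, cclk >> index, csel << 4);
	return 0;
}

Under these assumptions the loop fills exactly three operating points (500, 250 and 125 MHz), with the CSEL value already shifted into its PLL_DIV bit position so bfin_target() can OR it in directly.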