Diffstat (limited to 'drivers/clocksource')
-rw-r--r--  drivers/clocksource/Kconfig            15
-rw-r--r--  drivers/clocksource/arm_arch_timer.c   49
-rw-r--r--  drivers/clocksource/tcb_clksrc.c       61
3 files changed, 114 insertions(+), 11 deletions(-)
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 971d796e071d..5e940f839a2d 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -75,6 +75,21 @@ config ARM_ARCH_TIMER
bool
select CLKSRC_OF if OF
+config ARM_ARCH_TIMER_EVTSTREAM
+ bool "Support for ARM architected timer event stream generation"
+ default y if ARM_ARCH_TIMER
+ help
+ This option enables support for event stream generation based on
+ the ARM architected timer. The event stream is used to wake up CPUs
+ executing the wfe instruction, at a frequency derived from the timer
+ clock rate by a power-of-2 divisor.
+ The main use of the event stream is wfe-based timeouts in userspace
+ locking implementations. It can also be used to impose a timeout on
+ wfe as a safeguard against programming errors, in case an expected
+ event is never generated.
+ This must be disabled for hardware validation purposes, so that any
+ hardware anomaly causing missing events can be detected.
+
config ARM_GLOBAL_TIMER
bool
select CLKSRC_OF if OF
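The wfe-based timeout use case mentioned in the help text, sketched in userspace C for context. This is a hedged illustration only: lock_poll() and spin_wait_relax() are hypothetical names, not part of this patch, and the GCC __sync builtin is just one way to express the compare-and-swap.

/* Hedged sketch of a wfe-bounded spin wait (hypothetical names).  With
 * the event stream enabled, wfe wakes up at least once per event-stream
 * period, so the loop cannot stall waiting for a sev that never comes. */
static inline void spin_wait_relax(void)
{
        asm volatile("wfe" ::: "memory");
}

static int lock_poll(volatile int *lock, unsigned int max_spins)
{
        while (max_spins--) {
                if (__sync_bool_compare_and_swap(lock, 0, 1))
                        return 0;       /* lock acquired */
                spin_wait_relax();      /* parked until an event or a stream tick */
        }
        return -1;                      /* give up, e.g. fall back to a syscall */
}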
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index ce98d5e70927..b94b0d44c158 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -13,6 +13,7 @@
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
@@ -294,6 +295,19 @@ static void __arch_timer_setup(unsigned type,
clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
}
+static void arch_timer_configure_evtstream(void)
+{
+ int evt_stream_div, pos;
+
+ /* Find the closest power of two to the divisor */
+ evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ;
+ pos = fls(evt_stream_div);
+ if (pos > 1 && !(evt_stream_div & (1 << (pos - 2))))
+ pos--;
+ /* enable event stream */
+ arch_timer_evtstrm_enable(min(pos, 15));
+}
+
static int arch_timer_setup(struct clock_event_device *clk)
{
__arch_timer_setup(ARCH_CP15_TIMER, clk);
@@ -307,6 +321,8 @@ static int arch_timer_setup(struct clock_event_device *clk)
}
arch_counter_set_user_access();
+ if (IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM))
+ arch_timer_configure_evtstream();
return 0;
}
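For reference, the rounding performed by arch_timer_configure_evtstream() above, pulled out into a standalone program so the arithmetic can be checked. The 24 MHz rate and 10 kHz target are example values only (ARCH_TIMER_EVT_STREAM_FREQ is not shown in this hunk), and fls() is open-coded so the snippet builds in userspace.

#include <stdio.h>

/* Userspace stand-in for the kernel's fls(): index of the highest set bit, 1-based. */
static int fls(unsigned int x)
{
        return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
        unsigned int rate = 24000000;           /* example timer rate */
        unsigned int target = 10000;            /* example event stream frequency */
        unsigned int div = rate / target;       /* 2400 */
        int pos = fls(div);                     /* 12 */

        /* Round to the nearest power of two: fall back to 2^(pos-1)
         * unless bit pos-2 is set, i.e. unless div >= 1.5 * 2^(pos-1). */
        if (pos > 1 && !(div & (1u << (pos - 2))))
                pos--;

        printf("divisor %u rounds to 2^%d = %u, event stream ~%u Hz\n",
               div, pos, 1u << pos, rate >> pos);
        return 0;
}

The driver additionally clamps the exponent with min(pos, 15), since the EVNTI divider field in CNTKCTL is only four bits wide.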
@@ -460,6 +476,33 @@ static struct notifier_block arch_timer_cpu_nb = {
.notifier_call = arch_timer_cpu_notify,
};
+#ifdef CONFIG_CPU_PM
+static unsigned int saved_cntkctl;
+static int arch_timer_cpu_pm_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ if (action == CPU_PM_ENTER)
+ saved_cntkctl = arch_timer_get_cntkctl();
+ else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT)
+ arch_timer_set_cntkctl(saved_cntkctl);
+ return NOTIFY_OK;
+}
+
+static struct notifier_block arch_timer_cpu_pm_notifier = {
+ .notifier_call = arch_timer_cpu_pm_notify,
+};
+
+static int __init arch_timer_cpu_pm_init(void)
+{
+ return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
+}
+#else
+static int __init arch_timer_cpu_pm_init(void)
+{
+ return 0;
+}
+#endif
+
static int __init arch_timer_register(void)
{
int err;
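The notifier added above relies on the CPU PM calling convention: the idle or suspend path wraps the context-losing state in cpu_pm_enter()/cpu_pm_exit(), and cpu_pm_enter() replays CPU_PM_ENTER_FAILED to callbacks that already ran if a later one fails, which is why that event also restores CNTKCTL. A minimal sketch of the caller side (example_platform_suspend() is a hypothetical stand-in, not a real API):

#include <linux/cpu_pm.h>

/* Hypothetical low-power entry point, stubbed out for illustration only. */
static int example_platform_suspend(void)
{
        return 0;
}

static int example_idle_enter(void)
{
        int ret;

        ret = cpu_pm_enter();   /* CPU_PM_ENTER: CNTKCTL saved here */
        if (ret)
                return ret;     /* rollback already sent CPU_PM_ENTER_FAILED */

        ret = example_platform_suspend();

        cpu_pm_exit();          /* CPU_PM_EXIT: CNTKCTL restored here */
        return ret;
}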
@@ -499,11 +542,17 @@ static int __init arch_timer_register(void)
if (err)
goto out_free_irq;
+ err = arch_timer_cpu_pm_init();
+ if (err)
+ goto out_unreg_notify;
+
/* Immediately configure the timer on the boot CPU */
arch_timer_setup(this_cpu_ptr(arch_timer_evt));
return 0;
+out_unreg_notify:
+ unregister_cpu_notifier(&arch_timer_cpu_nb);
out_free_irq:
if (arch_timer_use_virtual)
free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt);
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 8a6187225dd0..00fdd1170284 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -100,7 +100,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
|| tcd->clkevt.mode == CLOCK_EVT_MODE_ONESHOT) {
__raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR));
__raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
- clk_disable(tcd->clk);
+ clk_disable_unprepare(tcd->clk);
}
switch (m) {
@@ -109,7 +109,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
* of oneshot, we get lower overhead and improved accuracy.
*/
case CLOCK_EVT_MODE_PERIODIC:
- clk_enable(tcd->clk);
+ clk_prepare_enable(tcd->clk);
/* slow clock, count up to RC, then irq and restart */
__raw_writel(timer_clock
@@ -126,7 +126,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
break;
case CLOCK_EVT_MODE_ONESHOT:
- clk_enable(tcd->clk);
+ clk_prepare_enable(tcd->clk);
/* slow clock, count up to RC, then irq and stop */
__raw_writel(timer_clock | ATMEL_TC_CPCSTOP
@@ -180,15 +180,22 @@ static irqreturn_t ch2_irq(int irq, void *handle)
static struct irqaction tc_irqaction = {
.name = "tc_clkevt",
- .flags = IRQF_TIMER | IRQF_DISABLED,
+ .flags = IRQF_TIMER,
.handler = ch2_irq,
};
-static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
+static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
{
+ int ret;
struct clk *t2_clk = tc->clk[2];
int irq = tc->irq[2];
+ /* try to enable the t2 clk now, so a failure is caught here rather than later in a mode change */
+ ret = clk_prepare_enable(t2_clk);
+ if (ret)
+ return ret;
+ clk_disable_unprepare(t2_clk);
+
clkevt.regs = tc->regs;
clkevt.clk = t2_clk;
tc_irqaction.dev_id = &clkevt;
@@ -197,16 +204,21 @@ static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
clkevt.clkevt.cpumask = cpumask_of(0);
+ ret = setup_irq(irq, &tc_irqaction);
+ if (ret)
+ return ret;
+
clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff);
- setup_irq(irq, &tc_irqaction);
+ return ret;
}
#else /* !CONFIG_GENERIC_CLOCKEVENTS */
-static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
+static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
{
/* NOTHING */
+ return 0;
}
#endif
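The clk_enable()/clk_disable() calls in this file are converted to clk_prepare_enable()/clk_disable_unprepare() because the common clock framework splits clock setup into a possibly-sleeping prepare step and an atomic enable step. The combined helper is nothing more than the two calls with error unwinding; the sketch below mirrors the static inline from <linux/clk.h> (renamed to avoid clashing with the real one):

#include <linux/clk.h>

/* Mirrors clk_prepare_enable() from <linux/clk.h>; shown only to make the
 * clk_enable() -> clk_prepare_enable() conversion above explicit. */
static int example_clk_prepare_enable(struct clk *clk)
{
        int ret;

        ret = clk_prepare(clk);         /* may sleep */
        if (ret)
                return ret;

        ret = clk_enable(clk);          /* atomic-safe */
        if (ret)
                clk_unprepare(clk);

        return ret;
}

clk_disable_unprepare() is the symmetric teardown: clk_disable() followed by clk_unprepare().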
@@ -265,6 +277,7 @@ static int __init tcb_clksrc_init(void)
int best_divisor_idx = -1;
int clk32k_divisor_idx = -1;
int i;
+ int ret;
tc = atmel_tc_alloc(CONFIG_ATMEL_TCB_CLKSRC_BLOCK, clksrc.name);
if (!tc) {
@@ -275,7 +288,11 @@ static int __init tcb_clksrc_init(void)
pdev = tc->pdev;
t0_clk = tc->clk[0];
- clk_enable(t0_clk);
+ ret = clk_prepare_enable(t0_clk);
+ if (ret) {
+ pr_debug("can't enable T0 clk\n");
+ goto err_free_tc;
+ }
/* How fast will we be counting? Pick something over 5 MHz. */
rate = (u32) clk_get_rate(t0_clk);
@@ -313,17 +330,39 @@ static int __init tcb_clksrc_init(void)
/* tclib will give us three clocks no matter what the
* underlying platform supports.
*/
- clk_enable(tc->clk[1]);
+ ret = clk_prepare_enable(tc->clk[1]);
+ if (ret) {
+ pr_debug("can't enable T1 clk\n");
+ goto err_disable_t0;
+ }
/* setup both channel 0 & 1 */
tcb_setup_dual_chan(tc, best_divisor_idx);
}
/* and away we go! */
- clocksource_register_hz(&clksrc, divided_rate);
+ ret = clocksource_register_hz(&clksrc, divided_rate);
+ if (ret)
+ goto err_disable_t1;
/* channel 2: periodic and oneshot timer support */
- setup_clkevents(tc, clk32k_divisor_idx);
+ ret = setup_clkevents(tc, clk32k_divisor_idx);
+ if (ret)
+ goto err_unregister_clksrc;
return 0;
+
+err_unregister_clksrc:
+ clocksource_unregister(&clksrc);
+
+err_disable_t1:
+ if (!tc->tcb_config || tc->tcb_config->counter_width != 32)
+ clk_disable_unprepare(tc->clk[1]);
+
+err_disable_t0:
+ clk_disable_unprepare(t0_clk);
+
+err_free_tc:
+ atmel_tc_free(tc);
+ return ret;
}
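One detail in the unwind path above: tc->clk[1] is only prepared when two 16-bit channels are chained, so err_disable_t1 repeats the same counter_width test before unpreparing it. A hypothetical helper (not in the patch) that would keep the two sites in sync:

#include <linux/atmel_tc.h>

/* Hypothetical predicate, not in the patch: the same condition guards both
 * the clk_prepare_enable(tc->clk[1]) call and its unwind. */
static bool tcb_uses_dual_chan(struct atmel_tc *tc)
{
        return !tc->tcb_config || tc->tcb_config->counter_width != 32;
}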
arch_initcall(tcb_clksrc_init);