author     Paul Mundt <lethal@linux-sh.org>  2011-05-31 08:23:20 +0200
committer  Paul Mundt <lethal@linux-sh.org>  2011-05-31 08:23:20 +0200
commit     d4905ce38c73964b868037e49a5945e1cf47a7f2 (patch)
tree       f7b41c1720dabb7b14a92ff0c47aeff1d6e4d70f /drivers/clocksource/sh_tmu.c
parent     sh: Fix up asm-generic/ptrace.h fallout. (diff)
Revert "clocksource: sh_tmu: Runtime PM support"
This reverts commit 1b842e91fea9447eff5eb687e28ad61c02f5033e.

There is a fundamental ordering race between the early and late probe
paths and the runtime PM tie-in that results in __pm_runtime_resume()
attempting to take a lock that hasn't been initialized yet (which by
proxy also suggests that pm_runtime_init() hasn't yet been run on the
device either, making the entire thing unsafe) -- resulting in instant
death on SMP or on UP with spinlock debugging enabled:

    sh_tmu.0: used for clock events
    sh_tmu.0: used for periodic clock events
    BUG: spinlock trylock failure on UP on CPU#0, swapper/0
    lock: 804db198, .magic: 00000000, .owner: <none>/-1, .owner_cpu: 0
    ...

Revert it for now until the ordering issues can be resolved, or we can
get some more help from the runtime PM framework to make this possible.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
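As a hypothetical sketch of the failure mode (not part of the patch or the
commit message): if a probe path runs before the driver core has performed
pm_runtime_init() on the device, as the earlytimer path does here, then a
call to pm_runtime_get_sync() ends up in __pm_runtime_resume(), which takes
the device's runtime PM spinlock (dev->power.lock) while that lock is still
uninitialized, producing the spinlock-debugging splat quoted above. The
probe function below is invented for illustration; only the pm_runtime_*
calls are real kernel APIs.

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

/* Hypothetical probe, simplified from the sh_tmu pattern. */
static int example_early_probe(struct platform_device *pdev)
{
        /*
         * Unsafe if pm_runtime_init() has not yet run for &pdev->dev:
         * pm_runtime_get_sync() resolves to __pm_runtime_resume(), which
         * takes the not-yet-initialized dev->power.lock.
         */
        pm_runtime_get_sync(&pdev->dev);

        /* ... program the timer hardware ... */

        pm_runtime_put_sync(&pdev->dev);
        return 0;
}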
Diffstat (limited to 'drivers/clocksource/sh_tmu.c')
-rw-r--r--  drivers/clocksource/sh_tmu.c  12
1 files changed, 2 insertions, 10 deletions
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 17296288a205..808135768617 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -25,7 +25,6 @@
 #include <linux/delay.h>
 #include <linux/io.h>
 #include <linux/clk.h>
-#include <linux/pm_runtime.h>
 #include <linux/irq.h>
 #include <linux/err.h>
 #include <linux/clocksource.h>
@@ -110,12 +109,10 @@ static int sh_tmu_enable(struct sh_tmu_priv *p)
 {
         int ret;
 
-        /* wake up device and enable clock */
-        pm_runtime_get_sync(&p->pdev->dev);
+        /* enable clock */
         ret = clk_enable(p->clk);
         if (ret) {
                 dev_err(&p->pdev->dev, "cannot enable clock\n");
-                pm_runtime_put_sync(&p->pdev->dev);
                 return ret;
         }
 
@@ -144,9 +141,8 @@ static void sh_tmu_disable(struct sh_tmu_priv *p)
         /* disable interrupts in TMU block */
         sh_tmu_write(p, TCR, 0x0000);
 
-        /* stop clock and mark device as idle */
+        /* stop clock */
         clk_disable(p->clk);
-        pm_runtime_put_sync(&p->pdev->dev);
 }
 
 static void sh_tmu_set_next(struct sh_tmu_priv *p, unsigned long delta,
@@ -415,7 +411,6 @@ static int __devinit sh_tmu_probe(struct platform_device *pdev)
 
         if (p) {
                 dev_info(&pdev->dev, "kept as earlytimer\n");
-                pm_runtime_enable(&pdev->dev);
                 return 0;
         }
 
@@ -430,9 +425,6 @@ static int __devinit sh_tmu_probe(struct platform_device *pdev)
                 kfree(p);
                 platform_set_drvdata(pdev, NULL);
         }
-
-        if (!is_early_platform_device(pdev))
-                pm_runtime_enable(&pdev->dev);
 
         return ret;
 }