author		Takashi YOSHII <takashi.yoshii.zj@renesas.com>	2010-12-17 08:25:09 +0100
committer	Paul Mundt <lethal@linux-sh.org>	2010-12-17 11:38:33 +0100
commit		65ada547d68dc075aa06df92fe325bff07cbc606 (patch)
tree		8c8ab2cc7814d6f85f6cb3a4b40e5165e5a0a7ad
parent		Merge git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus (diff)
download	linux-65ada547d68dc075aa06df92fe325bff07cbc606.tar.xz
		linux-65ada547d68dc075aa06df92fe325bff07cbc606.zip
clocksource: sh_cmt: Remove nested spinlock fix
There are control flows in which sh_cmt_set_next() takes its spinlock twice: the callers sh_cmt_{start,stop}() already hold the lock, while the callers sh_cmt_clock_event_{start,next}() do not. Move the actual work into an unlocked helper, __sh_cmt_set_next(), which must be called with the lock held; sh_cmt_set_next() keeps taking the lock itself for callers that do not hold it, and sh_cmt_{start,stop}() call the helper directly.

[damm@opensource.se: use __sh_cmt_set_next() to simplify code]
[damm@opensource.se: added stable, suitable for v2.6.35 + v2.6.36]
Cc: stable@kernel.org
Signed-off-by: Takashi YOSHII <takashi.yoshii.zj@renesas.com>
Signed-off-by: Magnus Damm <damm@opensource.se>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
-rw-r--r--	drivers/clocksource/sh_cmt.c	17
1 files changed, 11 insertions, 6 deletions
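To make the locking pattern easier to see outside the kernel tree, here is a minimal userspace sketch (not part of this patch) of the same wrapper-plus-unlocked-helper idea, using POSIX spinlocks. All names below (struct timer_state, __timer_set_next(), timer_set_next(), timer_start()) are hypothetical stand-ins for the sh_cmt code.

/*
 * Sketch only: one unlocked helper plus a locking wrapper, so callers
 * that already hold the lock never take it a second time.
 */
#include <pthread.h>
#include <stdio.h>

struct timer_state {
	pthread_spinlock_t lock;
	unsigned long next_match_value;
	unsigned long max_match_value;
};

/* Caller must hold ts->lock; mirrors the role of __sh_cmt_set_next(). */
static void __timer_set_next(struct timer_state *ts, unsigned long delta)
{
	if (delta > ts->max_match_value)
		fprintf(stderr, "delta out of range\n");

	ts->next_match_value = delta;
}

/* Takes the lock itself; mirrors the role of sh_cmt_set_next(). */
static void timer_set_next(struct timer_state *ts, unsigned long delta)
{
	pthread_spin_lock(&ts->lock);
	__timer_set_next(ts, delta);
	pthread_spin_unlock(&ts->lock);
}

/* A caller that already holds the lock uses the unlocked helper. */
static void timer_start(struct timer_state *ts)
{
	pthread_spin_lock(&ts->lock);
	__timer_set_next(ts, ts->max_match_value);	/* no nested lock */
	pthread_spin_unlock(&ts->lock);
}

int main(void)
{
	struct timer_state ts = { .next_match_value = 0, .max_match_value = 100 };

	pthread_spin_init(&ts.lock, PTHREAD_PROCESS_PRIVATE);
	timer_start(&ts);		/* lock held inside, helper called directly */
	timer_set_next(&ts, 42);	/* lock taken by the wrapper */
	printf("next_match_value = %lu\n", ts.next_match_value);
	pthread_spin_destroy(&ts.lock);
	return 0;
}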
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index d68d3aa1814b..f975d24890fa 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -283,16 +283,21 @@ static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p,
 	} while (delay);
 }
 
-static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta)
+static void __sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta)
 {
-	unsigned long flags;
-
 	if (delta > p->max_match_value)
 		dev_warn(&p->pdev->dev, "delta out of range\n");
 
-	spin_lock_irqsave(&p->lock, flags);
 	p->next_match_value = delta;
 	sh_cmt_clock_event_program_verify(p, 0);
+}
+
+static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&p->lock, flags);
+	__sh_cmt_set_next(p, delta);
 	spin_unlock_irqrestore(&p->lock, flags);
 }
@@ -359,7 +364,7 @@ static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag)
 
 	/* setup timeout if no clockevent */
 	if ((flag == FLAG_CLOCKSOURCE) && (!(p->flags & FLAG_CLOCKEVENT)))
-		sh_cmt_set_next(p, p->max_match_value);
+		__sh_cmt_set_next(p, p->max_match_value);
  out:
 	spin_unlock_irqrestore(&p->lock, flags);
 
@@ -381,7 +386,7 @@ static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag)
 
 	/* adjust the timeout to maximum if only clocksource left */
 	if ((flag == FLAG_CLOCKEVENT) && (p->flags & FLAG_CLOCKSOURCE))
-		sh_cmt_set_next(p, p->max_match_value);
+		__sh_cmt_set_next(p, p->max_match_value);
 
 	spin_unlock_irqrestore(&p->lock, flags);
 }
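A note on the design choice (not part of the commit message): the double-underscore prefix follows the usual kernel convention for an "inner" helper that assumes its caller already holds the relevant lock. It matters here because kernel spinlocks are not recursive, so taking p->lock again from sh_cmt_start()/sh_cmt_stop(), which already hold it, would self-deadlock.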