author     Ezequiel Garcia <ezequiel.garcia@free-electrons.com>  2013-12-18 23:08:52 +0100
committer  Russell King <rmk+kernel@arm.linux.org.uk>  2014-01-28 15:06:25 +0100
commit     c5ca95b507c8a2a69e49eeda539b41bd76922d7f
tree       2ebbc82dcd1834300ec341a351a7f48ee5e5df0e  /arch/arm/kernel/io.c
parent     ARM: SMP implementations are not supposed to return from smp_ops.cpu_die()
ARM: 7930/1: Introduce atomic MMIO modify
Some SoCs have MMIO regions that are shared across orthogonal subsystems. This commit implements a possible solution for thread-safe access to such regions through a spinlock-protected API.

Concurrent access is protected with a single spinlock for the entire MMIO address space. While this protects shared registers, it also serializes access to unrelated/unshared registers.

We add relaxed and non-relaxed variants, using writel_relaxed and writel respectively. The rationale is that some users may not require register write completion, only thread-safe access to a register.

Tested-by: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
Acked-by: Jason Cooper <jason@lakedaemon.net>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Ezequiel Garcia <ezequiel.garcia@free-electrons.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
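As a usage sketch (not part of this patch), a driver touching a register shared with another subsystem could call the new helpers as below. The register offset, bit mask and function names are hypothetical, chosen only for illustration; the helper prototypes come from <linux/io.h> via the ARM asm/io.h changes in this series.

    #include <linux/io.h>

    /* Hypothetical register layout, for illustration only. */
    #define SHARED_CTRL_OFFSET	0x10
    #define CTRL_CLK_EN		(1 << 3)

    static void foo_clk_enable(void __iomem *base)
    {
    	/* Ordered variant: set the bit and ensure write completion. */
    	atomic_io_modify(base + SHARED_CTRL_OFFSET, CTRL_CLK_EN, CTRL_CLK_EN);
    }

    static void foo_clk_disable(void __iomem *base)
    {
    	/* Relaxed variant: clear the bit; thread-safe, but no completion barrier. */
    	atomic_io_modify_relaxed(base + SHARED_CTRL_OFFSET, CTRL_CLK_EN, 0);
    }

Because both calls take the same MMIO-wide lock, a concurrent modify of another bit in the same register (from another subsystem or CPU) cannot be lost to a racing read-modify-write.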
Diffstat (limited to 'arch/arm/kernel/io.c')
-rw-r--r--  arch/arm/kernel/io.c | 35 +++++++++++++++++++++++++++++++++++
1 file changed, 35 insertions(+), 0 deletions(-)
diff --git a/arch/arm/kernel/io.c b/arch/arm/kernel/io.c
index dcd5b4d86143..9203cf883330 100644
--- a/arch/arm/kernel/io.c
+++ b/arch/arm/kernel/io.c
@@ -1,6 +1,41 @@
#include <linux/export.h>
#include <linux/types.h>
#include <linux/io.h>
+#include <linux/spinlock.h>
+
+static DEFINE_RAW_SPINLOCK(__io_lock);
+
+/*
+ * Generic atomic MMIO modify.
+ *
+ * Allows thread-safe access to registers shared by unrelated subsystems.
+ * The access is protected by a single MMIO-wide lock.
+ */
+void atomic_io_modify_relaxed(void __iomem *reg, u32 mask, u32 set)
+{
+ unsigned long flags;
+ u32 value;
+
+ raw_spin_lock_irqsave(&__io_lock, flags);
+ value = readl_relaxed(reg) & ~mask;
+ value |= (set & mask);
+ writel_relaxed(value, reg);
+ raw_spin_unlock_irqrestore(&__io_lock, flags);
+}
+EXPORT_SYMBOL(atomic_io_modify_relaxed);
+
+void atomic_io_modify(void __iomem *reg, u32 mask, u32 set)
+{
+ unsigned long flags;
+ u32 value;
+
+ raw_spin_lock_irqsave(&__io_lock, flags);
+ value = readl_relaxed(reg) & ~mask;
+ value |= (set & mask);
+ writel(value, reg);
+ raw_spin_unlock_irqrestore(&__io_lock, flags);
+}
+EXPORT_SYMBOL(atomic_io_modify);
/*
* Copy data from IO memory space to "real" memory space.