author     Gregory CLEMENT <gregory.clement@free-electrons.com>    2014-04-14 17:10:11 +0200
committer  Jason Cooper <jason@lakedaemon.net>                     2014-05-08 18:18:58 +0200
commit     c3e04cabb135625df8ff4b71ef4130f0ccbcc669 (patch)
tree       149c7c433c9fa54a0a4612f8a64bafe26e04fc11 /arch/arm/mach-mvebu
parent     ARM: mvebu: Allow to power down L2 cache controller in idle mode (diff)
ARM: mvebu: Add the PMSU related part of the cpu idle functions
The cpu idle support will need to access the Power Management Service Unit
(PMSU). This commit adds the architecture-related functions that will be
used in the idle path of the cpuidle driver.

Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com>
Link: https://lkml.kernel.org/r/1397488214-20685-9-git-send-email-gregory.clement@free-electrons.com
Signed-off-by: Jason Cooper <jason@lakedaemon.net>
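For context, a minimal sketch of how a cpuidle driver's ->enter() hook could
sit on top of the suspend helper added by this patch. The driver glue is not
part of this commit; the function name armada_370_xp_enter_idle, the
state-index convention and the assumption that armada_370_xp_cpu_suspend()
is exported to the driver are illustrative only.

#include <linux/cpuidle.h>

/* Suspend helper added by this patch; assumed exported for the driver */
extern int armada_370_xp_cpu_suspend(unsigned long deepidle);

static int armada_370_xp_enter_idle(struct cpuidle_device *dev,
				    struct cpuidle_driver *drv,
				    int index)
{
	/* Assume deeper states also ask the PMSU to power down the L2 cache */
	bool deepidle = (index > 0);

	if (armada_370_xp_cpu_suspend(deepidle))
		return -EBUSY;

	return index;
}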
Diffstat (limited to 'arch/arm/mach-mvebu')
-rw-r--r--   arch/arm/mach-mvebu/pmsu.c   132
1 file changed, 131 insertions(+), 1 deletion(-)
diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c
index 7ccf14a065b1..78cf0279e1af 100644
--- a/arch/arm/mach-mvebu/pmsu.c
+++ b/arch/arm/mach-mvebu/pmsu.c
@@ -24,9 +24,12 @@
#include <linux/io.h>
#include <linux/smp.h>
#include <linux/resource.h>
+#include <asm/cacheflush.h>
+#include <asm/cp15.h>
#include <asm/smp_plat.h>
+#include <asm/suspend.h>
+#include <asm/tlbflush.h>
#include "common.h"
-#include "pmsu.h"
static void __iomem *pmsu_mp_base;
@@ -34,12 +37,33 @@ static void __iomem *pmsu_mp_base;
#define PMSU_REG_SIZE 0x1000
/* PMSU MP registers */
+#define PMSU_CONTROL_AND_CONFIG(cpu) ((cpu * 0x100) + 0x104)
+#define PMSU_CONTROL_AND_CONFIG_DFS_REQ BIT(18)
+#define PMSU_CONTROL_AND_CONFIG_PWDDN_REQ BIT(16)
+#define PMSU_CONTROL_AND_CONFIG_L2_PWDDN BIT(20)
+
+#define PMSU_CPU_POWER_DOWN_CONTROL(cpu) ((cpu * 0x100) + 0x108)
+
+#define PMSU_CPU_POWER_DOWN_DIS_SNP_Q_SKIP BIT(0)
+
+#define PMSU_STATUS_AND_MASK(cpu) ((cpu * 0x100) + 0x10c)
+#define PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT BIT(16)
+#define PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT BIT(17)
+#define PMSU_STATUS_AND_MASK_IRQ_WAKEUP BIT(20)
+#define PMSU_STATUS_AND_MASK_FIQ_WAKEUP BIT(21)
+#define PMSU_STATUS_AND_MASK_DBG_WAKEUP BIT(22)
+#define PMSU_STATUS_AND_MASK_IRQ_MASK BIT(24)
+#define PMSU_STATUS_AND_MASK_FIQ_MASK BIT(25)
+
#define PMSU_BOOT_ADDR_REDIRECT_OFFSET(cpu) ((cpu * 0x100) + 0x124)
/* PMSU fabric registers */
#define L2C_NFABRIC_PM_CTL 0x4
#define L2C_NFABRIC_PM_CTL_PWR_DOWN BIT(20)
+extern void ll_disable_coherency(void);
+extern void ll_enable_coherency(void);
+
static struct of_device_id of_pmsu_table[] = {
{ .compatible = "marvell,armada-370-pmsu", },
{ .compatible = "marvell,armada-370-xp-pmsu", },
@@ -110,4 +134,110 @@ static void armada_370_xp_pmsu_enable_l2_powerdown_onidle(void)
writel(reg, pmsu_mp_base + L2C_NFABRIC_PM_CTL);
}
+static void armada_370_xp_cpu_resume(void)
+{
+ asm volatile("bl ll_add_cpu_to_smp_group\n\t"
+ "bl ll_enable_coherency\n\t"
+ "b cpu_resume\n\t");
+}
+
+/* No locking is needed because we only access per-CPU registers */
+void armada_370_xp_pmsu_idle_prepare(bool deepidle)
+{
+ unsigned int hw_cpu = cpu_logical_map(smp_processor_id());
+ u32 reg;
+
+ if (pmsu_mp_base == NULL)
+ return;
+
+ /*
+ * Adjust the PMSU configuration to wait for WFI signal, enable
+ * IRQ and FIQ as wakeup events, set wait for snoop queue empty
+ * indication and mask IRQ and FIQ from CPU
+ */
+ reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
+ reg |= PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT |
+ PMSU_STATUS_AND_MASK_IRQ_WAKEUP |
+ PMSU_STATUS_AND_MASK_FIQ_WAKEUP |
+ PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT |
+ PMSU_STATUS_AND_MASK_IRQ_MASK |
+ PMSU_STATUS_AND_MASK_FIQ_MASK;
+ writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
+
+ reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));
+ /* ask HW to power down the L2 Cache if needed */
+ if (deepidle)
+ reg |= PMSU_CONTROL_AND_CONFIG_L2_PWDDN;
+
+ /* request power down */
+ reg |= PMSU_CONTROL_AND_CONFIG_PWDDN_REQ;
+ writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));
+
+ /* Disable snoop disable by HW - SW is taking care of it */
+ reg = readl(pmsu_mp_base + PMSU_CPU_POWER_DOWN_CONTROL(hw_cpu));
+ reg |= PMSU_CPU_POWER_DOWN_DIS_SNP_Q_SKIP;
+ writel(reg, pmsu_mp_base + PMSU_CPU_POWER_DOWN_CONTROL(hw_cpu));
+}
+
+static noinline int do_armada_370_xp_cpu_suspend(unsigned long deepidle)
+{
+ armada_370_xp_pmsu_idle_prepare(deepidle);
+
+ v7_exit_coherency_flush(all);
+
+ ll_disable_coherency();
+
+ dsb();
+
+ wfi();
+
+ /* If we are here, wfi failed. As processors run out of
+ * coherency for some time, tlbs might be stale, so flush them
+ */
+ local_flush_tlb_all();
+
+ ll_enable_coherency();
+
+ /* Test the CR_C bit and set it if it was cleared */
+ asm volatile(
+ "mrc p15, 0, %0, c1, c0, 0 \n\t"
+ "tst %0, #(1 << 2) \n\t"
+ "orreq %0, %0, #(1 << 2) \n\t"
+ "mcreq p15, 0, %0, c1, c0, 0 \n\t"
+ "isb "
+ : : "r" (0));
+
+ pr_warn("Failed to suspend the system\n");
+
+ return 0;
+}
+
+static int armada_370_xp_cpu_suspend(unsigned long deepidle)
+{
+ return cpu_suspend(deepidle, do_armada_370_xp_cpu_suspend);
+}
+
+/* No locking is needed because we only access per-CPU registers */
+static noinline void armada_370_xp_pmsu_idle_restore(void)
+{
+ unsigned int hw_cpu = cpu_logical_map(smp_processor_id());
+ u32 reg;
+
+ if (pmsu_mp_base == NULL)
+ return;
+
+ /* cancel ask HW to power down the L2 Cache if possible */
+ reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));
+ reg &= ~PMSU_CONTROL_AND_CONFIG_L2_PWDDN;
+ writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));
+
+ /* cancel Enable wakeup events and mask interrupts */
+ reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
+ reg &= ~(PMSU_STATUS_AND_MASK_IRQ_WAKEUP | PMSU_STATUS_AND_MASK_FIQ_WAKEUP);
+ reg &= ~PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT;
+ reg &= ~PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT;
+ reg &= ~(PMSU_STATUS_AND_MASK_IRQ_MASK | PMSU_STATUS_AND_MASK_FIQ_MASK);
+ writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
+}
+
early_initcall(armada_370_xp_pmsu_init);
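
Note: armada_370_xp_cpu_resume() is only defined in this patch; nothing here
installs it as the wake-up entry point yet. A hypothetical sketch of how that
could be done through the PMSU boot-address redirection register defined
above; the helper name and the assumption that this wiring lands in a later
patch of the series are mine, not the author's.

#include <asm/memory.h>		/* virt_to_phys() */

static void armada_370_xp_pmsu_set_resume_addr(unsigned int hw_cpu)
{
	if (pmsu_mp_base == NULL)
		return;

	/* Have the PMSU restart this CPU at the resume stub when it wakes up */
	writel(virt_to_phys(armada_370_xp_cpu_resume),
	       pmsu_mp_base + PMSU_BOOT_ADDR_REDIRECT_OFFSET(hw_cpu));
}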