path: root/drivers
author    Linus Torvalds <torvalds@linux-foundation.org>  2016-10-08 06:23:40 +0200
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-10-08 06:23:40 +0200
commit    6afd563d4bbc1924b7de9e053324c007e0d36476 (patch)
tree      619c2270f6f09ce8c9eb7f6aa448f07f67ffa8e8 /drivers
parent    Merge tag 'armsoc-arm64' of git://git.kernel.org/pub/scm/linux/kernel/git/arm... (diff)
parent    bus: qcom-ebi2: depend on HAS_IOMEM (diff)
Merge tag 'armsoc-drivers' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc
Pull ARM SoC driver updates from Arnd Bergmann:
 "Driver updates for ARM SoCs, including a couple of newly added drivers:

   - The Qualcomm external bus interface 2 (EBI2), used in some of
     their mobile phone chips for connecting flash memory, LCD displays
     or other peripherals

   - Secure monitor firmware for Amlogic SoCs, and an NVMEM driver for
     the EFUSE based on that firmware interface.

   - Perf support for the AppliedMicro X-Gene performance monitor unit

   - Reset driver for STMicroelectronics STM32

   - Reset driver for SocioNext UniPhier SoCs

  Aside from these, there are minor updates to SoC-specific bus,
  clocksource, firmware, pinctrl, reset, rtc and pmic drivers"

* tag 'armsoc-drivers' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc: (50 commits)
  bus: qcom-ebi2: depend on HAS_IOMEM
  pinctrl: mvebu: orion5x: Generalise mv88f5181l support for 88f5181
  clk: mvebu: Add clk support for the orion5x SoC mv88f5181
  dt-bindings: EXYNOS: Add Exynos5433 PMU compatible
  clocksource: exynos_mct: Add the support for ARM64
  perf: xgene: Add APM X-Gene SoC Performance Monitoring Unit driver
  Documentation: Add documentation for APM X-Gene SoC PMU DTS binding
  MAINTAINERS: Add entry for APM X-Gene SoC PMU driver
  bus: qcom: add EBI2 driver
  bus: qcom: add EBI2 device tree bindings
  rtc: rtc-pm8xxx: Add support for pm8018 rtc
  nvmem: amlogic: Add Amlogic Meson EFUSE driver
  firmware: Amlogic: Add secure monitor driver
  soc: qcom: smd: Reset rx tail rather than tx
  memory: atmel-sdramc: fix a possible NULL dereference
  reset: hi6220: allow to compile test driver on other architectures
  reset: zynq: add driver Kconfig option
  reset: sunxi: add driver Kconfig option
  reset: stm32: add driver Kconfig option
  reset: socfpga: add driver Kconfig option
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/bus/Kconfig | 14
-rw-r--r--  drivers/bus/Makefile | 1
-rw-r--r--  drivers/bus/qcom-ebi2.c | 408
-rw-r--r--  drivers/bus/tegra-aconnect.c | 22
-rw-r--r--  drivers/clk/mvebu/orion.c | 70
-rw-r--r--  drivers/clocksource/Kconfig | 2
-rw-r--r--  drivers/clocksource/exynos_mct.c | 4
-rw-r--r--  drivers/firmware/Kconfig | 1
-rw-r--r--  drivers/firmware/Makefile | 1
-rw-r--r--  drivers/firmware/meson/Kconfig | 9
-rw-r--r--  drivers/firmware/meson/Makefile | 1
-rw-r--r--  drivers/firmware/meson/meson_sm.c | 248
-rw-r--r--  drivers/firmware/qcom_scm.c | 19
-rw-r--r--  drivers/media/rc/meson-ir.c | 29
-rw-r--r--  drivers/memory/atmel-ebi.c | 10
-rw-r--r--  drivers/memory/atmel-sdramc.c | 4
-rw-r--r--  drivers/memory/omap-gpmc.c | 20
-rw-r--r--  drivers/nvmem/Kconfig | 10
-rw-r--r--  drivers/nvmem/Makefile | 2
-rw-r--r--  drivers/nvmem/meson-efuse.c | 93
-rw-r--r--  drivers/perf/Kconfig | 7
-rw-r--r--  drivers/perf/Makefile | 1
-rw-r--r--  drivers/perf/xgene_pmu.c | 1398
-rw-r--r--  drivers/pinctrl/mvebu/pinctrl-orion.c | 23
-rw-r--r--  drivers/reset/Kconfig | 65
-rw-r--r--  drivers/reset/Makefile | 20
-rw-r--r--  drivers/reset/core.c | 12
-rw-r--r--  drivers/reset/hisilicon/Kconfig | 3
-rw-r--r--  drivers/reset/reset-ath79.c | 1
-rw-r--r--  drivers/reset/reset-socfpga.c | 19
-rw-r--r--  drivers/reset/reset-stm32.c | 108
-rw-r--r--  drivers/reset/reset-uniphier.c | 440
-rw-r--r--  drivers/soc/mediatek/mtk-pmic-wrap.c | 2
-rw-r--r--  drivers/soc/qcom/smd.c | 265
-rw-r--r--  drivers/soc/qcom/smem.c | 3
-rw-r--r--  drivers/soc/rockchip/pm_domains.c | 100
-rw-r--r--  drivers/soc/tegra/pmc.c | 28
37 files changed, 3185 insertions, 278 deletions
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 3b205e212337..7010dcac9328 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -108,6 +108,14 @@ config OMAP_OCP2SCP
OCP2SCP and in OMAP5, both USB PHY and SATA PHY is connected via
OCP2SCP.
+config QCOM_EBI2
+ bool "Qualcomm External Bus Interface 2 (EBI2)"
+ depends on HAS_IOMEM
+ help
+ Say y here to enable support for the Qualcomm External Bus
+ Interface 2, which can be used to connect things like NAND Flash,
+ SRAM, ethernet adapters, FPGAs and LCD displays.
+
config SIMPLE_PM_BUS
bool "Simple Power-Managed Bus Driver"
depends on OF && PM
@@ -132,12 +140,8 @@ config SUNXI_RSB
with various RSB based devices, such as AXP223, AXP8XX PMICs,
and AC100/AC200 ICs.
-# TODO: This uses pm_clk_*() symbols that aren't exported in v4.7 and hence
-# the driver will fail to build as a module. However there are patches to
-# address that queued for v4.8, so this can be turned into a tristate symbol
-# after v4.8-rc1.
config TEGRA_ACONNECT
- bool "Tegra ACONNECT Bus Driver"
+ tristate "Tegra ACONNECT Bus Driver"
depends on ARCH_TEGRA_210_SOC
depends on OF && PM
select PM_CLK
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index ac84cc4348e3..c6cfa6b2606e 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_MVEBU_MBUS) += mvebu-mbus.o
obj-$(CONFIG_OMAP_INTERCONNECT) += omap_l3_smx.o omap_l3_noc.o
obj-$(CONFIG_OMAP_OCP2SCP) += omap-ocp2scp.o
+obj-$(CONFIG_QCOM_EBI2) += qcom-ebi2.o
obj-$(CONFIG_SUNXI_RSB) += sunxi-rsb.o
obj-$(CONFIG_SIMPLE_PM_BUS) += simple-pm-bus.o
obj-$(CONFIG_TEGRA_ACONNECT) += tegra-aconnect.o
diff --git a/drivers/bus/qcom-ebi2.c b/drivers/bus/qcom-ebi2.c
new file mode 100644
index 000000000000..a6444244c411
--- /dev/null
+++ b/drivers/bus/qcom-ebi2.c
@@ -0,0 +1,408 @@
+/*
+ * Qualcomm External Bus Interface 2 (EBI2) driver
+ * an older version of the Qualcomm Parallel Interface Controller (QPIC)
+ *
+ * Copyright (C) 2016 Linaro Ltd.
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ * See the device tree bindings for this block for more details on the
+ * hardware.
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+
+/*
+ * CS0, CS1, CS4 and CS5 are two bits wide, CS2 and CS3 are one bit.
+ */
+#define EBI2_CS0_ENABLE_MASK (BIT(0) | BIT(1))
+#define EBI2_CS1_ENABLE_MASK (BIT(2) | BIT(3))
+#define EBI2_CS2_ENABLE_MASK BIT(4)
+#define EBI2_CS3_ENABLE_MASK BIT(5)
+#define EBI2_CS4_ENABLE_MASK (BIT(6) | BIT(7))
+#define EBI2_CS5_ENABLE_MASK (BIT(8) | BIT(9))
+#define EBI2_CSN_MASK GENMASK(9, 0)
+
+#define EBI2_XMEM_CFG 0x0000 /* Power management etc */
+
+/*
+ * SLOW CSn CFG
+ *
+ * Bits 31-28: RECOVERY recovery cycles (0 = 1, 1 = 2 etc) this is the time the
+ * memory continues to drive the data bus after OE is de-asserted.
+ * Inserted when reading one CS and switching to another CS or read
+ * followed by write on the same CS. Valid values 0 thru 15.
+ * Bits 27-24: WR_HOLD write hold cycles, these are extra cycles inserted after
+ * every write minimum 1. The data out is driven from the time WE is
+ * asserted until CS is asserted. With a hold of 1, the CS stays
+ * active for 1 extra cycle etc. Valid values 0 thru 15.
+ * Bits 23-16: WR_DELTA initial latency for write cycles inserted for the first
+ * write to a page or burst memory
+ * Bits 15-8: RD_DELTA initial latency for read cycles inserted for the first
+ * read to a page or burst memory
+ * Bits 7-4: WR_WAIT number of wait cycles for every write access, 0=1 cycle
+ * so 1 thru 16 cycles.
+ * Bits 3-0: RD_WAIT number of wait cycles for every read access, 0=1 cycle
+ * so 1 thru 16 cycles.
+ */
+#define EBI2_XMEM_CS0_SLOW_CFG 0x0008
+#define EBI2_XMEM_CS1_SLOW_CFG 0x000C
+#define EBI2_XMEM_CS2_SLOW_CFG 0x0010
+#define EBI2_XMEM_CS3_SLOW_CFG 0x0014
+#define EBI2_XMEM_CS4_SLOW_CFG 0x0018
+#define EBI2_XMEM_CS5_SLOW_CFG 0x001C
+
+#define EBI2_XMEM_RECOVERY_SHIFT 28
+#define EBI2_XMEM_WR_HOLD_SHIFT 24
+#define EBI2_XMEM_WR_DELTA_SHIFT 16
+#define EBI2_XMEM_RD_DELTA_SHIFT 8
+#define EBI2_XMEM_WR_WAIT_SHIFT 4
+#define EBI2_XMEM_RD_WAIT_SHIFT 0
+
+/*
+ * FAST CSn CFG
+ * Bits 31-28: ?
+ * Bits 27-24: RD_HOLD: the length in cycles of the first segment of a read
+ * transfer. For a single read transfer this will be the time
+ * from CS assertion to OE assertion.
+ * Bits 23-18: ?
+ * Bits 17-16: ADV_OE_RECOVERY, the number of cycles elapsed before an OE
+ * assertion, with respect to the cycle where ADV is asserted.
+ * 2 means 2 cycles between ADV and OE. Values 0, 1, 2 or 3.
+ * Bit 5: ADDR_HOLD_ENA, the address is held for an extra cycle to meet
+ * hold time requirements with ADV assertion.
+ *
+ * The manual mentions "write precharge cycles" and "precharge cycles".
+ * We have not been able to figure out which bit fields these correspond to
+ * in the hardware, or what valid values exist. The current hypothesis is that
+ * this is something just used on the FAST chip selects. There is also a "byte
+ * device enable" flag somewhere for 8bit memories.
+ */
+#define EBI2_XMEM_CS0_FAST_CFG 0x0028
+#define EBI2_XMEM_CS1_FAST_CFG 0x002C
+#define EBI2_XMEM_CS2_FAST_CFG 0x0030
+#define EBI2_XMEM_CS3_FAST_CFG 0x0034
+#define EBI2_XMEM_CS4_FAST_CFG 0x0038
+#define EBI2_XMEM_CS5_FAST_CFG 0x003C
+
+#define EBI2_XMEM_RD_HOLD_SHIFT 24
+#define EBI2_XMEM_ADV_OE_RECOVERY_SHIFT 16
+#define EBI2_XMEM_ADDR_HOLD_ENA_SHIFT 5
+
+/**
+ * struct cs_data - struct with info on a chipselect setting
+ * @enable_mask: mask to enable the chipselect in the EBI2 config
+ * @slow_cfg: offset to XMEMC slow CS config
+ * @fast_cfg: offset to XMEMC fast CS config
+ */
+struct cs_data {
+ u32 enable_mask;
+ u16 slow_cfg;
+ u16 fast_cfg;
+};
+
+static const struct cs_data cs_info[] = {
+ {
+ /* CS0 */
+ .enable_mask = EBI2_CS0_ENABLE_MASK,
+ .slow_cfg = EBI2_XMEM_CS0_SLOW_CFG,
+ .fast_cfg = EBI2_XMEM_CS0_FAST_CFG,
+ },
+ {
+ /* CS1 */
+ .enable_mask = EBI2_CS1_ENABLE_MASK,
+ .slow_cfg = EBI2_XMEM_CS1_SLOW_CFG,
+ .fast_cfg = EBI2_XMEM_CS1_FAST_CFG,
+ },
+ {
+ /* CS2 */
+ .enable_mask = EBI2_CS2_ENABLE_MASK,
+ .slow_cfg = EBI2_XMEM_CS2_SLOW_CFG,
+ .fast_cfg = EBI2_XMEM_CS2_FAST_CFG,
+ },
+ {
+ /* CS3 */
+ .enable_mask = EBI2_CS3_ENABLE_MASK,
+ .slow_cfg = EBI2_XMEM_CS3_SLOW_CFG,
+ .fast_cfg = EBI2_XMEM_CS3_FAST_CFG,
+ },
+ {
+ /* CS4 */
+ .enable_mask = EBI2_CS4_ENABLE_MASK,
+ .slow_cfg = EBI2_XMEM_CS4_SLOW_CFG,
+ .fast_cfg = EBI2_XMEM_CS4_FAST_CFG,
+ },
+ {
+ /* CS5 */
+ .enable_mask = EBI2_CS5_ENABLE_MASK,
+ .slow_cfg = EBI2_XMEM_CS5_SLOW_CFG,
+ .fast_cfg = EBI2_XMEM_CS5_FAST_CFG,
+ },
+};
+
+/**
+ * struct ebi2_xmem_prop - describes an XMEM config property
+ * @prop: the device tree binding name
+ * @max: maximum value for the property
+ * @slowreg: true if this property is in the SLOW CS config register
+ * else it is assumed to be in the FAST config register
+ * @shift: the bit field start in the SLOW or FAST register for this
+ * property
+ */
+struct ebi2_xmem_prop {
+ const char *prop;
+ u32 max;
+ bool slowreg;
+ u16 shift;
+};
+
+static const struct ebi2_xmem_prop xmem_props[] = {
+ {
+ .prop = "qcom,xmem-recovery-cycles",
+ .max = 15,
+ .slowreg = true,
+ .shift = EBI2_XMEM_RECOVERY_SHIFT,
+ },
+ {
+ .prop = "qcom,xmem-write-hold-cycles",
+ .max = 15,
+ .slowreg = true,
+ .shift = EBI2_XMEM_WR_HOLD_SHIFT,
+ },
+ {
+ .prop = "qcom,xmem-write-delta-cycles",
+ .max = 255,
+ .slowreg = true,
+ .shift = EBI2_XMEM_WR_DELTA_SHIFT,
+ },
+ {
+ .prop = "qcom,xmem-read-delta-cycles",
+ .max = 255,
+ .slowreg = true,
+ .shift = EBI2_XMEM_RD_DELTA_SHIFT,
+ },
+ {
+ .prop = "qcom,xmem-write-wait-cycles",
+ .max = 15,
+ .slowreg = true,
+ .shift = EBI2_XMEM_WR_WAIT_SHIFT,
+ },
+ {
+ .prop = "qcom,xmem-read-wait-cycles",
+ .max = 15,
+ .slowreg = true,
+ .shift = EBI2_XMEM_RD_WAIT_SHIFT,
+ },
+ {
+ .prop = "qcom,xmem-address-hold-enable",
+ .max = 1, /* boolean prop */
+ .slowreg = false,
+ .shift = EBI2_XMEM_ADDR_HOLD_ENA_SHIFT,
+ },
+ {
+ .prop = "qcom,xmem-adv-to-oe-recovery-cycles",
+ .max = 3,
+ .slowreg = false,
+ .shift = EBI2_XMEM_ADV_OE_RECOVERY_SHIFT,
+ },
+ {
+ .prop = "qcom,xmem-read-hold-cycles",
+ .max = 15,
+ .slowreg = false,
+ .shift = EBI2_XMEM_RD_HOLD_SHIFT,
+ },
+};
+
+static void qcom_ebi2_setup_chipselect(struct device_node *np,
+ struct device *dev,
+ void __iomem *ebi2_base,
+ void __iomem *ebi2_xmem,
+ u32 csindex)
+{
+ const struct cs_data *csd;
+ u32 slowcfg, fastcfg;
+ u32 val;
+ int ret;
+ int i;
+
+ csd = &cs_info[csindex];
+ val = readl(ebi2_base);
+ val |= csd->enable_mask;
+ writel(val, ebi2_base);
+ dev_dbg(dev, "enabled CS%u\n", csindex);
+
+ /* Next set up the XMEMC */
+ slowcfg = 0;
+ fastcfg = 0;
+
+ for (i = 0; i < ARRAY_SIZE(xmem_props); i++) {
+ const struct ebi2_xmem_prop *xp = &xmem_props[i];
+
+ /* All are regular u32 values */
+ ret = of_property_read_u32(np, xp->prop, &val);
+ if (ret) {
+ dev_dbg(dev, "could not read %s for CS%d\n",
+ xp->prop, csindex);
+ continue;
+ }
+
+ /* First check boolean props */
+ if (xp->max == 1 && val) {
+ if (xp->slowreg)
+ slowcfg |= BIT(xp->shift);
+ else
+ fastcfg |= BIT(xp->shift);
+ dev_dbg(dev, "set %s flag\n", xp->prop);
+ continue;
+ }
+
+ /* We're dealing with an u32 */
+ if (val > xp->max) {
+ dev_err(dev,
+ "too high value for %s: %u, capped at %u\n",
+ xp->prop, val, xp->max);
+ val = xp->max;
+ }
+ if (xp->slowreg)
+ slowcfg |= (val << xp->shift);
+ else
+ fastcfg |= (val << xp->shift);
+ dev_dbg(dev, "set %s to %u\n", xp->prop, val);
+ }
+
+ dev_info(dev, "CS%u: SLOW CFG 0x%08x, FAST CFG 0x%08x\n",
+ csindex, slowcfg, fastcfg);
+
+ if (slowcfg)
+ writel(slowcfg, ebi2_xmem + csd->slow_cfg);
+ if (fastcfg)
+ writel(fastcfg, ebi2_xmem + csd->fast_cfg);
+}
+
+static int qcom_ebi2_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *child;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ void __iomem *ebi2_base;
+ void __iomem *ebi2_xmem;
+ struct clk *ebi2xclk;
+ struct clk *ebi2clk;
+ bool have_children = false;
+ u32 val;
+ int ret;
+
+ ebi2xclk = devm_clk_get(dev, "ebi2x");
+ if (IS_ERR(ebi2xclk))
+ return PTR_ERR(ebi2xclk);
+
+ ret = clk_prepare_enable(ebi2xclk);
+ if (ret) {
+ dev_err(dev, "could not enable EBI2X clk (%d)\n", ret);
+ return ret;
+ }
+
+ ebi2clk = devm_clk_get(dev, "ebi2");
+ if (IS_ERR(ebi2clk)) {
+ ret = PTR_ERR(ebi2clk);
+ goto err_disable_2x_clk;
+ }
+
+ ret = clk_prepare_enable(ebi2clk);
+ if (ret) {
+ dev_err(dev, "could not enable EBI2 clk\n");
+ goto err_disable_2x_clk;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ebi2_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ebi2_base)) {
+ ret = PTR_ERR(ebi2_base);
+ goto err_disable_clk;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ ebi2_xmem = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ebi2_xmem)) {
+ ret = PTR_ERR(ebi2_xmem);
+ goto err_disable_clk;
+ }
+
+ /* Allegedly this turns the power save mode off */
+ writel(0UL, ebi2_xmem + EBI2_XMEM_CFG);
+
+ /* Disable all chipselects */
+ val = readl(ebi2_base);
+ val &= ~EBI2_CSN_MASK;
+ writel(val, ebi2_base);
+
+ /* Walk over the child nodes and see what chipselects we use */
+ for_each_available_child_of_node(np, child) {
+ u32 csindex;
+
+ /* Figure out the chipselect */
+ ret = of_property_read_u32(child, "reg", &csindex);
+ if (ret)
+ return ret;
+
+ if (csindex > 5) {
+ dev_err(dev,
+ "invalid chipselect %u, we only support 0-5\n",
+ csindex);
+ continue;
+ }
+
+ qcom_ebi2_setup_chipselect(child,
+ dev,
+ ebi2_base,
+ ebi2_xmem,
+ csindex);
+
+ /* We have at least one child */
+ have_children = true;
+ }
+
+ if (have_children)
+ return of_platform_default_populate(np, NULL, dev);
+ return 0;
+
+err_disable_clk:
+ clk_disable_unprepare(ebi2clk);
+err_disable_2x_clk:
+ clk_disable_unprepare(ebi2xclk);
+
+ return ret;
+}
+
+static const struct of_device_id qcom_ebi2_of_match[] = {
+ { .compatible = "qcom,msm8660-ebi2", },
+ { .compatible = "qcom,apq8060-ebi2", },
+ { }
+};
+
+static struct platform_driver qcom_ebi2_driver = {
+ .probe = qcom_ebi2_probe,
+ .driver = {
+ .name = "qcom-ebi2",
+ .of_match_table = qcom_ebi2_of_match,
+ },
+};
+module_platform_driver(qcom_ebi2_driver);
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("Qualcomm EBI2 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/bus/tegra-aconnect.c b/drivers/bus/tegra-aconnect.c
index 7e4104b74fa8..084ae286fa23 100644
--- a/drivers/bus/tegra-aconnect.c
+++ b/drivers/bus/tegra-aconnect.c
@@ -15,24 +15,6 @@
#include <linux/pm_clock.h>
#include <linux/pm_runtime.h>
-static int tegra_aconnect_add_clock(struct device *dev, char *name)
-{
- struct clk *clk;
- int ret;
-
- clk = clk_get(dev, name);
- if (IS_ERR(clk)) {
- dev_err(dev, "%s clock not found\n", name);
- return PTR_ERR(clk);
- }
-
- ret = pm_clk_add_clk(dev, clk);
- if (ret)
- clk_put(clk);
-
- return ret;
-}
-
static int tegra_aconnect_probe(struct platform_device *pdev)
{
int ret;
@@ -44,11 +26,11 @@ static int tegra_aconnect_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = tegra_aconnect_add_clock(&pdev->dev, "ape");
+ ret = of_pm_clk_add_clk(&pdev->dev, "ape");
if (ret)
goto clk_destroy;
- ret = tegra_aconnect_add_clock(&pdev->dev, "apb2ape");
+ ret = of_pm_clk_add_clk(&pdev->dev, "apb2ape");
if (ret)
goto clk_destroy;
diff --git a/drivers/clk/mvebu/orion.c b/drivers/clk/mvebu/orion.c
index fd129566c1ce..a6e5bee23385 100644
--- a/drivers/clk/mvebu/orion.c
+++ b/drivers/clk/mvebu/orion.c
@@ -21,6 +21,76 @@ static const struct coreclk_ratio orion_coreclk_ratios[] __initconst = {
};
/*
+ * Orion 5181
+ */
+
+#define SAR_MV88F5181_TCLK_FREQ 8
+#define SAR_MV88F5181_TCLK_FREQ_MASK 0x3
+
+static u32 __init mv88f5181_get_tclk_freq(void __iomem *sar)
+{
+ u32 opt = (readl(sar) >> SAR_MV88F5181_TCLK_FREQ) &
+ SAR_MV88F5181_TCLK_FREQ_MASK;
+ if (opt == 0)
+ return 133333333;
+ else if (opt == 1)
+ return 150000000;
+ else if (opt == 2)
+ return 166666667;
+ else
+ return 0;
+}
+
+#define SAR_MV88F5181_CPU_FREQ 4
+#define SAR_MV88F5181_CPU_FREQ_MASK 0xf
+
+static u32 __init mv88f5181_get_cpu_freq(void __iomem *sar)
+{
+ u32 opt = (readl(sar) >> SAR_MV88F5181_CPU_FREQ) &
+ SAR_MV88F5181_CPU_FREQ_MASK;
+ if (opt == 0)
+ return 333333333;
+ else if (opt == 1 || opt == 2)
+ return 400000000;
+ else if (opt == 3)
+ return 500000000;
+ else
+ return 0;
+}
+
+static void __init mv88f5181_get_clk_ratio(void __iomem *sar, int id,
+ int *mult, int *div)
+{
+ u32 opt = (readl(sar) >> SAR_MV88F5181_CPU_FREQ) &
+ SAR_MV88F5181_CPU_FREQ_MASK;
+ if (opt == 0 || opt == 1) {
+ *mult = 1;
+ *div = 2;
+ } else if (opt == 2 || opt == 3) {
+ *mult = 1;
+ *div = 3;
+ } else {
+ *mult = 0;
+ *div = 1;
+ }
+}
+
+static const struct coreclk_soc_desc mv88f5181_coreclks = {
+ .get_tclk_freq = mv88f5181_get_tclk_freq,
+ .get_cpu_freq = mv88f5181_get_cpu_freq,
+ .get_clk_ratio = mv88f5181_get_clk_ratio,
+ .ratios = orion_coreclk_ratios,
+ .num_ratios = ARRAY_SIZE(orion_coreclk_ratios),
+};
+
+static void __init mv88f5181_clk_init(struct device_node *np)
+{
+ return mvebu_coreclk_setup(np, &mv88f5181_coreclks);
+}
+
+CLK_OF_DECLARE(mv88f5181_clk, "marvell,mv88f5181-core-clock", mv88f5181_clk_init);
+
+/*
* Orion 5182
*/
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 8a753fd5b79d..245190839359 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -361,7 +361,7 @@ config CLKSRC_METAG_GENERIC
config CLKSRC_EXYNOS_MCT
bool "Exynos multi core timer driver" if COMPILE_TEST
- depends on ARM
+ depends on ARM || ARM64
help
Support for Multi Core Timer controller on Exynos SoCs.
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 41840d02c331..8f3488b80896 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -223,6 +223,7 @@ static u64 notrace exynos4_read_sched_clock(void)
return exynos4_read_count_32();
}
+#if defined(CONFIG_ARM)
static struct delay_timer exynos4_delay_timer;
static cycles_t exynos4_read_current_timer(void)
@@ -231,14 +232,17 @@ static cycles_t exynos4_read_current_timer(void)
"cycles_t needs to move to 32-bit for ARM64 usage");
return exynos4_read_count_32();
}
+#endif
static int __init exynos4_clocksource_init(void)
{
exynos4_mct_frc_start();
+#if defined(CONFIG_ARM)
exynos4_delay_timer.read_current_timer = &exynos4_read_current_timer;
exynos4_delay_timer.freq = clk_rate;
register_current_timer_delay(&exynos4_delay_timer);
+#endif
if (clocksource_register_hz(&mct_frc, clk_rate))
panic("%s: can't register clocksource\n", mct_frc.name);
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 0e22f241403b..bca172d42c74 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -209,5 +209,6 @@ config HAVE_ARM_SMCCC
source "drivers/firmware/broadcom/Kconfig"
source "drivers/firmware/google/Kconfig"
source "drivers/firmware/efi/Kconfig"
+source "drivers/firmware/meson/Kconfig"
endmenu
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 44a59dcfc398..898ac41fa8b3 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_QCOM_SCM_32) += qcom_scm-32.o
CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch armv7-a\n.arch_extension sec,-DREQUIRES_SEC=1) -march=armv7-a
obj-y += broadcom/
+obj-y += meson/
obj-$(CONFIG_GOOGLE_FIRMWARE) += google/
obj-$(CONFIG_EFI) += efi/
obj-$(CONFIG_UEFI_CPER) += efi/
diff --git a/drivers/firmware/meson/Kconfig b/drivers/firmware/meson/Kconfig
new file mode 100644
index 000000000000..170d7e8bcdfb
--- /dev/null
+++ b/drivers/firmware/meson/Kconfig
@@ -0,0 +1,9 @@
+#
+# Amlogic Secure Monitor driver
+#
+config MESON_SM
+ bool
+ default ARCH_MESON
+ depends on ARM64_4K_PAGES
+ help
+ Say y here to enable the Amlogic secure monitor driver
diff --git a/drivers/firmware/meson/Makefile b/drivers/firmware/meson/Makefile
new file mode 100644
index 000000000000..9ab3884f96bc
--- /dev/null
+++ b/drivers/firmware/meson/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_MESON_SM) += meson_sm.o
diff --git a/drivers/firmware/meson/meson_sm.c b/drivers/firmware/meson/meson_sm.c
new file mode 100644
index 000000000000..b0d254930ed3
--- /dev/null
+++ b/drivers/firmware/meson/meson_sm.c
@@ -0,0 +1,248 @@
+/*
+ * Amlogic Secure Monitor driver
+ *
+ * Copyright (C) 2016 Endless Mobile, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "meson-sm: " fmt
+
+#include <linux/arm-smccc.h>
+#include <linux/bug.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/printk.h>
+#include <linux/types.h>
+#include <linux/sizes.h>
+
+#include <linux/firmware/meson/meson_sm.h>
+
+struct meson_sm_cmd {
+ unsigned int index;
+ u32 smc_id;
+};
+#define CMD(d, s) { .index = (d), .smc_id = (s), }
+
+struct meson_sm_chip {
+ unsigned int shmem_size;
+ u32 cmd_shmem_in_base;
+ u32 cmd_shmem_out_base;
+ struct meson_sm_cmd cmd[];
+};
+
+static struct meson_sm_chip gxbb_chip = {
+ .shmem_size = SZ_4K,
+ .cmd_shmem_in_base = 0x82000020,
+ .cmd_shmem_out_base = 0x82000021,
+ .cmd = {
+ CMD(SM_EFUSE_READ, 0x82000030),
+ CMD(SM_EFUSE_WRITE, 0x82000031),
+ CMD(SM_EFUSE_USER_MAX, 0x82000033),
+ { /* sentinel */ },
+ },
+};
+
+struct meson_sm_firmware {
+ const struct meson_sm_chip *chip;
+ void __iomem *sm_shmem_in_base;
+ void __iomem *sm_shmem_out_base;
+};
+
+static struct meson_sm_firmware fw;
+
+static u32 meson_sm_get_cmd(const struct meson_sm_chip *chip,
+ unsigned int cmd_index)
+{
+ const struct meson_sm_cmd *cmd = chip->cmd;
+
+ while (cmd->smc_id && cmd->index != cmd_index)
+ cmd++;
+
+ return cmd->smc_id;
+}
+
+static u32 __meson_sm_call(u32 cmd, u32 arg0, u32 arg1, u32 arg2,
+ u32 arg3, u32 arg4)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(cmd, arg0, arg1, arg2, arg3, arg4, 0, 0, &res);
+ return res.a0;
+}
+
+static void __iomem *meson_sm_map_shmem(u32 cmd_shmem, unsigned int size)
+{
+ u32 sm_phy_base;
+
+ sm_phy_base = __meson_sm_call(cmd_shmem, 0, 0, 0, 0, 0);
+ if (!sm_phy_base)
+ return NULL;
+
+ return ioremap_cache(sm_phy_base, size);
+}
+
+/**
+ * meson_sm_call - generic SMC32 call to the secure-monitor
+ *
+ * @cmd_index: Index of the SMC32 function ID
+ * @ret: Returned value
+ * @arg0: SMC32 Argument 0
+ * @arg1: SMC32 Argument 1
+ * @arg2: SMC32 Argument 2
+ * @arg3: SMC32 Argument 3
+ * @arg4: SMC32 Argument 4
+ *
+ * Return: 0 on success, a negative value on error
+ */
+int meson_sm_call(unsigned int cmd_index, u32 *ret, u32 arg0,
+ u32 arg1, u32 arg2, u32 arg3, u32 arg4)
+{
+ u32 cmd, lret;
+
+ if (!fw.chip)
+ return -ENOENT;
+
+ cmd = meson_sm_get_cmd(fw.chip, cmd_index);
+ if (!cmd)
+ return -EINVAL;
+
+ lret = __meson_sm_call(cmd, arg0, arg1, arg2, arg3, arg4);
+
+ if (ret)
+ *ret = lret;
+
+ return 0;
+}
+EXPORT_SYMBOL(meson_sm_call);
+
+/**
+ * meson_sm_call_read - retrieve data from secure-monitor
+ *
+ * @buffer: Buffer to store the retrieved data
+ * @cmd_index: Index of the SMC32 function ID
+ * @arg0: SMC32 Argument 0
+ * @arg1: SMC32 Argument 1
+ * @arg2: SMC32 Argument 2
+ * @arg3: SMC32 Argument 3
+ * @arg4: SMC32 Argument 4
+ *
+ * Return: size of read data on success, a negative value on error
+ */
+int meson_sm_call_read(void *buffer, unsigned int cmd_index, u32 arg0,
+ u32 arg1, u32 arg2, u32 arg3, u32 arg4)
+{
+ u32 size;
+
+ if (!fw.chip)
+ return -ENOENT;
+
+ if (!fw.chip->cmd_shmem_out_base)
+ return -EINVAL;
+
+ if (meson_sm_call(cmd_index, &size, arg0, arg1, arg2, arg3, arg4) < 0)
+ return -EINVAL;
+
+ if (!size || size > fw.chip->shmem_size)
+ return -EINVAL;
+
+ if (buffer)
+ memcpy(buffer, fw.sm_shmem_out_base, size);
+
+ return size;
+}
+EXPORT_SYMBOL(meson_sm_call_read);
+
+/**
+ * meson_sm_call_write - send data to secure-monitor
+ *
+ * @buffer: Buffer containing data to send
+ * @size: Size of the data to send
+ * @cmd_index: Index of the SMC32 function ID
+ * @arg0: SMC32 Argument 0
+ * @arg1: SMC32 Argument 1
+ * @arg2: SMC32 Argument 2
+ * @arg3: SMC32 Argument 3
+ * @arg4: SMC32 Argument 4
+ *
+ * Return: size of sent data on success, a negative value on error
+ */
+int meson_sm_call_write(void *buffer, unsigned int size, unsigned int cmd_index,
+ u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4)
+{
+ u32 written;
+
+ if (!fw.chip)
+ return -ENOENT;
+
+ if (size > fw.chip->shmem_size)
+ return -EINVAL;
+
+ if (!fw.chip->cmd_shmem_in_base)
+ return -EINVAL;
+
+ memcpy(fw.sm_shmem_in_base, buffer, size);
+
+ if (meson_sm_call(cmd_index, &written, arg0, arg1, arg2, arg3, arg4) < 0)
+ return -EINVAL;
+
+ if (!written)
+ return -EINVAL;
+
+ return written;
+}
+EXPORT_SYMBOL(meson_sm_call_write);
+
+static const struct of_device_id meson_sm_ids[] = {
+ { .compatible = "amlogic,meson-gxbb-sm", .data = &gxbb_chip },
+ { /* sentinel */ },
+};
+
+int __init meson_sm_init(void)
+{
+ const struct meson_sm_chip *chip;
+ const struct of_device_id *matched_np;
+ struct device_node *np;
+
+ np = of_find_matching_node_and_match(NULL, meson_sm_ids, &matched_np);
+ if (!np)
+ return -ENODEV;
+
+ chip = matched_np->data;
+ if (!chip) {
+ pr_err("unable to setup secure-monitor data\n");
+ goto out;
+ }
+
+ if (chip->cmd_shmem_in_base) {
+ fw.sm_shmem_in_base = meson_sm_map_shmem(chip->cmd_shmem_in_base,
+ chip->shmem_size);
+ if (WARN_ON(!fw.sm_shmem_in_base))
+ goto out;
+ }
+
+ if (chip->cmd_shmem_out_base) {
+ fw.sm_shmem_out_base = meson_sm_map_shmem(chip->cmd_shmem_out_base,
+ chip->shmem_size);
+ if (WARN_ON(!fw.sm_shmem_out_base))
+ goto out_in_base;
+ }
+
+ fw.chip = chip;
+ pr_info("secure-monitor enabled\n");
+
+ return 0;
+
+out_in_base:
+ iounmap(fw.sm_shmem_in_base);
+out:
+ return -EINVAL;
+}
+device_initcall(meson_sm_init);
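
For reference, a minimal sketch of how a kernel consumer might use the exported secure-monitor helpers above; it mirrors what the Meson eFuse driver added later in this series does, and the offset/size handling is an assumed example rather than part of this patch:

/* Hypothetical consumer of the Amlogic secure monitor (sketch only). */
#include <linux/firmware/meson/meson_sm.h>

static int example_efuse_read(u8 *buf, u32 offset, u32 size)
{
	int ret;

	/*
	 * The secure monitor copies the requested eFuse range into its
	 * shared output buffer; meson_sm_call_read() then copies it out
	 * into 'buf' and returns the number of bytes read.
	 */
	ret = meson_sm_call_read(buf, SM_EFUSE_READ, offset, size, 0, 0, 0);
	if (ret < 0)
		return ret;	/* negative errno on failure */

	return ret;		/* bytes actually read */
}
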
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index e64a501adbf4..d95c70227c05 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -1,4 +1,7 @@
-/* Copyright (c) 2010,2015, The Linux Foundation. All rights reserved.
+/*
+ * Qualcomm SCM driver
+ *
+ * Copyright (c) 2010,2015, The Linux Foundation. All rights reserved.
* Copyright (C) 2015 Linaro Ltd.
*
* This program is free software; you can redistribute it and/or modify
@@ -12,7 +15,7 @@
*
*/
#include <linux/platform_device.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/dma-mapping.h>
@@ -376,8 +379,6 @@ static const struct of_device_id qcom_scm_dt_match[] = {
{}
};
-MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);
-
static struct platform_driver qcom_scm_driver = {
.driver = {
.name = "qcom_scm",
@@ -414,14 +415,4 @@ static int __init qcom_scm_init(void)
return platform_driver_register(&qcom_scm_driver);
}
-
subsys_initcall(qcom_scm_init);
-
-static void __exit qcom_scm_exit(void)
-{
- platform_driver_unregister(&qcom_scm_driver);
-}
-module_exit(qcom_scm_exit);
-
-MODULE_DESCRIPTION("Qualcomm SCM driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/rc/meson-ir.c b/drivers/media/rc/meson-ir.c
index fcc3b82d1454..003fff07ade2 100644
--- a/drivers/media/rc/meson-ir.c
+++ b/drivers/media/rc/meson-ir.c
@@ -24,6 +24,7 @@
#define DRIVER_NAME "meson-ir"
+/* valid on all Meson platforms */
#define IR_DEC_LDR_ACTIVE 0x00
#define IR_DEC_LDR_IDLE 0x04
#define IR_DEC_LDR_REPEAT 0x08
@@ -32,12 +33,21 @@
#define IR_DEC_FRAME 0x14
#define IR_DEC_STATUS 0x18
#define IR_DEC_REG1 0x1c
+/* only available on Meson 8b and newer */
+#define IR_DEC_REG2 0x20
#define REG0_RATE_MASK (BIT(11) - 1)
-#define REG1_MODE_MASK (BIT(7) | BIT(8))
-#define REG1_MODE_NEC (0 << 7)
-#define REG1_MODE_GENERAL (2 << 7)
+#define DECODE_MODE_NEC 0x0
+#define DECODE_MODE_RAW 0x2
+
+/* Meson 6b uses REG1 to configure the mode */
+#define REG1_MODE_MASK GENMASK(8, 7)
+#define REG1_MODE_SHIFT 7
+
+/* Meson 8b / GXBB use REG2 to configure the mode */
+#define REG2_MODE_MASK GENMASK(3, 0)
+#define REG2_MODE_SHIFT 0
#define REG1_TIME_IV_SHIFT 16
#define REG1_TIME_IV_MASK ((BIT(13) - 1) << REG1_TIME_IV_SHIFT)
@@ -158,8 +168,15 @@ static int meson_ir_probe(struct platform_device *pdev)
/* Reset the decoder */
meson_ir_set_mask(ir, IR_DEC_REG1, REG1_RESET, REG1_RESET);
meson_ir_set_mask(ir, IR_DEC_REG1, REG1_RESET, 0);
- /* Set general operation mode */
- meson_ir_set_mask(ir, IR_DEC_REG1, REG1_MODE_MASK, REG1_MODE_GENERAL);
+
+ /* Set general operation mode (= raw/software decoding) */
+ if (of_device_is_compatible(node, "amlogic,meson6-ir"))
+ meson_ir_set_mask(ir, IR_DEC_REG1, REG1_MODE_MASK,
+ DECODE_MODE_RAW << REG1_MODE_SHIFT);
+ else
+ meson_ir_set_mask(ir, IR_DEC_REG2, REG2_MODE_MASK,
+ DECODE_MODE_RAW << REG2_MODE_SHIFT);
+
/* Set rate */
meson_ir_set_mask(ir, IR_DEC_REG0, REG0_RATE_MASK, MESON_TRATE - 1);
/* IRQ on rising and falling edges */
@@ -197,6 +214,8 @@ static int meson_ir_remove(struct platform_device *pdev)
static const struct of_device_id meson_ir_match[] = {
{ .compatible = "amlogic,meson6-ir" },
+ { .compatible = "amlogic,meson8b-ir" },
+ { .compatible = "amlogic,meson-gxbb-ir" },
{ },
};
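
For reference, the REG1/REG2 mode update above is a plain masked read-modify-write of a GENMASK()-defined field; below is a small stand-alone sketch of that pattern. The register value is a made-up example and GENMASK() is re-implemented only so the snippet builds outside the kernel:

/* Generic mask/shift field update, as used for the REG2 mode bits above. */
#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))

#define REG2_MODE_MASK	GENMASK(3, 0)
#define REG2_MODE_SHIFT	0
#define DECODE_MODE_RAW	0x2

int main(void)
{
	uint32_t reg = 0xdeadbee5;	/* pretend current register value */

	/* clear the mode field, then insert the raw-decode mode */
	reg = (reg & ~REG2_MODE_MASK) | (DECODE_MODE_RAW << REG2_MODE_SHIFT);
	printf("REG2 = 0x%08x\n", (unsigned int)reg);
	return 0;
}
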
diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c
index f87ad6f5d2dc..b5ed3bd082b5 100644
--- a/drivers/memory/atmel-ebi.c
+++ b/drivers/memory/atmel-ebi.c
@@ -410,10 +410,7 @@ static int at91sam9_ebi_init(struct at91_ebi *ebi)
field.reg = AT91SAM9_SMC_MODE(AT91SAM9_SMC_GENERIC);
fields->mode = devm_regmap_field_alloc(ebi->dev, ebi->smc, field);
- if (IS_ERR(fields->mode))
- return PTR_ERR(fields->mode);
-
- return 0;
+ return PTR_ERR_OR_ZERO(fields->mode);
}
static int sama5d3_ebi_init(struct at91_ebi *ebi)
@@ -441,10 +438,7 @@ static int sama5d3_ebi_init(struct at91_ebi *ebi)
field.reg = SAMA5_SMC_MODE(SAMA5_SMC_GENERIC);
fields->mode = devm_regmap_field_alloc(ebi->dev, ebi->smc, field);
- if (IS_ERR(fields->mode))
- return PTR_ERR(fields->mode);
-
- return 0;
+ return PTR_ERR_OR_ZERO(fields->mode);
}
static int at91_ebi_dev_setup(struct at91_ebi *ebi, struct device_node *np,
diff --git a/drivers/memory/atmel-sdramc.c b/drivers/memory/atmel-sdramc.c
index 53a341f3b305..12080b05e3e6 100644
--- a/drivers/memory/atmel-sdramc.c
+++ b/drivers/memory/atmel-sdramc.c
@@ -53,12 +53,10 @@ static const struct of_device_id atmel_ramc_of_match[] = {
static int atmel_ramc_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
const struct at91_ramc_caps *caps;
struct clk *clk;
- match = of_match_device(atmel_ramc_of_match, &pdev->dev);
- caps = match->data;
+ caps = of_device_get_match_data(&pdev->dev);
if (caps->has_ddrck) {
clk = devm_clk_get(&pdev->dev, "ddrck");
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index f00f3e742265..5457c361ad58 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -350,8 +350,8 @@ static unsigned int gpmc_ps_to_ticks(unsigned int time_ps)
return (time_ps + tick_ps - 1) / tick_ps;
}
-unsigned int gpmc_clk_ticks_to_ns(unsigned ticks, int cs,
- enum gpmc_clk_domain cd)
+static unsigned int gpmc_clk_ticks_to_ns(unsigned int ticks, int cs,
+ enum gpmc_clk_domain cd)
{
return ticks * gpmc_get_clk_period(cs, cd) / 1000;
}
@@ -2143,9 +2143,7 @@ err_child_fail:
ret = -ENODEV;
err_cs:
- if (waitpin_desc)
- gpiochip_free_own_desc(waitpin_desc);
-
+ gpiochip_free_own_desc(waitpin_desc);
err:
gpmc_cs_free(cs);
@@ -2265,7 +2263,7 @@ static int gpmc_gpio_init(struct gpmc_device *gpmc)
gpmc->gpio_chip.get = gpmc_gpio_get;
gpmc->gpio_chip.base = -1;
- ret = gpiochip_add(&gpmc->gpio_chip);
+ ret = devm_gpiochip_add_data(gpmc->dev, &gpmc->gpio_chip, NULL);
if (ret < 0) {
dev_err(gpmc->dev, "could not register gpio chip: %d\n", ret);
return ret;
@@ -2274,11 +2272,6 @@ static int gpmc_gpio_init(struct gpmc_device *gpmc)
return 0;
}
-static void gpmc_gpio_exit(struct gpmc_device *gpmc)
-{
- gpiochip_remove(&gpmc->gpio_chip);
-}
-
static int gpmc_probe(struct platform_device *pdev)
{
int rc;
@@ -2365,15 +2358,13 @@ static int gpmc_probe(struct platform_device *pdev)
rc = gpmc_setup_irq(gpmc);
if (rc) {
dev_err(gpmc->dev, "gpmc_setup_irq failed\n");
- goto setup_irq_failed;
+ goto gpio_init_failed;
}
gpmc_probe_dt_children(pdev);
return 0;
-setup_irq_failed:
- gpmc_gpio_exit(gpmc);
gpio_init_failed:
gpmc_mem_exit();
pm_runtime_put_sync(&pdev->dev);
@@ -2387,7 +2378,6 @@ static int gpmc_remove(struct platform_device *pdev)
struct gpmc_device *gpmc = platform_get_drvdata(pdev);
gpmc_free_irq(gpmc);
- gpmc_gpio_exit(gpmc);
gpmc_mem_exit();
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index f550c4596a7a..ba140eaee5c8 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -101,4 +101,14 @@ config NVMEM_VF610_OCOTP
This driver can also be build as a module. If so, the module will
be called nvmem-vf610-ocotp.
+config MESON_EFUSE
+ tristate "Amlogic eFuse Support"
+ depends on (ARCH_MESON || COMPILE_TEST) && MESON_SM
+ help
+ This is a driver to retrieve specific values from the eFuse found on
+ the Amlogic Meson SoCs.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem_meson_efuse.
+
endif
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index 45ab1ae08fa9..8f942a0cdaec 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -22,3 +22,5 @@ obj-$(CONFIG_NVMEM_SUNXI_SID) += nvmem_sunxi_sid.o
nvmem_sunxi_sid-y := sunxi_sid.o
obj-$(CONFIG_NVMEM_VF610_OCOTP) += nvmem-vf610-ocotp.o
nvmem-vf610-ocotp-y := vf610-ocotp.o
+obj-$(CONFIG_MESON_EFUSE) += nvmem_meson_efuse.o
+nvmem_meson_efuse-y := meson-efuse.o
diff --git a/drivers/nvmem/meson-efuse.c b/drivers/nvmem/meson-efuse.c
new file mode 100644
index 000000000000..f207c3b10482
--- /dev/null
+++ b/drivers/nvmem/meson-efuse.c
@@ -0,0 +1,93 @@
+/*
+ * Amlogic eFuse Driver
+ *
+ * Copyright (c) 2016 Endless Computers, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <linux/firmware/meson/meson_sm.h>
+
+static int meson_efuse_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ u8 *buf = val;
+ int ret;
+
+ ret = meson_sm_call_read(buf, SM_EFUSE_READ, offset,
+ bytes, 0, 0, 0);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static struct nvmem_config econfig = {
+ .name = "meson-efuse",
+ .owner = THIS_MODULE,
+ .stride = 1,
+ .word_size = 1,
+ .read_only = true,
+};
+
+static const struct of_device_id meson_efuse_match[] = {
+ { .compatible = "amlogic,meson-gxbb-efuse", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, meson_efuse_match);
+
+static int meson_efuse_probe(struct platform_device *pdev)
+{
+ struct nvmem_device *nvmem;
+ unsigned int size;
+
+ if (meson_sm_call(SM_EFUSE_USER_MAX, &size, 0, 0, 0, 0, 0) < 0)
+ return -EINVAL;
+
+ econfig.dev = &pdev->dev;
+ econfig.reg_read = meson_efuse_read;
+ econfig.size = size;
+
+ nvmem = nvmem_register(&econfig);
+ if (IS_ERR(nvmem))
+ return PTR_ERR(nvmem);
+
+ platform_set_drvdata(pdev, nvmem);
+
+ return 0;
+}
+
+static int meson_efuse_remove(struct platform_device *pdev)
+{
+ struct nvmem_device *nvmem = platform_get_drvdata(pdev);
+
+ return nvmem_unregister(nvmem);
+}
+
+static struct platform_driver meson_efuse_driver = {
+ .probe = meson_efuse_probe,
+ .remove = meson_efuse_remove,
+ .driver = {
+ .name = "meson-efuse",
+ .of_match_table = meson_efuse_match,
+ },
+};
+
+module_platform_driver(meson_efuse_driver);
+
+MODULE_AUTHOR("Carlo Caione <carlo@endlessm.com>");
+MODULE_DESCRIPTION("Amlogic Meson NVMEM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index 04e2653bb8c0..4d5c5f9f0dbd 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -12,4 +12,11 @@ config ARM_PMU
Say y if you want to use CPU performance monitors on ARM-based
systems.
+config XGENE_PMU
+ depends on PERF_EVENTS && ARCH_XGENE
+ bool "APM X-Gene SoC PMU"
+ default n
+ help
+ Say y if you want to use APM X-Gene SoC performance monitors.
+
endmenu
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index acd2397ded94..b116e982810b 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -1 +1,2 @@
obj-$(CONFIG_ARM_PMU) += arm_pmu.o
+obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o
diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c
new file mode 100644
index 000000000000..c2ac7646b99f
--- /dev/null
+++ b/drivers/perf/xgene_pmu.c
@@ -0,0 +1,1398 @@
+/*
+ * APM X-Gene SoC PMU (Performance Monitor Unit)
+ *
+ * Copyright (c) 2016, Applied Micro Circuits Corporation
+ * Author: Hoan Tran <hotran@apm.com>
+ * Tai Nguyen <ttnguyen@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_address.h>
+#include <linux/of_fdt.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define CSW_CSWCR 0x0000
+#define CSW_CSWCR_DUALMCB_MASK BIT(0)
+#define MCBADDRMR 0x0000
+#define MCBADDRMR_DUALMCU_MODE_MASK BIT(2)
+
+#define PCPPMU_INTSTATUS_REG 0x000
+#define PCPPMU_INTMASK_REG 0x004
+#define PCPPMU_INTMASK 0x0000000F
+#define PCPPMU_INTENMASK 0xFFFFFFFF
+#define PCPPMU_INTCLRMASK 0xFFFFFFF0
+#define PCPPMU_INT_MCU BIT(0)
+#define PCPPMU_INT_MCB BIT(1)
+#define PCPPMU_INT_L3C BIT(2)
+#define PCPPMU_INT_IOB BIT(3)
+
+#define PMU_MAX_COUNTERS 4
+#define PMU_CNT_MAX_PERIOD 0x100000000ULL
+#define PMU_OVERFLOW_MASK 0xF
+#define PMU_PMCR_E BIT(0)
+#define PMU_PMCR_P BIT(1)
+
+#define PMU_PMEVCNTR0 0x000
+#define PMU_PMEVCNTR1 0x004
+#define PMU_PMEVCNTR2 0x008
+#define PMU_PMEVCNTR3 0x00C
+#define PMU_PMEVTYPER0 0x400
+#define PMU_PMEVTYPER1 0x404
+#define PMU_PMEVTYPER2 0x408
+#define PMU_PMEVTYPER3 0x40C
+#define PMU_PMAMR0 0xA00
+#define PMU_PMAMR1 0xA04
+#define PMU_PMCNTENSET 0xC00
+#define PMU_PMCNTENCLR 0xC20
+#define PMU_PMINTENSET 0xC40
+#define PMU_PMINTENCLR 0xC60
+#define PMU_PMOVSR 0xC80
+#define PMU_PMCR 0xE04
+
+#define to_pmu_dev(p) container_of(p, struct xgene_pmu_dev, pmu)
+#define GET_CNTR(ev) (ev->hw.idx)
+#define GET_EVENTID(ev) (ev->hw.config & 0xFFULL)
+#define GET_AGENTID(ev) (ev->hw.config_base & 0xFFFFFFFFUL)
+#define GET_AGENT1ID(ev) ((ev->hw.config_base >> 32) & 0xFFFFFFFFUL)
+
+struct hw_pmu_info {
+ u32 type;
+ u32 enable_mask;
+ void __iomem *csr;
+};
+
+struct xgene_pmu_dev {
+ struct hw_pmu_info *inf;
+ struct xgene_pmu *parent;
+ struct pmu pmu;
+ u8 max_counters;
+ DECLARE_BITMAP(cntr_assign_mask, PMU_MAX_COUNTERS);
+ u64 max_period;
+ const struct attribute_group **attr_groups;
+ struct perf_event *pmu_counter_event[PMU_MAX_COUNTERS];
+};
+
+struct xgene_pmu {
+ struct device *dev;
+ int version;
+ void __iomem *pcppmu_csr;
+ u32 mcb_active_mask;
+ u32 mc_active_mask;
+ cpumask_t cpu;
+ raw_spinlock_t lock;
+ struct list_head l3cpmus;
+ struct list_head iobpmus;
+ struct list_head mcbpmus;
+ struct list_head mcpmus;
+};
+
+struct xgene_pmu_dev_ctx {
+ char *name;
+ struct list_head next;
+ struct xgene_pmu_dev *pmu_dev;
+ struct hw_pmu_info inf;
+};
+
+struct xgene_pmu_data {
+ int id;
+ u32 data;
+};
+
+enum xgene_pmu_version {
+ PCP_PMU_V1 = 1,
+ PCP_PMU_V2,
+};
+
+enum xgene_pmu_dev_type {
+ PMU_TYPE_L3C = 0,
+ PMU_TYPE_IOB,
+ PMU_TYPE_MCB,
+ PMU_TYPE_MC,
+};
+
+/*
+ * sysfs format attributes
+ */
+static ssize_t xgene_pmu_format_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dev_ext_attribute *eattr;
+
+ eattr = container_of(attr, struct dev_ext_attribute, attr);
+ return sprintf(buf, "%s\n", (char *) eattr->var);
+}
+
+#define XGENE_PMU_FORMAT_ATTR(_name, _config) \
+ (&((struct dev_ext_attribute[]) { \
+ { .attr = __ATTR(_name, S_IRUGO, xgene_pmu_format_show, NULL), \
+ .var = (void *) _config, } \
+ })[0].attr.attr)
+
+static struct attribute *l3c_pmu_format_attrs[] = {
+ XGENE_PMU_FORMAT_ATTR(l3c_eventid, "config:0-7"),
+ XGENE_PMU_FORMAT_ATTR(l3c_agentid, "config1:0-9"),
+ NULL,
+};
+
+static struct attribute *iob_pmu_format_attrs[] = {
+ XGENE_PMU_FORMAT_ATTR(iob_eventid, "config:0-7"),
+ XGENE_PMU_FORMAT_ATTR(iob_agentid, "config1:0-63"),
+ NULL,
+};
+
+static struct attribute *mcb_pmu_format_attrs[] = {
+ XGENE_PMU_FORMAT_ATTR(mcb_eventid, "config:0-5"),
+ XGENE_PMU_FORMAT_ATTR(mcb_agentid, "config1:0-9"),
+ NULL,
+};
+
+static struct attribute *mc_pmu_format_attrs[] = {
+ XGENE_PMU_FORMAT_ATTR(mc_eventid, "config:0-28"),
+ NULL,
+};
+
+static const struct attribute_group l3c_pmu_format_attr_group = {
+ .name = "format",
+ .attrs = l3c_pmu_format_attrs,
+};
+
+static const struct attribute_group iob_pmu_format_attr_group = {
+ .name = "format",
+ .attrs = iob_pmu_format_attrs,
+};
+
+static const struct attribute_group mcb_pmu_format_attr_group = {
+ .name = "format",
+ .attrs = mcb_pmu_format_attrs,
+};
+
+static const struct attribute_group mc_pmu_format_attr_group = {
+ .name = "format",
+ .attrs = mc_pmu_format_attrs,
+};
+
+/*
+ * sysfs event attributes
+ */
+static ssize_t xgene_pmu_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dev_ext_attribute *eattr;
+
+ eattr = container_of(attr, struct dev_ext_attribute, attr);
+ return sprintf(buf, "config=0x%lx\n", (unsigned long) eattr->var);
+}
+
+#define XGENE_PMU_EVENT_ATTR(_name, _config) \
+ (&((struct dev_ext_attribute[]) { \
+ { .attr = __ATTR(_name, S_IRUGO, xgene_pmu_event_show, NULL), \
+ .var = (void *) _config, } \
+ })[0].attr.attr)
+
+static struct attribute *l3c_pmu_events_attrs[] = {
+ XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
+ XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01),
+ XGENE_PMU_EVENT_ATTR(read-hit, 0x02),
+ XGENE_PMU_EVENT_ATTR(read-miss, 0x03),
+ XGENE_PMU_EVENT_ATTR(write-need-replacement, 0x06),
+ XGENE_PMU_EVENT_ATTR(write-not-need-replacement, 0x07),
+ XGENE_PMU_EVENT_ATTR(tq-full, 0x08),
+ XGENE_PMU_EVENT_ATTR(ackq-full, 0x09),
+ XGENE_PMU_EVENT_ATTR(wdb-full, 0x0a),
+ XGENE_PMU_EVENT_ATTR(bank-fifo-full, 0x0b),
+ XGENE_PMU_EVENT_ATTR(odb-full, 0x0c),
+ XGENE_PMU_EVENT_ATTR(wbq-full, 0x0d),
+ XGENE_PMU_EVENT_ATTR(bank-conflict-fifo-issue, 0x0e),
+ XGENE_PMU_EVENT_ATTR(bank-fifo-issue, 0x0f),
+ NULL,
+};
+
+static struct attribute *iob_pmu_events_attrs[] = {
+ XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
+ XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01),
+ XGENE_PMU_EVENT_ATTR(axi0-read, 0x02),
+ XGENE_PMU_EVENT_ATTR(axi0-read-partial, 0x03),
+ XGENE_PMU_EVENT_ATTR(axi1-read, 0x04),
+ XGENE_PMU_EVENT_ATTR(axi1-read-partial, 0x05),
+ XGENE_PMU_EVENT_ATTR(csw-read-block, 0x06),
+ XGENE_PMU_EVENT_ATTR(csw-read-partial, 0x07),
+ XGENE_PMU_EVENT_ATTR(axi0-write, 0x10),
+ XGENE_PMU_EVENT_ATTR(axi0-write-partial, 0x11),
+ XGENE_PMU_EVENT_ATTR(axi1-write, 0x13),
+ XGENE_PMU_EVENT_ATTR(axi1-write-partial, 0x14),
+ XGENE_PMU_EVENT_ATTR(csw-inbound-dirty, 0x16),
+ NULL,
+};
+
+static struct attribute *mcb_pmu_events_attrs[] = {
+ XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
+ XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01),
+ XGENE_PMU_EVENT_ATTR(csw-read, 0x02),
+ XGENE_PMU_EVENT_ATTR(csw-write-request, 0x03),
+ XGENE_PMU_EVENT_ATTR(mcb-csw-stall, 0x04),
+ XGENE_PMU_EVENT_ATTR(cancel-read-gack, 0x05),
+ NULL,
+};
+
+static struct attribute *mc_pmu_events_attrs[] = {
+ XGENE_PMU_EVENT_ATTR(cycle-count, 0x00),
+ XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01),
+ XGENE_PMU_EVENT_ATTR(act-cmd-sent, 0x02),
+ XGENE_PMU_EVENT_ATTR(pre-cmd-sent, 0x03),
+ XGENE_PMU_EVENT_ATTR(rd-cmd-sent, 0x04),
+ XGENE_PMU_EVENT_ATTR(rda-cmd-sent, 0x05),
+ XGENE_PMU_EVENT_ATTR(wr-cmd-sent, 0x06),
+ XGENE_PMU_EVENT_ATTR(wra-cmd-sent, 0x07),
+ XGENE_PMU_EVENT_ATTR(pde-cmd-sent, 0x08),
+ XGENE_PMU_EVENT_ATTR(sre-cmd-sent, 0x09),
+ XGENE_PMU_EVENT_ATTR(prea-cmd-sent, 0x0a),
+ XGENE_PMU_EVENT_ATTR(ref-cmd-sent, 0x0b),
+ XGENE_PMU_EVENT_ATTR(rd-rda-cmd-sent, 0x0c),
+ XGENE_PMU_EVENT_ATTR(wr-wra-cmd-sent, 0x0d),
+ XGENE_PMU_EVENT_ATTR(in-rd-collision, 0x0e),
+ XGENE_PMU_EVENT_ATTR(in-wr-collision, 0x0f),
+ XGENE_PMU_EVENT_ATTR(collision-queue-not-empty, 0x10),
+ XGENE_PMU_EVENT_ATTR(collision-queue-full, 0x11),
+ XGENE_PMU_EVENT_ATTR(mcu-request, 0x12),
+ XGENE_PMU_EVENT_ATTR(mcu-rd-request, 0x13),
+ XGENE_PMU_EVENT_ATTR(mcu-hp-rd-request, 0x14),
+ XGENE_PMU_EVENT_ATTR(mcu-wr-request, 0x15),
+ XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-all, 0x16),
+ XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-cancel, 0x17),
+ XGENE_PMU_EVENT_ATTR(mcu-rd-response, 0x18),
+ XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-speculative-all, 0x19),
+ XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-speculative-cancel, 0x1a),
+ XGENE_PMU_EVENT_ATTR(mcu-wr-proceed-all, 0x1b),
+ XGENE_PMU_EVENT_ATTR(mcu-wr-proceed-cancel, 0x1c),
+ NULL,
+};
+
+static const struct attribute_group l3c_pmu_events_attr_group = {
+ .name = "events",
+ .attrs = l3c_pmu_events_attrs,
+};
+
+static const struct attribute_group iob_pmu_events_attr_group = {
+ .name = "events",
+ .attrs = iob_pmu_events_attrs,
+};
+
+static const struct attribute_group mcb_pmu_events_attr_group = {
+ .name = "events",
+ .attrs = mcb_pmu_events_attrs,
+};
+
+static const struct attribute_group mc_pmu_events_attr_group = {
+ .name = "events",
+ .attrs = mc_pmu_events_attrs,
+};
+
+/*
+ * sysfs cpumask attributes
+ */
+static ssize_t xgene_pmu_cpumask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct xgene_pmu_dev *pmu_dev = to_pmu_dev(dev_get_drvdata(dev));
+
+ return cpumap_print_to_pagebuf(true, buf, &pmu_dev->parent->cpu);
+}
+
+static DEVICE_ATTR(cpumask, S_IRUGO, xgene_pmu_cpumask_show, NULL);
+
+static struct attribute *xgene_pmu_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL,
+};
+
+static const struct attribute_group pmu_cpumask_attr_group = {
+ .attrs = xgene_pmu_cpumask_attrs,
+};
+
+/*
+ * Per PMU device attribute groups
+ */
+static const struct attribute_group *l3c_pmu_attr_groups[] = {
+ &l3c_pmu_format_attr_group,
+ &pmu_cpumask_attr_group,
+ &l3c_pmu_events_attr_group,
+ NULL
+};
+
+static const struct attribute_group *iob_pmu_attr_groups[] = {
+ &iob_pmu_format_attr_group,
+ &pmu_cpumask_attr_group,
+ &iob_pmu_events_attr_group,
+ NULL
+};
+
+static const struct attribute_group *mcb_pmu_attr_groups[] = {
+ &mcb_pmu_format_attr_group,
+ &pmu_cpumask_attr_group,
+ &mcb_pmu_events_attr_group,
+ NULL
+};
+
+static const struct attribute_group *mc_pmu_attr_groups[] = {
+ &mc_pmu_format_attr_group,
+ &pmu_cpumask_attr_group,
+ &mc_pmu_events_attr_group,
+ NULL
+};
+
+static int get_next_avail_cntr(struct xgene_pmu_dev *pmu_dev)
+{
+ int cntr;
+
+ cntr = find_first_zero_bit(pmu_dev->cntr_assign_mask,
+ pmu_dev->max_counters);
+ if (cntr == pmu_dev->max_counters)
+ return -ENOSPC;
+ set_bit(cntr, pmu_dev->cntr_assign_mask);
+
+ return cntr;
+}
+
+static void clear_avail_cntr(struct xgene_pmu_dev *pmu_dev, int cntr)
+{
+ clear_bit(cntr, pmu_dev->cntr_assign_mask);
+}
+
+static inline void xgene_pmu_mask_int(struct xgene_pmu *xgene_pmu)
+{
+ writel(PCPPMU_INTENMASK, xgene_pmu->pcppmu_csr + PCPPMU_INTMASK_REG);
+}
+
+static inline void xgene_pmu_unmask_int(struct xgene_pmu *xgene_pmu)
+{
+ writel(PCPPMU_INTCLRMASK, xgene_pmu->pcppmu_csr + PCPPMU_INTMASK_REG);
+}
+
+static inline u32 xgene_pmu_read_counter(struct xgene_pmu_dev *pmu_dev, int idx)
+{
+ return readl(pmu_dev->inf->csr + PMU_PMEVCNTR0 + (4 * idx));
+}
+
+static inline void
+xgene_pmu_write_counter(struct xgene_pmu_dev *pmu_dev, int idx, u32 val)
+{
+ writel(val, pmu_dev->inf->csr + PMU_PMEVCNTR0 + (4 * idx));
+}
+
+static inline void
+xgene_pmu_write_evttype(struct xgene_pmu_dev *pmu_dev, int idx, u32 val)
+{
+ writel(val, pmu_dev->inf->csr + PMU_PMEVTYPER0 + (4 * idx));
+}
+
+static inline void
+xgene_pmu_write_agentmsk(struct xgene_pmu_dev *pmu_dev, u32 val)
+{
+ writel(val, pmu_dev->inf->csr + PMU_PMAMR0);
+}
+
+static inline void
+xgene_pmu_write_agent1msk(struct xgene_pmu_dev *pmu_dev, u32 val)
+{
+ writel(val, pmu_dev->inf->csr + PMU_PMAMR1);
+}
+
+static inline void
+xgene_pmu_enable_counter(struct xgene_pmu_dev *pmu_dev, int idx)
+{
+ u32 val;
+
+ val = readl(pmu_dev->inf->csr + PMU_PMCNTENSET);
+ val |= 1 << idx;
+ writel(val, pmu_dev->inf->csr + PMU_PMCNTENSET);
+}
+
+static inline void
+xgene_pmu_disable_counter(struct xgene_pmu_dev *pmu_dev, int idx)
+{
+ u32 val;
+
+ val = readl(pmu_dev->inf->csr + PMU_PMCNTENCLR);
+ val |= 1 << idx;
+ writel(val, pmu_dev->inf->csr + PMU_PMCNTENCLR);
+}
+
+static inline void
+xgene_pmu_enable_counter_int(struct xgene_pmu_dev *pmu_dev, int idx)
+{
+ u32 val;
+
+ val = readl(pmu_dev->inf->csr + PMU_PMINTENSET);
+ val |= 1 << idx;
+ writel(val, pmu_dev->inf->csr + PMU_PMINTENSET);
+}
+
+static inline void
+xgene_pmu_disable_counter_int(struct xgene_pmu_dev *pmu_dev, int idx)
+{
+ u32 val;
+
+ val = readl(pmu_dev->inf->csr + PMU_PMINTENCLR);
+ val |= 1 << idx;
+ writel(val, pmu_dev->inf->csr + PMU_PMINTENCLR);
+}
+
+static inline void xgene_pmu_reset_counters(struct xgene_pmu_dev *pmu_dev)
+{
+ u32 val;
+
+ val = readl(pmu_dev->inf->csr + PMU_PMCR);
+ val |= PMU_PMCR_P;
+ writel(val, pmu_dev->inf->csr + PMU_PMCR);
+}
+
+static inline void xgene_pmu_start_counters(struct xgene_pmu_dev *pmu_dev)
+{
+ u32 val;
+
+ val = readl(pmu_dev->inf->csr + PMU_PMCR);
+ val |= PMU_PMCR_E;
+ writel(val, pmu_dev->inf->csr + PMU_PMCR);
+}
+
+static inline void xgene_pmu_stop_counters(struct xgene_pmu_dev *pmu_dev)
+{
+ u32 val;
+
+ val = readl(pmu_dev->inf->csr + PMU_PMCR);
+ val &= ~PMU_PMCR_E;
+ writel(val, pmu_dev->inf->csr + PMU_PMCR);
+}
+
+static void xgene_perf_pmu_enable(struct pmu *pmu)
+{
+ struct xgene_pmu_dev *pmu_dev = to_pmu_dev(pmu);
+ int enabled = bitmap_weight(pmu_dev->cntr_assign_mask,
+ pmu_dev->max_counters);
+
+ if (!enabled)
+ return;
+
+ xgene_pmu_start_counters(pmu_dev);
+}
+
+static void xgene_perf_pmu_disable(struct pmu *pmu)
+{
+ struct xgene_pmu_dev *pmu_dev = to_pmu_dev(pmu);
+
+ xgene_pmu_stop_counters(pmu_dev);
+}
+
+static int xgene_perf_event_init(struct perf_event *event)
+{
+ struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+ struct perf_event *sibling;
+
+ /* Test the event attr type check for PMU enumeration */
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ /*
+ * SOC PMU counters are shared across all cores.
+ * Therefore, it does not support per-process mode.
+ * Also, it does not support event sampling mode.
+ */
+ if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
+ return -EINVAL;
+
+ /* SOC counters do not have usr/os/guest/host bits */
+ if (event->attr.exclude_user || event->attr.exclude_kernel ||
+ event->attr.exclude_host || event->attr.exclude_guest)
+ return -EINVAL;
+
+ if (event->cpu < 0)
+ return -EINVAL;
+ /*
+ * Many perf core operations (eg. events rotation) operate on a
+ * single CPU context. This is obvious for CPU PMUs, where one
+ * expects the same sets of events being observed on all CPUs,
+ * but can lead to issues for off-core PMUs, where each
+ * event could be theoretically assigned to a different CPU. To
+ * mitigate this, we enforce CPU assignment to one, selected
+ * processor (the one described in the "cpumask" attribute).
+ */
+ event->cpu = cpumask_first(&pmu_dev->parent->cpu);
+
+ hw->config = event->attr.config;
+ /*
+ * Each bit of the config1 field represents an agent from which the
+ * request for the event comes. The event is counted only if it is
+ * caused by a request from an agent that has its bit cleared.
+ * By default, the event is counted for all agents.
+ */
+ hw->config_base = event->attr.config1;
+
+ /*
+ * We must NOT create groups containing mixed PMUs, although software
+ * events are acceptable
+ */
+ if (event->group_leader->pmu != event->pmu &&
+ !is_software_event(event->group_leader))
+ return -EINVAL;
+
+ list_for_each_entry(sibling, &event->group_leader->sibling_list,
+ group_entry)
+ if (sibling->pmu != event->pmu &&
+ !is_software_event(sibling))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void xgene_perf_enable_event(struct perf_event *event)
+{
+ struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
+
+ xgene_pmu_write_evttype(pmu_dev, GET_CNTR(event), GET_EVENTID(event));
+ xgene_pmu_write_agentmsk(pmu_dev, ~((u32)GET_AGENTID(event)));
+ if (pmu_dev->inf->type == PMU_TYPE_IOB)
+ xgene_pmu_write_agent1msk(pmu_dev, ~((u32)GET_AGENT1ID(event)));
+
+ xgene_pmu_enable_counter(pmu_dev, GET_CNTR(event));
+ xgene_pmu_enable_counter_int(pmu_dev, GET_CNTR(event));
+}
+
+static void xgene_perf_disable_event(struct perf_event *event)
+{
+ struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
+
+ xgene_pmu_disable_counter(pmu_dev, GET_CNTR(event));
+ xgene_pmu_disable_counter_int(pmu_dev, GET_CNTR(event));
+}
+
+static void xgene_perf_event_set_period(struct perf_event *event)
+{
+ struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+ /*
+ * The X-Gene PMU counters have a period of 2^32. To account for the
+ * possibility of extreme interrupt latency we program for a period of
+ * half that. Hopefully we can handle the interrupt before another 2^31
+ * events occur and the counter overtakes its previous value.
+ */
+ u64 val = 1ULL << 31;
+
+ local64_set(&hw->prev_count, val);
+ xgene_pmu_write_counter(pmu_dev, hw->idx, (u32) val);
+}
+
+static void xgene_perf_event_update(struct perf_event *event)
+{
+ struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+ u64 delta, prev_raw_count, new_raw_count;
+
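+ /* Retry the read if another update of prev_count races with us */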
+again:
+ prev_raw_count = local64_read(&hw->prev_count);
+ new_raw_count = xgene_pmu_read_counter(pmu_dev, GET_CNTR(event));
+
+ if (local64_cmpxchg(&hw->prev_count, prev_raw_count,
+ new_raw_count) != prev_raw_count)
+ goto again;
+
+ delta = (new_raw_count - prev_raw_count) & pmu_dev->max_period;
+
+ local64_add(delta, &event->count);
+}
+
+static void xgene_perf_read(struct perf_event *event)
+{
+ xgene_perf_event_update(event);
+}
+
+static void xgene_perf_start(struct perf_event *event, int flags)
+{
+ struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+
+ if (WARN_ON_ONCE(!(hw->state & PERF_HES_STOPPED)))
+ return;
+
+ WARN_ON_ONCE(!(hw->state & PERF_HES_UPTODATE));
+ hw->state = 0;
+
+ xgene_perf_event_set_period(event);
+
+ if (flags & PERF_EF_RELOAD) {
+ u64 prev_raw_count = local64_read(&hw->prev_count);
+
+ xgene_pmu_write_counter(pmu_dev, GET_CNTR(event),
+ (u32) prev_raw_count);
+ }
+
+ xgene_perf_enable_event(event);
+ perf_event_update_userpage(event);
+}
+
+static void xgene_perf_stop(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hw = &event->hw;
+ u64 config;
+
+ if (hw->state & PERF_HES_UPTODATE)
+ return;
+
+ xgene_perf_disable_event(event);
+ WARN_ON_ONCE(hw->state & PERF_HES_STOPPED);
+ hw->state |= PERF_HES_STOPPED;
+
+ if (hw->state & PERF_HES_UPTODATE)
+ return;
+
+ config = hw->config;
+ xgene_perf_read(event);
+ hw->state |= PERF_HES_UPTODATE;
+}
+
+static int xgene_perf_add(struct perf_event *event, int flags)
+{
+ struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+
+ hw->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+
+ /* Allocate an event counter */
+ hw->idx = get_next_avail_cntr(pmu_dev);
+ if (hw->idx < 0)
+ return -EAGAIN;
+
+ /* Update the counter's event pointer for the interrupt handler */
+ pmu_dev->pmu_counter_event[hw->idx] = event;
+
+ if (flags & PERF_EF_START)
+ xgene_perf_start(event, PERF_EF_RELOAD);
+
+ return 0;
+}
+
+static void xgene_perf_del(struct perf_event *event, int flags)
+{
+ struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+
+ xgene_perf_stop(event, PERF_EF_UPDATE);
+
+ /* clear the assigned counter */
+ clear_avail_cntr(pmu_dev, GET_CNTR(event));
+
+ perf_event_update_userpage(event);
+ pmu_dev->pmu_counter_event[hw->idx] = NULL;
+}
+
+static int xgene_init_perf(struct xgene_pmu_dev *pmu_dev, char *name)
+{
+ struct xgene_pmu *xgene_pmu;
+
+ pmu_dev->max_period = PMU_CNT_MAX_PERIOD - 1;
+ /* The first PMU version supports only a single event counter */
+ xgene_pmu = pmu_dev->parent;
+ if (xgene_pmu->version == PCP_PMU_V1)
+ pmu_dev->max_counters = 1;
+ else
+ pmu_dev->max_counters = PMU_MAX_COUNTERS;
+
+ /* Perf driver registration */
+ pmu_dev->pmu = (struct pmu) {
+ .attr_groups = pmu_dev->attr_groups,
+ .task_ctx_nr = perf_invalid_context,
+ .pmu_enable = xgene_perf_pmu_enable,
+ .pmu_disable = xgene_perf_pmu_disable,
+ .event_init = xgene_perf_event_init,
+ .add = xgene_perf_add,
+ .del = xgene_perf_del,
+ .start = xgene_perf_start,
+ .stop = xgene_perf_stop,
+ .read = xgene_perf_read,
+ };
+
+ /* Hardware counter init */
+ xgene_pmu_stop_counters(pmu_dev);
+ xgene_pmu_reset_counters(pmu_dev);
+
+ return perf_pmu_register(&pmu_dev->pmu, name, -1);
+}
+
+static int
+xgene_pmu_dev_add(struct xgene_pmu *xgene_pmu, struct xgene_pmu_dev_ctx *ctx)
+{
+ struct device *dev = xgene_pmu->dev;
+ struct xgene_pmu_dev *pmu;
+ int rc;
+
+ pmu = devm_kzalloc(dev, sizeof(*pmu), GFP_KERNEL);
+ if (!pmu)
+ return -ENOMEM;
+ pmu->parent = xgene_pmu;
+ pmu->inf = &ctx->inf;
+ ctx->pmu_dev = pmu;
+
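+ /* Pick the attribute groups for this PMU type; skip inactive MCB/MC units */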
+ switch (pmu->inf->type) {
+ case PMU_TYPE_L3C:
+ pmu->attr_groups = l3c_pmu_attr_groups;
+ break;
+ case PMU_TYPE_IOB:
+ pmu->attr_groups = iob_pmu_attr_groups;
+ break;
+ case PMU_TYPE_MCB:
+ if (!(xgene_pmu->mcb_active_mask & pmu->inf->enable_mask))
+ goto dev_err;
+ pmu->attr_groups = mcb_pmu_attr_groups;
+ break;
+ case PMU_TYPE_MC:
+ if (!(xgene_pmu->mc_active_mask & pmu->inf->enable_mask))
+ goto dev_err;
+ pmu->attr_groups = mc_pmu_attr_groups;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rc = xgene_init_perf(pmu, ctx->name);
+ if (rc) {
+ dev_err(dev, "%s PMU: Failed to init perf driver\n", ctx->name);
+ goto dev_err;
+ }
+
+ dev_info(dev, "%s PMU registered\n", ctx->name);
+
+ return rc;
+
+dev_err:
+ devm_kfree(dev, pmu);
+ return -ENODEV;
+}
+
+static void _xgene_pmu_isr(int irq, struct xgene_pmu_dev *pmu_dev)
+{
+ struct xgene_pmu *xgene_pmu = pmu_dev->parent;
+ u32 pmovsr;
+ int idx;
+
+ pmovsr = readl(pmu_dev->inf->csr + PMU_PMOVSR) & PMU_OVERFLOW_MASK;
+ if (!pmovsr)
+ return;
+
+ /* Clear interrupt flag */
+ if (xgene_pmu->version == PCP_PMU_V1)
+ writel(0x0, pmu_dev->inf->csr + PMU_PMOVSR);
+ else
+ writel(pmovsr, pmu_dev->inf->csr + PMU_PMOVSR);
+
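+ /* Update and re-arm every counter that has overflowed */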
+ for (idx = 0; idx < PMU_MAX_COUNTERS; idx++) {
+ struct perf_event *event = pmu_dev->pmu_counter_event[idx];
+ int overflowed = pmovsr & BIT(idx);
+
+ /* Ignore if we don't have an event. */
+ if (!event || !overflowed)
+ continue;
+ xgene_perf_event_update(event);
+ xgene_perf_event_set_period(event);
+ }
+}
+
+static irqreturn_t xgene_pmu_isr(int irq, void *dev_id)
+{
+ struct xgene_pmu_dev_ctx *ctx;
+ struct xgene_pmu *xgene_pmu = dev_id;
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&xgene_pmu->lock, flags);
+
+ /* Get Interrupt PMU source */
+ val = readl(xgene_pmu->pcppmu_csr + PCPPMU_INTSTATUS_REG);
+ if (val & PCPPMU_INT_MCU) {
+ list_for_each_entry(ctx, &xgene_pmu->mcpmus, next) {
+ _xgene_pmu_isr(irq, ctx->pmu_dev);
+ }
+ }
+ if (val & PCPPMU_INT_MCB) {
+ list_for_each_entry(ctx, &xgene_pmu->mcbpmus, next) {
+ _xgene_pmu_isr(irq, ctx->pmu_dev);
+ }
+ }
+ if (val & PCPPMU_INT_L3C) {
+ list_for_each_entry(ctx, &xgene_pmu->l3cpmus, next) {
+ _xgene_pmu_isr(irq, ctx->pmu_dev);
+ }
+ }
+ if (val & PCPPMU_INT_IOB) {
+ list_for_each_entry(ctx, &xgene_pmu->iobpmus, next) {
+ _xgene_pmu_isr(irq, ctx->pmu_dev);
+ }
+ }
+
+ raw_spin_unlock_irqrestore(&xgene_pmu->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static int acpi_pmu_probe_active_mcb_mcu(struct xgene_pmu *xgene_pmu,
+ struct platform_device *pdev)
+{
+ void __iomem *csw_csr, *mcba_csr, *mcbb_csr;
+ struct resource *res;
+ unsigned int reg;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ csw_csr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(csw_csr)) {
+ dev_err(&pdev->dev, "ioremap failed for CSW CSR resource\n");
+ return PTR_ERR(csw_csr);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ mcba_csr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mcba_csr)) {
+ dev_err(&pdev->dev, "ioremap failed for MCBA CSR resource\n");
+ return PTR_ERR(mcba_csr);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+ mcbb_csr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mcbb_csr)) {
+ dev_err(&pdev->dev, "ioremap failed for MCBB CSR resource\n");
+ return PTR_ERR(mcbb_csr);
+ }
+
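+ /* Derive the active MCB and MC masks from the CSW configuration */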
+ reg = readl(csw_csr + CSW_CSWCR);
+ if (reg & CSW_CSWCR_DUALMCB_MASK) {
+ /* Dual MCB active */
+ xgene_pmu->mcb_active_mask = 0x3;
+ /* Probe all active MC(s) */
+ reg = readl(mcbb_csr + CSW_CSWCR);
+ xgene_pmu->mc_active_mask =
+ (reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0xF : 0x5;
+ } else {
+ /* Single MCB active */
+ xgene_pmu->mcb_active_mask = 0x1;
+ /* Probe all active MC(s) */
+ reg = readl(mcba_csr + CSW_CSWCR);
+ xgene_pmu->mc_active_mask =
+ (reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0x3 : 0x1;
+ }
+
+ return 0;
+}
+
+static int fdt_pmu_probe_active_mcb_mcu(struct xgene_pmu *xgene_pmu,
+ struct platform_device *pdev)
+{
+ struct regmap *csw_map, *mcba_map, *mcbb_map;
+ struct device_node *np = pdev->dev.of_node;
+ unsigned int reg;
+
+ csw_map = syscon_regmap_lookup_by_phandle(np, "regmap-csw");
+ if (IS_ERR(csw_map)) {
+ dev_err(&pdev->dev, "unable to get syscon regmap csw\n");
+ return PTR_ERR(csw_map);
+ }
+
+ mcba_map = syscon_regmap_lookup_by_phandle(np, "regmap-mcba");
+ if (IS_ERR(mcba_map)) {
+ dev_err(&pdev->dev, "unable to get syscon regmap mcba\n");
+ return PTR_ERR(mcba_map);
+ }
+
+ mcbb_map = syscon_regmap_lookup_by_phandle(np, "regmap-mcbb");
+ if (IS_ERR(mcbb_map)) {
+ dev_err(&pdev->dev, "unable to get syscon regmap mcbb\n");
+ return PTR_ERR(mcbb_map);
+ }
+
+ if (regmap_read(csw_map, CSW_CSWCR, &reg))
+ return -EINVAL;
+
+ if (reg & CSW_CSWCR_DUALMCB_MASK) {
+ /* Dual MCB active */
+ xgene_pmu->mcb_active_mask = 0x3;
+ /* Probe all active MC(s) */
+ if (regmap_read(mcbb_map, MCBADDRMR, &reg))
+ return 0;
+ xgene_pmu->mc_active_mask =
+ (reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0xF : 0x5;
+ } else {
+ /* Single MCB active */
+ xgene_pmu->mcb_active_mask = 0x1;
+ /* Probe all active MC(s) */
+ if (regmap_read(mcba_map, MCBADDRMR, &reg))
+ return 0;
+ xgene_pmu->mc_active_mask =
+ (reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0x3 : 0x1;
+ }
+
+ return 0;
+}
+
+static int xgene_pmu_probe_active_mcb_mcu(struct xgene_pmu *xgene_pmu,
+ struct platform_device *pdev)
+{
+ if (has_acpi_companion(&pdev->dev))
+ return acpi_pmu_probe_active_mcb_mcu(xgene_pmu, pdev);
+ return fdt_pmu_probe_active_mcb_mcu(xgene_pmu, pdev);
+}
+
+static char *xgene_pmu_dev_name(struct device *dev, u32 type, int id)
+{
+ switch (type) {
+ case PMU_TYPE_L3C:
+ return devm_kasprintf(dev, GFP_KERNEL, "l3c%d", id);
+ case PMU_TYPE_IOB:
+ return devm_kasprintf(dev, GFP_KERNEL, "iob%d", id);
+ case PMU_TYPE_MCB:
+ return devm_kasprintf(dev, GFP_KERNEL, "mcb%d", id);
+ case PMU_TYPE_MC:
+ return devm_kasprintf(dev, GFP_KERNEL, "mc%d", id);
+ default:
+ return devm_kasprintf(dev, GFP_KERNEL, "unknown");
+ }
+}
+
+#if defined(CONFIG_ACPI)
+static int acpi_pmu_dev_add_resource(struct acpi_resource *ares, void *data)
+{
+ struct resource *res = data;
+
+ if (ares->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32)
+ acpi_dev_resource_memory(ares, res);
+
+ /* Always tell the ACPI core to skip this resource */
+ return 1;
+}
+
+static struct
+xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
+ struct acpi_device *adev, u32 type)
+{
+ struct device *dev = xgene_pmu->dev;
+ struct list_head resource_list;
+ struct xgene_pmu_dev_ctx *ctx;
+ const union acpi_object *obj;
+ struct hw_pmu_info *inf;
+ void __iomem *dev_csr;
+ struct resource res;
+ int enable_bit;
+ int rc;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return NULL;
+
+ INIT_LIST_HEAD(&resource_list);
+ rc = acpi_dev_get_resources(adev, &resource_list,
+ acpi_pmu_dev_add_resource, &res);
+ acpi_dev_free_resource_list(&resource_list);
+ if (rc < 0 || IS_ERR(&res)) {
+ dev_err(dev, "PMU type %d: No resource address found\n", type);
+ goto err;
+ }
+
+ dev_csr = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(dev_csr)) {
+ dev_err(dev, "PMU type %d: Fail to map resource\n", type);
+ goto err;
+ }
+
+ /* A PMU device node without enable-bit-index is always enabled */
+ rc = acpi_dev_get_property(adev, "enable-bit-index",
+ ACPI_TYPE_INTEGER, &obj);
+ if (rc < 0)
+ enable_bit = 0;
+ else
+ enable_bit = (int) obj->integer.value;
+
+ ctx->name = xgene_pmu_dev_name(dev, type, enable_bit);
+ if (!ctx->name) {
+ dev_err(dev, "PMU type %d: Fail to get device name\n", type);
+ goto err;
+ }
+ inf = &ctx->inf;
+ inf->type = type;
+ inf->csr = dev_csr;
+ inf->enable_mask = 1 << enable_bit;
+
+ return ctx;
+err:
+ devm_kfree(dev, ctx);
+ return NULL;
+}
+
+static acpi_status acpi_pmu_dev_add(acpi_handle handle, u32 level,
+ void *data, void **return_value)
+{
+ struct xgene_pmu *xgene_pmu = data;
+ struct xgene_pmu_dev_ctx *ctx;
+ struct acpi_device *adev;
+
+ if (acpi_bus_get_device(handle, &adev))
+ return AE_OK;
+ if (acpi_bus_get_status(adev) || !adev->status.present)
+ return AE_OK;
+
+ if (!strcmp(acpi_device_hid(adev), "APMC0D5D"))
+ ctx = acpi_get_pmu_hw_inf(xgene_pmu, adev, PMU_TYPE_L3C);
+ else if (!strcmp(acpi_device_hid(adev), "APMC0D5E"))
+ ctx = acpi_get_pmu_hw_inf(xgene_pmu, adev, PMU_TYPE_IOB);
+ else if (!strcmp(acpi_device_hid(adev), "APMC0D5F"))
+ ctx = acpi_get_pmu_hw_inf(xgene_pmu, adev, PMU_TYPE_MCB);
+ else if (!strcmp(acpi_device_hid(adev), "APMC0D60"))
+ ctx = acpi_get_pmu_hw_inf(xgene_pmu, adev, PMU_TYPE_MC);
+ else
+ ctx = NULL;
+
+ if (!ctx)
+ return AE_OK;
+
+ if (xgene_pmu_dev_add(xgene_pmu, ctx)) {
+ /* Can't add the PMU device, skip it */
+ devm_kfree(xgene_pmu->dev, ctx);
+ return AE_OK;
+ }
+
+ switch (ctx->inf.type) {
+ case PMU_TYPE_L3C:
+ list_add(&ctx->next, &xgene_pmu->l3cpmus);
+ break;
+ case PMU_TYPE_IOB:
+ list_add(&ctx->next, &xgene_pmu->iobpmus);
+ break;
+ case PMU_TYPE_MCB:
+ list_add(&ctx->next, &xgene_pmu->mcbpmus);
+ break;
+ case PMU_TYPE_MC:
+ list_add(&ctx->next, &xgene_pmu->mcpmus);
+ break;
+ }
+ return AE_OK;
+}
+
+static int acpi_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu,
+ struct platform_device *pdev)
+{
+ struct device *dev = xgene_pmu->dev;
+ acpi_handle handle;
+ acpi_status status;
+
+ handle = ACPI_HANDLE(dev);
+ if (!handle)
+ return -EINVAL;
+
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+ acpi_pmu_dev_add, NULL, xgene_pmu, NULL);
+ if (ACPI_FAILURE(status)) {
+ dev_err(dev, "failed to probe PMU devices\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+#else
+static int acpi_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu,
+ struct platform_device *pdev)
+{
+ return 0;
+}
+#endif
+
+static struct
+xgene_pmu_dev_ctx *fdt_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
+ struct device_node *np, u32 type)
+{
+ struct device *dev = xgene_pmu->dev;
+ struct xgene_pmu_dev_ctx *ctx;
+ struct hw_pmu_info *inf;
+ void __iomem *dev_csr;
+ struct resource res;
+ int enable_bit;
+ int rc;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return NULL;
+ rc = of_address_to_resource(np, 0, &res);
+ if (rc < 0) {
+ dev_err(dev, "PMU type %d: No resource address found\n", type);
+ goto err;
+ }
+ dev_csr = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(dev_csr)) {
+ dev_err(dev, "PMU type %d: Fail to map resource\n", type);
+ goto err;
+ }
+
+ /* A PMU device node without enable-bit-index is always enabled */
+ if (of_property_read_u32(np, "enable-bit-index", &enable_bit))
+ enable_bit = 0;
+
+ ctx->name = xgene_pmu_dev_name(dev, type, enable_bit);
+ if (!ctx->name) {
+ dev_err(dev, "PMU type %d: Fail to get device name\n", type);
+ goto err;
+ }
+ inf = &ctx->inf;
+ inf->type = type;
+ inf->csr = dev_csr;
+ inf->enable_mask = 1 << enable_bit;
+
+ return ctx;
+err:
+ devm_kfree(dev, ctx);
+ return NULL;
+}
+
+static int fdt_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu,
+ struct platform_device *pdev)
+{
+ struct xgene_pmu_dev_ctx *ctx;
+ struct device_node *np;
+
+ for_each_child_of_node(pdev->dev.of_node, np) {
+ if (!of_device_is_available(np))
+ continue;
+
+ if (of_device_is_compatible(np, "apm,xgene-pmu-l3c"))
+ ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_L3C);
+ else if (of_device_is_compatible(np, "apm,xgene-pmu-iob"))
+ ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_IOB);
+ else if (of_device_is_compatible(np, "apm,xgene-pmu-mcb"))
+ ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_MCB);
+ else if (of_device_is_compatible(np, "apm,xgene-pmu-mc"))
+ ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_MC);
+ else
+ ctx = NULL;
+
+ if (!ctx)
+ continue;
+
+ if (xgene_pmu_dev_add(xgene_pmu, ctx)) {
+ /* Can't add the PMU device, skip it */
+ devm_kfree(xgene_pmu->dev, ctx);
+ continue;
+ }
+
+ switch (ctx->inf.type) {
+ case PMU_TYPE_L3C:
+ list_add(&ctx->next, &xgene_pmu->l3cpmus);
+ break;
+ case PMU_TYPE_IOB:
+ list_add(&ctx->next, &xgene_pmu->iobpmus);
+ break;
+ case PMU_TYPE_MCB:
+ list_add(&ctx->next, &xgene_pmu->mcbpmus);
+ break;
+ case PMU_TYPE_MC:
+ list_add(&ctx->next, &xgene_pmu->mcpmus);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int xgene_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu,
+ struct platform_device *pdev)
+{
+ if (has_acpi_companion(&pdev->dev))
+ return acpi_pmu_probe_pmu_dev(xgene_pmu, pdev);
+ return fdt_pmu_probe_pmu_dev(xgene_pmu, pdev);
+}
+
+static const struct xgene_pmu_data xgene_pmu_data = {
+ .id = PCP_PMU_V1,
+};
+
+static const struct xgene_pmu_data xgene_pmu_v2_data = {
+ .id = PCP_PMU_V2,
+};
+
+static const struct of_device_id xgene_pmu_of_match[] = {
+ { .compatible = "apm,xgene-pmu", .data = &xgene_pmu_data },
+ { .compatible = "apm,xgene-pmu-v2", .data = &xgene_pmu_v2_data },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xgene_pmu_of_match);
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgene_pmu_acpi_match[] = {
+ {"APMC0D5B", PCP_PMU_V1},
+ {"APMC0D5C", PCP_PMU_V2},
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, xgene_pmu_acpi_match);
+#endif
+
+static int xgene_pmu_probe(struct platform_device *pdev)
+{
+ const struct xgene_pmu_data *dev_data;
+ const struct of_device_id *of_id;
+ struct xgene_pmu *xgene_pmu;
+ struct resource *res;
+ int irq, rc;
+ int version;
+
+ xgene_pmu = devm_kzalloc(&pdev->dev, sizeof(*xgene_pmu), GFP_KERNEL);
+ if (!xgene_pmu)
+ return -ENOMEM;
+ xgene_pmu->dev = &pdev->dev;
+ platform_set_drvdata(pdev, xgene_pmu);
+
+ version = -EINVAL;
+ of_id = of_match_device(xgene_pmu_of_match, &pdev->dev);
+ if (of_id) {
+ dev_data = (const struct xgene_pmu_data *) of_id->data;
+ version = dev_data->id;
+ }
+
+#ifdef CONFIG_ACPI
+ if (ACPI_COMPANION(&pdev->dev)) {
+ const struct acpi_device_id *acpi_id;
+
+ acpi_id = acpi_match_device(xgene_pmu_acpi_match, &pdev->dev);
+ if (acpi_id)
+ version = (int) acpi_id->driver_data;
+ }
+#endif
+ if (version < 0)
+ return -ENODEV;
+
+ INIT_LIST_HEAD(&xgene_pmu->l3cpmus);
+ INIT_LIST_HEAD(&xgene_pmu->iobpmus);
+ INIT_LIST_HEAD(&xgene_pmu->mcbpmus);
+ INIT_LIST_HEAD(&xgene_pmu->mcpmus);
+
+ xgene_pmu->version = version;
+ dev_info(&pdev->dev, "X-Gene PMU version %d\n", xgene_pmu->version);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xgene_pmu->pcppmu_csr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(xgene_pmu->pcppmu_csr)) {
+ dev_err(&pdev->dev, "ioremap failed for PCP PMU resource\n");
+ rc = PTR_ERR(xgene_pmu->pcppmu_csr);
+ goto err;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "No IRQ resource\n");
+ rc = -EINVAL;
+ goto err;
+ }
+ rc = devm_request_irq(&pdev->dev, irq, xgene_pmu_isr,
+ IRQF_NOBALANCING | IRQF_NO_THREAD,
+ dev_name(&pdev->dev), xgene_pmu);
+ if (rc) {
+ dev_err(&pdev->dev, "Could not request IRQ %d\n", irq);
+ goto err;
+ }
+
+ raw_spin_lock_init(&xgene_pmu->lock);
+
+ /* Check for active MCBs and MCUs */
+ rc = xgene_pmu_probe_active_mcb_mcu(xgene_pmu, pdev);
+ if (rc) {
+ dev_warn(&pdev->dev, "Unknown MCB/MCU active status\n");
+ xgene_pmu->mcb_active_mask = 0x1;
+ xgene_pmu->mc_active_mask = 0x1;
+ }
+
+ /* Pick one core to use for cpumask attributes */
+ cpumask_set_cpu(smp_processor_id(), &xgene_pmu->cpu);
+
+ /* Make sure that the overflow interrupt is handled by this CPU */
+ rc = irq_set_affinity(irq, &xgene_pmu->cpu);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to set interrupt affinity!\n");
+ goto err;
+ }
+
+ /* Walk through the tree for all PMU perf devices */
+ rc = xgene_pmu_probe_pmu_dev(xgene_pmu, pdev);
+ if (rc) {
+ dev_err(&pdev->dev, "No PMU perf devices found!\n");
+ goto err;
+ }
+
+ /* Enable interrupt */
+ xgene_pmu_unmask_int(xgene_pmu);
+
+ return 0;
+
+err:
+ if (xgene_pmu->pcppmu_csr)
+ devm_iounmap(&pdev->dev, xgene_pmu->pcppmu_csr);
+ devm_kfree(&pdev->dev, xgene_pmu);
+
+ return rc;
+}
+
+static void
+xgene_pmu_dev_cleanup(struct xgene_pmu *xgene_pmu, struct list_head *pmus)
+{
+ struct xgene_pmu_dev_ctx *ctx;
+ struct device *dev = xgene_pmu->dev;
+ struct xgene_pmu_dev *pmu_dev;
+
+ list_for_each_entry(ctx, pmus, next) {
+ pmu_dev = ctx->pmu_dev;
+ if (pmu_dev->inf->csr)
+ devm_iounmap(dev, pmu_dev->inf->csr);
+ devm_kfree(dev, ctx);
+ devm_kfree(dev, pmu_dev);
+ }
+}
+
+static int xgene_pmu_remove(struct platform_device *pdev)
+{
+ struct xgene_pmu *xgene_pmu = dev_get_drvdata(&pdev->dev);
+
+ xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->l3cpmus);
+ xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->iobpmus);
+ xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->mcbpmus);
+ xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->mcpmus);
+
+ if (xgene_pmu->pcppmu_csr)
+ devm_iounmap(&pdev->dev, xgene_pmu->pcppmu_csr);
+ devm_kfree(&pdev->dev, xgene_pmu);
+
+ return 0;
+}
+
+static struct platform_driver xgene_pmu_driver = {
+ .probe = xgene_pmu_probe,
+ .remove = xgene_pmu_remove,
+ .driver = {
+ .name = "xgene-pmu",
+ .of_match_table = xgene_pmu_of_match,
+ .acpi_match_table = ACPI_PTR(xgene_pmu_acpi_match),
+ },
+};
+
+builtin_platform_driver(xgene_pmu_driver);
diff --git a/drivers/pinctrl/mvebu/pinctrl-orion.c b/drivers/pinctrl/mvebu/pinctrl-orion.c
index 345c3df669a0..84e144167b44 100644
--- a/drivers/pinctrl/mvebu/pinctrl-orion.c
+++ b/drivers/pinctrl/mvebu/pinctrl-orion.c
@@ -64,11 +64,11 @@ static int orion_mpp_ctrl_set(unsigned pid, unsigned long config)
return 0;
}
-#define V(f5181l, f5182, f5281) \
- ((f5181l << 0) | (f5182 << 1) | (f5281 << 2))
+#define V(f5181, f5182, f5281) \
+ ((f5181 << 0) | (f5182 << 1) | (f5281 << 2))
enum orion_variant {
- V_5181L = V(1, 0, 0),
+ V_5181 = V(1, 0, 0),
V_5182 = V(0, 1, 0),
V_5281 = V(0, 0, 1),
V_ALL = V(1, 1, 1),
@@ -103,13 +103,13 @@ static struct mvebu_mpp_mode orion_mpp_modes[] = {
MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_ALL),
MPP_VAR_FUNCTION(0x2, "pci", "req5", V_ALL),
MPP_VAR_FUNCTION(0x4, "nand", "re0", V_5182 | V_5281),
- MPP_VAR_FUNCTION(0x5, "pci-1", "clk", V_5181L),
+ MPP_VAR_FUNCTION(0x5, "pci-1", "clk", V_5181),
MPP_VAR_FUNCTION(0x5, "sata0", "act", V_5182)),
MPP_MODE(7,
MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_ALL),
MPP_VAR_FUNCTION(0x2, "pci", "gnt5", V_ALL),
MPP_VAR_FUNCTION(0x4, "nand", "we0", V_5182 | V_5281),
- MPP_VAR_FUNCTION(0x5, "pci-1", "clk", V_5181L),
+ MPP_VAR_FUNCTION(0x5, "pci-1", "clk", V_5181),
MPP_VAR_FUNCTION(0x5, "sata1", "act", V_5182)),
MPP_MODE(8,
MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_ALL),
@@ -165,7 +165,7 @@ static struct mvebu_mpp_ctrl orion_mpp_controls[] = {
MPP_FUNC_CTRL(0, 19, NULL, orion_mpp_ctrl),
};
-static struct pinctrl_gpio_range mv88f5181l_gpio_ranges[] = {
+static struct pinctrl_gpio_range mv88f5181_gpio_ranges[] = {
MPP_GPIO_RANGE(0, 0, 0, 16),
};
@@ -177,14 +177,14 @@ static struct pinctrl_gpio_range mv88f5281_gpio_ranges[] = {
MPP_GPIO_RANGE(0, 0, 0, 16),
};
-static struct mvebu_pinctrl_soc_info mv88f5181l_info = {
- .variant = V_5181L,
+static struct mvebu_pinctrl_soc_info mv88f5181_info = {
+ .variant = V_5181,
.controls = orion_mpp_controls,
.ncontrols = ARRAY_SIZE(orion_mpp_controls),
.modes = orion_mpp_modes,
.nmodes = ARRAY_SIZE(orion_mpp_modes),
- .gpioranges = mv88f5181l_gpio_ranges,
- .ngpioranges = ARRAY_SIZE(mv88f5181l_gpio_ranges),
+ .gpioranges = mv88f5181_gpio_ranges,
+ .ngpioranges = ARRAY_SIZE(mv88f5181_gpio_ranges),
};
static struct mvebu_pinctrl_soc_info mv88f5182_info = {
@@ -212,7 +212,8 @@ static struct mvebu_pinctrl_soc_info mv88f5281_info = {
* muxing, they are identical.
*/
static const struct of_device_id orion_pinctrl_of_match[] = {
- { .compatible = "marvell,88f5181l-pinctrl", .data = &mv88f5181l_info },
+ { .compatible = "marvell,88f5181-pinctrl", .data = &mv88f5181_info },
+ { .compatible = "marvell,88f5181l-pinctrl", .data = &mv88f5181_info },
{ .compatible = "marvell,88f5182-pinctrl", .data = &mv88f5182_info },
{ .compatible = "marvell,88f5281-pinctrl", .data = &mv88f5281_info },
{ }
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 4be1b8c21f6f..06d9fa2f3bc0 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -14,9 +14,58 @@ menuconfig RESET_CONTROLLER
if RESET_CONTROLLER
+config RESET_ATH79
+ bool "AR71xx Reset Driver" if COMPILE_TEST
+ default ATH79
+ help
+ This enables the ATH79 reset controller driver that supports the
+ AR71xx SoC reset controller.
+
+config RESET_BERLIN
+ bool "Berlin Reset Driver" if COMPILE_TEST
+ default ARCH_BERLIN
+ help
+ This enables the reset controller driver for Marvell Berlin SoCs.
+
+config RESET_LPC18XX
+ bool "LPC18xx/43xx Reset Driver" if COMPILE_TEST
+ default ARCH_LPC18XX
+ help
+ This enables the reset controller driver for NXP LPC18xx/43xx SoCs.
+
+config RESET_MESON
+ bool "Meson Reset Driver" if COMPILE_TEST
+ default ARCH_MESON
+ help
+ This enables the reset driver for Amlogic Meson SoCs.
+
config RESET_OXNAS
bool
+config RESET_PISTACHIO
+ bool "Pistachio Reset Driver" if COMPILE_TEST
+ default MACH_PISTACHIO
+ help
+ This enables the reset driver for ImgTec Pistachio SoCs.
+
+config RESET_SOCFPGA
+ bool "SoCFPGA Reset Driver" if COMPILE_TEST
+ default ARCH_SOCFPGA
+ help
+ This enables the reset controller driver for Altera SoCFPGAs.
+
+config RESET_STM32
+ bool "STM32 Reset Driver" if COMPILE_TEST
+ default ARCH_STM32
+ help
+ This enables the RCC reset controller driver for STM32 MCUs.
+
+config RESET_SUNXI
+ bool "Allwinner SoCs Reset Driver" if COMPILE_TEST && !ARCH_SUNXI
+ default ARCH_SUNXI
+ help
+ This enables the reset driver for Allwinner SoCs.
+
config TI_SYSCON_RESET
tristate "TI SYSCON Reset Driver"
depends on HAS_IOMEM
@@ -27,6 +76,22 @@ config TI_SYSCON_RESET
you wish to use the reset framework for such memory-mapped devices,
say Y here. Otherwise, say N.
+config RESET_UNIPHIER
+ tristate "Reset controller driver for UniPhier SoCs"
+ depends on ARCH_UNIPHIER || COMPILE_TEST
+ depends on OF && MFD_SYSCON
+ default ARCH_UNIPHIER
+ help
+ Support for reset controllers on UniPhier SoCs.
+ Say Y if you want to control reset signals provided by the System
+ Control block, Media I/O block, and Peripheral block.
+
+config RESET_ZYNQ
+ bool "ZYNQ Reset Driver" if COMPILE_TEST
+ default ARCH_ZYNQ
+ help
+ This enables the reset controller driver for Xilinx Zynq SoCs.
+
source "drivers/reset/sti/Kconfig"
source "drivers/reset/hisilicon/Kconfig"
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index 5d65a93d3c43..bbe7026617fc 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -1,13 +1,15 @@
obj-y += core.o
-obj-$(CONFIG_ARCH_LPC18XX) += reset-lpc18xx.o
-obj-$(CONFIG_ARCH_SOCFPGA) += reset-socfpga.o
-obj-$(CONFIG_ARCH_BERLIN) += reset-berlin.o
-obj-$(CONFIG_MACH_PISTACHIO) += reset-pistachio.o
-obj-$(CONFIG_ARCH_MESON) += reset-meson.o
-obj-$(CONFIG_ARCH_SUNXI) += reset-sunxi.o
+obj-y += hisilicon/
obj-$(CONFIG_ARCH_STI) += sti/
-obj-$(CONFIG_ARCH_HISI) += hisilicon/
-obj-$(CONFIG_ARCH_ZYNQ) += reset-zynq.o
-obj-$(CONFIG_ATH79) += reset-ath79.o
+obj-$(CONFIG_RESET_ATH79) += reset-ath79.o
+obj-$(CONFIG_RESET_BERLIN) += reset-berlin.o
+obj-$(CONFIG_RESET_LPC18XX) += reset-lpc18xx.o
+obj-$(CONFIG_RESET_MESON) += reset-meson.o
obj-$(CONFIG_RESET_OXNAS) += reset-oxnas.o
+obj-$(CONFIG_RESET_PISTACHIO) += reset-pistachio.o
+obj-$(CONFIG_RESET_SOCFPGA) += reset-socfpga.o
+obj-$(CONFIG_RESET_STM32) += reset-stm32.o
+obj-$(CONFIG_RESET_SUNXI) += reset-sunxi.o
obj-$(CONFIG_TI_SYSCON_RESET) += reset-ti-syscon.o
+obj-$(CONFIG_RESET_UNIPHIER) += reset-uniphier.o
+obj-$(CONFIG_RESET_ZYNQ) += reset-zynq.o
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index 395dc9ce492e..b8ae1dbd4c17 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -138,7 +138,8 @@ EXPORT_SYMBOL_GPL(devm_reset_controller_register);
*/
int reset_control_reset(struct reset_control *rstc)
{
- if (WARN_ON(rstc->shared))
+ if (WARN_ON(IS_ERR_OR_NULL(rstc)) ||
+ WARN_ON(rstc->shared))
return -EINVAL;
if (rstc->rcdev->ops->reset)
@@ -161,6 +162,9 @@ EXPORT_SYMBOL_GPL(reset_control_reset);
*/
int reset_control_assert(struct reset_control *rstc)
{
+ if (WARN_ON(IS_ERR_OR_NULL(rstc)))
+ return -EINVAL;
+
if (!rstc->rcdev->ops->assert)
return -ENOTSUPP;
@@ -184,6 +188,9 @@ EXPORT_SYMBOL_GPL(reset_control_assert);
*/
int reset_control_deassert(struct reset_control *rstc)
{
+ if (WARN_ON(IS_ERR_OR_NULL(rstc)))
+ return -EINVAL;
+
if (!rstc->rcdev->ops->deassert)
return -ENOTSUPP;
@@ -204,6 +211,9 @@ EXPORT_SYMBOL_GPL(reset_control_deassert);
*/
int reset_control_status(struct reset_control *rstc)
{
+ if (WARN_ON(IS_ERR_OR_NULL(rstc)))
+ return -EINVAL;
+
if (rstc->rcdev->ops->status)
return rstc->rcdev->ops->status(rstc->rcdev, rstc->id);
diff --git a/drivers/reset/hisilicon/Kconfig b/drivers/reset/hisilicon/Kconfig
index 26bf95a83a8e..1ff8b0c80980 100644
--- a/drivers/reset/hisilicon/Kconfig
+++ b/drivers/reset/hisilicon/Kconfig
@@ -1,5 +1,6 @@
config COMMON_RESET_HI6220
tristate "Hi6220 Reset Driver"
- depends on (ARCH_HISI && RESET_CONTROLLER)
+ depends on ARCH_HISI || COMPILE_TEST
+ default ARCH_HISI
help
Build the Hisilicon Hi6220 reset driver.
diff --git a/drivers/reset/reset-ath79.c b/drivers/reset/reset-ath79.c
index 16d410cd6146..6b97631f5489 100644
--- a/drivers/reset/reset-ath79.c
+++ b/drivers/reset/reset-ath79.c
@@ -12,6 +12,7 @@
* GNU General Public License for more details.
*/
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c
index 12add9b0fa49..78ebf8424375 100644
--- a/drivers/reset/reset-socfpga.c
+++ b/drivers/reset/reset-socfpga.c
@@ -28,7 +28,6 @@
struct socfpga_reset_data {
spinlock_t lock;
void __iomem *membase;
- u32 modrst_offset;
struct reset_controller_dev rcdev;
};
@@ -45,9 +44,8 @@ static int socfpga_reset_assert(struct reset_controller_dev *rcdev,
spin_lock_irqsave(&data->lock, flags);
- reg = readl(data->membase + data->modrst_offset + (bank * NR_BANKS));
- writel(reg | BIT(offset), data->membase + data->modrst_offset +
- (bank * NR_BANKS));
+ reg = readl(data->membase + (bank * NR_BANKS));
+ writel(reg | BIT(offset), data->membase + (bank * NR_BANKS));
spin_unlock_irqrestore(&data->lock, flags);
return 0;
@@ -67,9 +65,8 @@ static int socfpga_reset_deassert(struct reset_controller_dev *rcdev,
spin_lock_irqsave(&data->lock, flags);
- reg = readl(data->membase + data->modrst_offset + (bank * NR_BANKS));
- writel(reg & ~BIT(offset), data->membase + data->modrst_offset +
- (bank * NR_BANKS));
+ reg = readl(data->membase + (bank * NR_BANKS));
+ writel(reg & ~BIT(offset), data->membase + (bank * NR_BANKS));
spin_unlock_irqrestore(&data->lock, flags);
@@ -85,7 +82,7 @@ static int socfpga_reset_status(struct reset_controller_dev *rcdev,
int offset = id % BITS_PER_LONG;
u32 reg;
- reg = readl(data->membase + data->modrst_offset + (bank * NR_BANKS));
+ reg = readl(data->membase + (bank * NR_BANKS));
return !(reg & BIT(offset));
}
@@ -102,6 +99,7 @@ static int socfpga_reset_probe(struct platform_device *pdev)
struct resource *res;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
+ u32 modrst_offset;
/*
* The binding was mainlined without the required property.
@@ -122,10 +120,11 @@ static int socfpga_reset_probe(struct platform_device *pdev)
if (IS_ERR(data->membase))
return PTR_ERR(data->membase);
- if (of_property_read_u32(np, "altr,modrst-offset", &data->modrst_offset)) {
+ if (of_property_read_u32(np, "altr,modrst-offset", &modrst_offset)) {
dev_warn(dev, "missing altr,modrst-offset property, assuming 0x10!\n");
- data->modrst_offset = 0x10;
+ modrst_offset = 0x10;
}
+ data->membase += modrst_offset;
spin_lock_init(&data->lock);
diff --git a/drivers/reset/reset-stm32.c b/drivers/reset/reset-stm32.c
new file mode 100644
index 000000000000..3a7c8527e66a
--- /dev/null
+++ b/drivers/reset/reset-stm32.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) Maxime Coquelin 2015
+ * Author: Maxime Coquelin <mcoquelin.stm32@gmail.com>
+ * License terms: GNU General Public License (GPL), version 2
+ *
+ * Heavily based on sunxi driver from Maxime Ripard.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+struct stm32_reset_data {
+ spinlock_t lock;
+ void __iomem *membase;
+ struct reset_controller_dev rcdev;
+};
+
+static int stm32_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct stm32_reset_data *data = container_of(rcdev,
+ struct stm32_reset_data,
+ rcdev);
+ int bank = id / BITS_PER_LONG;
+ int offset = id % BITS_PER_LONG;
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&data->lock, flags);
+
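+ /* Set the bit for this reset line in its 32-bit RCC register */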
+ reg = readl(data->membase + (bank * 4));
+ writel(reg | BIT(offset), data->membase + (bank * 4));
+
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ return 0;
+}
+
+static int stm32_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct stm32_reset_data *data = container_of(rcdev,
+ struct stm32_reset_data,
+ rcdev);
+ int bank = id / BITS_PER_LONG;
+ int offset = id % BITS_PER_LONG;
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&data->lock, flags);
+
+ reg = readl(data->membase + (bank * 4));
+ writel(reg & ~BIT(offset), data->membase + (bank * 4));
+
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ return 0;
+}
+
+static const struct reset_control_ops stm32_reset_ops = {
+ .assert = stm32_reset_assert,
+ .deassert = stm32_reset_deassert,
+};
+
+static const struct of_device_id stm32_reset_dt_ids[] = {
+ { .compatible = "st,stm32-rcc", },
+ { /* sentinel */ },
+};
+
+static int stm32_reset_probe(struct platform_device *pdev)
+{
+ struct stm32_reset_data *data;
+ struct resource *res;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(data->membase))
+ return PTR_ERR(data->membase);
+
+ spin_lock_init(&data->lock);
+
+ data->rcdev.owner = THIS_MODULE;
+ data->rcdev.nr_resets = resource_size(res) * 8;
+ data->rcdev.ops = &stm32_reset_ops;
+ data->rcdev.of_node = pdev->dev.of_node;
+
+ return devm_reset_controller_register(&pdev->dev, &data->rcdev);
+}
+
+static struct platform_driver stm32_reset_driver = {
+ .probe = stm32_reset_probe,
+ .driver = {
+ .name = "stm32-rcc-reset",
+ .of_match_table = stm32_reset_dt_ids,
+ },
+};
+builtin_platform_driver(stm32_reset_driver);
diff --git a/drivers/reset/reset-uniphier.c b/drivers/reset/reset-uniphier.c
new file mode 100644
index 000000000000..8b2558e7363e
--- /dev/null
+++ b/drivers/reset/reset-uniphier.c
@@ -0,0 +1,440 @@
+/*
+ * Copyright (C) 2016 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+struct uniphier_reset_data {
+ unsigned int id;
+ unsigned int reg;
+ unsigned int bit;
+ unsigned int flags;
+#define UNIPHIER_RESET_ACTIVE_LOW BIT(0)
+};
+
+#define UNIPHIER_RESET_ID_END (unsigned int)(-1)
+
+#define UNIPHIER_RESET_END \
+ { .id = UNIPHIER_RESET_ID_END }
+
+#define UNIPHIER_RESET(_id, _reg, _bit) \
+ { \
+ .id = (_id), \
+ .reg = (_reg), \
+ .bit = (_bit), \
+ }
+
+#define UNIPHIER_RESETX(_id, _reg, _bit) \
+ { \
+ .id = (_id), \
+ .reg = (_reg), \
+ .bit = (_bit), \
+ .flags = UNIPHIER_RESET_ACTIVE_LOW, \
+ }
+
+/* System reset data */
+#define UNIPHIER_SLD3_SYS_RESET_STDMAC(id) \
+ UNIPHIER_RESETX((id), 0x2000, 10)
+
+#define UNIPHIER_LD11_SYS_RESET_STDMAC(id) \
+ UNIPHIER_RESETX((id), 0x200c, 8)
+
+#define UNIPHIER_PRO4_SYS_RESET_GIO(id) \
+ UNIPHIER_RESETX((id), 0x2000, 6)
+
+#define UNIPHIER_LD20_SYS_RESET_GIO(id) \
+ UNIPHIER_RESETX((id), 0x200c, 5)
+
+#define UNIPHIER_PRO4_SYS_RESET_USB3(id, ch) \
+ UNIPHIER_RESETX((id), 0x2000 + 0x4 * (ch), 17)
+
+const struct uniphier_reset_data uniphier_sld3_sys_reset_data[] = {
+ UNIPHIER_SLD3_SYS_RESET_STDMAC(8), /* Ether, HSC, MIO */
+ UNIPHIER_RESET_END,
+};
+
+const struct uniphier_reset_data uniphier_pro4_sys_reset_data[] = {
+ UNIPHIER_SLD3_SYS_RESET_STDMAC(8), /* HSC, MIO, RLE */
+ UNIPHIER_PRO4_SYS_RESET_GIO(12), /* Ether, SATA, USB3 */
+ UNIPHIER_PRO4_SYS_RESET_USB3(14, 0),
+ UNIPHIER_PRO4_SYS_RESET_USB3(15, 1),
+ UNIPHIER_RESET_END,
+};
+
+const struct uniphier_reset_data uniphier_pro5_sys_reset_data[] = {
+ UNIPHIER_SLD3_SYS_RESET_STDMAC(8), /* HSC */
+ UNIPHIER_PRO4_SYS_RESET_GIO(12), /* PCIe, USB3 */
+ UNIPHIER_PRO4_SYS_RESET_USB3(14, 0),
+ UNIPHIER_PRO4_SYS_RESET_USB3(15, 1),
+ UNIPHIER_RESET_END,
+};
+
+const struct uniphier_reset_data uniphier_pxs2_sys_reset_data[] = {
+ UNIPHIER_SLD3_SYS_RESET_STDMAC(8), /* HSC, RLE */
+ UNIPHIER_PRO4_SYS_RESET_USB3(14, 0),
+ UNIPHIER_PRO4_SYS_RESET_USB3(15, 1),
+ UNIPHIER_RESETX(16, 0x2014, 4), /* USB30-PHY0 */
+ UNIPHIER_RESETX(17, 0x2014, 0), /* USB30-PHY1 */
+ UNIPHIER_RESETX(18, 0x2014, 2), /* USB30-PHY2 */
+ UNIPHIER_RESETX(20, 0x2014, 5), /* USB31-PHY0 */
+ UNIPHIER_RESETX(21, 0x2014, 1), /* USB31-PHY1 */
+ UNIPHIER_RESETX(28, 0x2014, 12), /* SATA */
+ UNIPHIER_RESET(29, 0x2014, 8), /* SATA-PHY (active high) */
+ UNIPHIER_RESET_END,
+};
+
+const struct uniphier_reset_data uniphier_ld11_sys_reset_data[] = {
+ UNIPHIER_LD11_SYS_RESET_STDMAC(8), /* HSC, MIO */
+ UNIPHIER_RESET_END,
+};
+
+const struct uniphier_reset_data uniphier_ld20_sys_reset_data[] = {
+ UNIPHIER_LD11_SYS_RESET_STDMAC(8), /* HSC */
+ UNIPHIER_LD20_SYS_RESET_GIO(12), /* PCIe, USB3 */
+ UNIPHIER_RESETX(16, 0x200c, 12), /* USB30-PHY0 */
+ UNIPHIER_RESETX(17, 0x200c, 13), /* USB30-PHY1 */
+ UNIPHIER_RESETX(18, 0x200c, 14), /* USB30-PHY2 */
+ UNIPHIER_RESETX(19, 0x200c, 15), /* USB30-PHY3 */
+ UNIPHIER_RESET_END,
+};
+
+/* Media I/O reset data */
+#define UNIPHIER_MIO_RESET_SD(id, ch) \
+ UNIPHIER_RESETX((id), 0x110 + 0x200 * (ch), 0)
+
+#define UNIPHIER_MIO_RESET_SD_BRIDGE(id, ch) \
+ UNIPHIER_RESETX((id), 0x110 + 0x200 * (ch), 26)
+
+#define UNIPHIER_MIO_RESET_EMMC_HW_RESET(id, ch) \
+ UNIPHIER_RESETX((id), 0x80 + 0x200 * (ch), 0)
+
+#define UNIPHIER_MIO_RESET_USB2(id, ch) \
+ UNIPHIER_RESETX((id), 0x114 + 0x200 * (ch), 0)
+
+#define UNIPHIER_MIO_RESET_USB2_BRIDGE(id, ch) \
+ UNIPHIER_RESETX((id), 0x110 + 0x200 * (ch), 24)
+
+#define UNIPHIER_MIO_RESET_DMAC(id) \
+ UNIPHIER_RESETX((id), 0x110, 17)
+
+const struct uniphier_reset_data uniphier_sld3_mio_reset_data[] = {
+ UNIPHIER_MIO_RESET_SD(0, 0),
+ UNIPHIER_MIO_RESET_SD(1, 1),
+ UNIPHIER_MIO_RESET_SD(2, 2),
+ UNIPHIER_MIO_RESET_SD_BRIDGE(3, 0),
+ UNIPHIER_MIO_RESET_SD_BRIDGE(4, 1),
+ UNIPHIER_MIO_RESET_SD_BRIDGE(5, 2),
+ UNIPHIER_MIO_RESET_EMMC_HW_RESET(6, 1),
+ UNIPHIER_MIO_RESET_DMAC(7),
+ UNIPHIER_MIO_RESET_USB2(8, 0),
+ UNIPHIER_MIO_RESET_USB2(9, 1),
+ UNIPHIER_MIO_RESET_USB2(10, 2),
+ UNIPHIER_MIO_RESET_USB2(11, 3),
+ UNIPHIER_MIO_RESET_USB2_BRIDGE(12, 0),
+ UNIPHIER_MIO_RESET_USB2_BRIDGE(13, 1),
+ UNIPHIER_MIO_RESET_USB2_BRIDGE(14, 2),
+ UNIPHIER_MIO_RESET_USB2_BRIDGE(15, 3),
+ UNIPHIER_RESET_END,
+};
+
+const struct uniphier_reset_data uniphier_pro5_mio_reset_data[] = {
+ UNIPHIER_MIO_RESET_SD(0, 0),
+ UNIPHIER_MIO_RESET_SD(1, 1),
+ UNIPHIER_MIO_RESET_EMMC_HW_RESET(6, 1),
+ UNIPHIER_RESET_END,
+};
+
+/* Peripheral reset data */
+#define UNIPHIER_PERI_RESET_UART(id, ch) \
+ UNIPHIER_RESETX((id), 0x114, 19 + (ch))
+
+#define UNIPHIER_PERI_RESET_I2C(id, ch) \
+ UNIPHIER_RESETX((id), 0x114, 5 + (ch))
+
+#define UNIPHIER_PERI_RESET_FI2C(id, ch) \
+ UNIPHIER_RESETX((id), 0x114, 24 + (ch))
+
+const struct uniphier_reset_data uniphier_ld4_peri_reset_data[] = {
+ UNIPHIER_PERI_RESET_UART(0, 0),
+ UNIPHIER_PERI_RESET_UART(1, 1),
+ UNIPHIER_PERI_RESET_UART(2, 2),
+ UNIPHIER_PERI_RESET_UART(3, 3),
+ UNIPHIER_PERI_RESET_I2C(4, 0),
+ UNIPHIER_PERI_RESET_I2C(5, 1),
+ UNIPHIER_PERI_RESET_I2C(6, 2),
+ UNIPHIER_PERI_RESET_I2C(7, 3),
+ UNIPHIER_PERI_RESET_I2C(8, 4),
+ UNIPHIER_RESET_END,
+};
+
+const struct uniphier_reset_data uniphier_pro4_peri_reset_data[] = {
+ UNIPHIER_PERI_RESET_UART(0, 0),
+ UNIPHIER_PERI_RESET_UART(1, 1),
+ UNIPHIER_PERI_RESET_UART(2, 2),
+ UNIPHIER_PERI_RESET_UART(3, 3),
+ UNIPHIER_PERI_RESET_FI2C(4, 0),
+ UNIPHIER_PERI_RESET_FI2C(5, 1),
+ UNIPHIER_PERI_RESET_FI2C(6, 2),
+ UNIPHIER_PERI_RESET_FI2C(7, 3),
+ UNIPHIER_PERI_RESET_FI2C(8, 4),
+ UNIPHIER_PERI_RESET_FI2C(9, 5),
+ UNIPHIER_PERI_RESET_FI2C(10, 6),
+ UNIPHIER_RESET_END,
+};
+
+/* core implementation */
+struct uniphier_reset_priv {
+ struct reset_controller_dev rcdev;
+ struct device *dev;
+ struct regmap *regmap;
+ const struct uniphier_reset_data *data;
+};
+
+#define to_uniphier_reset_priv(_rcdev) \
+ container_of(_rcdev, struct uniphier_reset_priv, rcdev)
+
+static int uniphier_reset_update(struct reset_controller_dev *rcdev,
+ unsigned long id, int assert)
+{
+ struct uniphier_reset_priv *priv = to_uniphier_reset_priv(rcdev);
+ const struct uniphier_reset_data *p;
+
+ for (p = priv->data; p->id != UNIPHIER_RESET_ID_END; p++) {
+ unsigned int mask, val;
+
+ if (p->id != id)
+ continue;
+
+ mask = BIT(p->bit);
+
+ if (assert)
+ val = mask;
+ else
+ val = ~mask;
+
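+ /* Active-low reset lines use the inverted value */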
+ if (p->flags & UNIPHIER_RESET_ACTIVE_LOW)
+ val = ~val;
+
+ return regmap_write_bits(priv->regmap, p->reg, mask, val);
+ }
+
+ dev_err(priv->dev, "reset_id=%lu was not handled\n", id);
+ return -EINVAL;
+}
+
+static int uniphier_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return uniphier_reset_update(rcdev, id, 1);
+}
+
+static int uniphier_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return uniphier_reset_update(rcdev, id, 0);
+}
+
+static int uniphier_reset_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct uniphier_reset_priv *priv = to_uniphier_reset_priv(rcdev);
+ const struct uniphier_reset_data *p;
+
+ for (p = priv->data; p->id != UNIPHIER_RESET_ID_END; p++) {
+ unsigned int val;
+ int ret, asserted;
+
+ if (p->id != id)
+ continue;
+
+ ret = regmap_read(priv->regmap, p->reg, &val);
+ if (ret)
+ return ret;
+
+ asserted = !!(val & BIT(p->bit));
+
+ if (p->flags & UNIPHIER_RESET_ACTIVE_LOW)
+ asserted = !asserted;
+
+ return asserted;
+ }
+
+ dev_err(priv->dev, "reset_id=%lu was not found\n", id);
+ return -EINVAL;
+}
+
+static const struct reset_control_ops uniphier_reset_ops = {
+ .assert = uniphier_reset_assert,
+ .deassert = uniphier_reset_deassert,
+ .status = uniphier_reset_status,
+};
+
+static int uniphier_reset_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct uniphier_reset_priv *priv;
+ const struct uniphier_reset_data *p, *data;
+ struct regmap *regmap;
+ struct device_node *parent;
+ unsigned int nr_resets = 0;
+
+ data = of_device_get_match_data(dev);
+ if (WARN_ON(!data))
+ return -EINVAL;
+
+ parent = of_get_parent(dev->of_node); /* parent should be syscon node */
+ regmap = syscon_node_to_regmap(parent);
+ of_node_put(parent);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "failed to get regmap (error %ld)\n",
+ PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
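+ /* The number of resets is the highest ID in the match data plus one */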
+ for (p = data; p->id != UNIPHIER_RESET_ID_END; p++)
+ nr_resets = max(nr_resets, p->id + 1);
+
+ priv->rcdev.ops = &uniphier_reset_ops;
+ priv->rcdev.owner = dev->driver->owner;
+ priv->rcdev.of_node = dev->of_node;
+ priv->rcdev.nr_resets = nr_resets;
+ priv->dev = dev;
+ priv->regmap = regmap;
+ priv->data = data;
+
+ return devm_reset_controller_register(&pdev->dev, &priv->rcdev);
+}
+
+static const struct of_device_id uniphier_reset_match[] = {
+ /* System reset */
+ {
+ .compatible = "socionext,uniphier-sld3-reset",
+ .data = uniphier_sld3_sys_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld4-reset",
+ .data = uniphier_sld3_sys_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pro4-reset",
+ .data = uniphier_pro4_sys_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-sld8-reset",
+ .data = uniphier_sld3_sys_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pro5-reset",
+ .data = uniphier_pro5_sys_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pxs2-reset",
+ .data = uniphier_pxs2_sys_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld11-reset",
+ .data = uniphier_ld11_sys_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld20-reset",
+ .data = uniphier_ld20_sys_reset_data,
+ },
+ /* Media I/O reset */
+ {
+ .compatible = "socionext,uniphier-sld3-mio-reset",
+ .data = uniphier_sld3_mio_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld4-mio-reset",
+ .data = uniphier_sld3_mio_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pro4-mio-reset",
+ .data = uniphier_sld3_mio_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-sld8-mio-reset",
+ .data = uniphier_sld3_mio_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pro5-mio-reset",
+ .data = uniphier_pro5_mio_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pxs2-mio-reset",
+ .data = uniphier_pro5_mio_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld11-mio-reset",
+ .data = uniphier_sld3_mio_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld20-mio-reset",
+ .data = uniphier_pro5_mio_reset_data,
+ },
+ /* Peripheral reset */
+ {
+ .compatible = "socionext,uniphier-ld4-peri-reset",
+ .data = uniphier_ld4_peri_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pro4-peri-reset",
+ .data = uniphier_pro4_peri_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-sld8-peri-reset",
+ .data = uniphier_ld4_peri_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pro5-peri-reset",
+ .data = uniphier_pro4_peri_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pxs2-peri-reset",
+ .data = uniphier_pro4_peri_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld11-peri-reset",
+ .data = uniphier_pro4_peri_reset_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld20-peri-reset",
+ .data = uniphier_pro4_peri_reset_data,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, uniphier_reset_match);
+
+static struct platform_driver uniphier_reset_driver = {
+ .probe = uniphier_reset_probe,
+ .driver = {
+ .name = "uniphier-reset",
+ .of_match_table = uniphier_reset_match,
+ },
+};
+module_platform_driver(uniphier_reset_driver);
+
+MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
+MODULE_DESCRIPTION("UniPhier Reset Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c
index a003ba26ca6e..a5f10936fb9c 100644
--- a/drivers/soc/mediatek/mtk-pmic-wrap.c
+++ b/drivers/soc/mediatek/mtk-pmic-wrap.c
@@ -583,7 +583,7 @@ static int pwrap_wait_for_state(struct pmic_wrapper *wrp,
{
unsigned long timeout;
- timeout = jiffies + usecs_to_jiffies(255);
+ timeout = jiffies + usecs_to_jiffies(10000);
do {
if (time_after(jiffies, timeout))
diff --git a/drivers/soc/qcom/smd.c b/drivers/soc/qcom/smd.c
index ac1957dfdf24..322034ab9d37 100644
--- a/drivers/soc/qcom/smd.c
+++ b/drivers/soc/qcom/smd.c
@@ -95,7 +95,7 @@ static const struct {
/**
* struct qcom_smd_edge - representing a remote processor
- * @smd: handle to qcom_smd
+ * @dev: device for this edge
* @of_node: of_node handle for information related to this edge
* @edge_id: identifier of this edge
* @remote_pid: identifier of remote processor
@@ -111,7 +111,8 @@ static const struct {
* @state_work: work item for edge state changes
*/
struct qcom_smd_edge {
- struct qcom_smd *smd;
+ struct device dev;
+
struct device_node *of_node;
unsigned edge_id;
unsigned remote_pid;
@@ -135,6 +136,8 @@ struct qcom_smd_edge {
struct work_struct state_work;
};
+#define to_smd_edge(d) container_of(d, struct qcom_smd_edge, dev)
+
/*
* SMD channel states.
*/
@@ -197,20 +200,6 @@ struct qcom_smd_channel {
void *drvdata;
struct list_head list;
- struct list_head dev_list;
-};
-
-/**
- * struct qcom_smd - smd struct
- * @dev: device struct
- * @num_edges: number of entries in @edges
- * @edges: array of edges to be handled
- */
-struct qcom_smd {
- struct device *dev;
-
- unsigned num_edges;
- struct qcom_smd_edge edges[0];
};
/*
@@ -374,7 +363,7 @@ static void qcom_smd_channel_reset(struct qcom_smd_channel *channel)
SET_TX_CHANNEL_FLAG(channel, fSTATE, 1);
SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 1);
SET_TX_CHANNEL_INFO(channel, head, 0);
- SET_TX_CHANNEL_INFO(channel, tail, 0);
+ SET_RX_CHANNEL_INFO(channel, tail, 0);
qcom_smd_signal_channel(channel);
@@ -421,7 +410,7 @@ static void qcom_smd_channel_set_state(struct qcom_smd_channel *channel,
if (channel->state == state)
return;
- dev_dbg(edge->smd->dev, "set_state(%s, %d)\n", channel->name, state);
+ dev_dbg(&edge->dev, "set_state(%s, %d)\n", channel->name, state);
SET_TX_CHANNEL_FLAG(channel, fDSR, is_open);
SET_TX_CHANNEL_FLAG(channel, fCTS, is_open);
@@ -891,8 +880,6 @@ static int qcom_smd_dev_remove(struct device *dev)
struct qcom_smd_device *qsdev = to_smd_device(dev);
struct qcom_smd_driver *qsdrv = to_smd_driver(dev);
struct qcom_smd_channel *channel = qsdev->channel;
- struct qcom_smd_channel *tmp;
- struct qcom_smd_channel *ch;
qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSING);
@@ -911,15 +898,9 @@ static int qcom_smd_dev_remove(struct device *dev)
if (qsdrv->remove)
qsdrv->remove(qsdev);
- /*
- * The client is now gone, close and release all channels associated
- * with this sdev
- */
- list_for_each_entry_safe(ch, tmp, &channel->dev_list, dev_list) {
- qcom_smd_channel_close(ch);
- list_del(&ch->dev_list);
- ch->qsdev = NULL;
- }
+ /* The client is now gone, close the primary channel */
+ qcom_smd_channel_close(channel);
+ channel->qsdev = NULL;
return 0;
}
@@ -973,13 +954,12 @@ static int qcom_smd_create_device(struct qcom_smd_channel *channel)
struct qcom_smd_device *qsdev;
struct qcom_smd_edge *edge = channel->edge;
struct device_node *node;
- struct qcom_smd *smd = edge->smd;
int ret;
if (channel->qsdev)
return -EEXIST;
- dev_dbg(smd->dev, "registering '%s'\n", channel->name);
+ dev_dbg(&edge->dev, "registering '%s'\n", channel->name);
qsdev = kzalloc(sizeof(*qsdev), GFP_KERNEL);
if (!qsdev)
@@ -990,7 +970,7 @@ static int qcom_smd_create_device(struct qcom_smd_channel *channel)
edge->of_node->name,
node ? node->name : channel->name);
- qsdev->dev.parent = smd->dev;
+ qsdev->dev.parent = &edge->dev;
qsdev->dev.bus = &qcom_smd_bus;
qsdev->dev.release = qcom_smd_release_device;
qsdev->dev.of_node = node;
@@ -1001,7 +981,7 @@ static int qcom_smd_create_device(struct qcom_smd_channel *channel)
ret = device_register(&qsdev->dev);
if (ret) {
- dev_err(smd->dev, "device_register failed: %d\n", ret);
+ dev_err(&edge->dev, "device_register failed: %d\n", ret);
put_device(&qsdev->dev);
}
@@ -1091,6 +1071,8 @@ qcom_smd_find_channel(struct qcom_smd_edge *edge, const char *name)
*
* Returns a channel handle on success, or -EPROBE_DEFER if the channel isn't
* ready.
+ *
+ * Any channels returned must be closed with a call to qcom_smd_close_channel().
*/
struct qcom_smd_channel *qcom_smd_open_channel(struct qcom_smd_channel *parent,
const char *name,
@@ -1120,15 +1102,21 @@ struct qcom_smd_channel *qcom_smd_open_channel(struct qcom_smd_channel *parent,
return ERR_PTR(ret);
}
- /*
- * Append the list of channel to the channels associated with the sdev
- */
- list_add_tail(&channel->dev_list, &sdev->channel->dev_list);
-
return channel;
}
EXPORT_SYMBOL(qcom_smd_open_channel);
+/**
+ * qcom_smd_close_channel() - close an additionally opened channel
+ * @channel: channel handle, returned by qcom_smd_open_channel()
+ */
+void qcom_smd_close_channel(struct qcom_smd_channel *channel)
+{
+ qcom_smd_channel_close(channel);
+ channel->qsdev = NULL;
+}
+EXPORT_SYMBOL(qcom_smd_close_channel);
+
/*
* Allocate the qcom_smd_channel object for a newly found smd channel,
* retrieving and validating the smem items involved.
@@ -1139,20 +1127,18 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed
char *name)
{
struct qcom_smd_channel *channel;
- struct qcom_smd *smd = edge->smd;
size_t fifo_size;
size_t info_size;
void *fifo_base;
void *info;
int ret;
- channel = devm_kzalloc(smd->dev, sizeof(*channel), GFP_KERNEL);
+ channel = devm_kzalloc(&edge->dev, sizeof(*channel), GFP_KERNEL);
if (!channel)
return ERR_PTR(-ENOMEM);
- INIT_LIST_HEAD(&channel->dev_list);
channel->edge = edge;
- channel->name = devm_kstrdup(smd->dev, name, GFP_KERNEL);
+ channel->name = devm_kstrdup(&edge->dev, name, GFP_KERNEL);
if (!channel->name)
return ERR_PTR(-ENOMEM);
@@ -1175,7 +1161,7 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed
} else if (info_size == 2 * sizeof(struct smd_channel_info)) {
channel->info = info;
} else {
- dev_err(smd->dev,
+ dev_err(&edge->dev,
"channel info of size %zu not supported\n", info_size);
ret = -EINVAL;
goto free_name_and_channel;
@@ -1190,7 +1176,7 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed
/* The channel consist of a rx and tx fifo of equal size */
fifo_size /= 2;
- dev_dbg(smd->dev, "new channel '%s' info-size: %zu fifo-size: %zu\n",
+ dev_dbg(&edge->dev, "new channel '%s' info-size: %zu fifo-size: %zu\n",
name, info_size, fifo_size);
channel->tx_fifo = fifo_base;
@@ -1202,8 +1188,8 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed
return channel;
free_name_and_channel:
- devm_kfree(smd->dev, channel->name);
- devm_kfree(smd->dev, channel);
+ devm_kfree(&edge->dev, channel->name);
+ devm_kfree(&edge->dev, channel);
return ERR_PTR(ret);
}
@@ -1219,7 +1205,6 @@ static void qcom_channel_scan_worker(struct work_struct *work)
struct qcom_smd_alloc_entry *alloc_tbl;
struct qcom_smd_alloc_entry *entry;
struct qcom_smd_channel *channel;
- struct qcom_smd *smd = edge->smd;
unsigned long flags;
unsigned fifo_id;
unsigned info_id;
@@ -1263,7 +1248,7 @@ static void qcom_channel_scan_worker(struct work_struct *work)
list_add(&channel->list, &edge->channels);
spin_unlock_irqrestore(&edge->channels_lock, flags);
- dev_dbg(smd->dev, "new channel found: '%s'\n", channel->name);
+ dev_dbg(&edge->dev, "new channel found: '%s'\n", channel->name);
set_bit(i, edge->allocated[tbl]);
wake_up_interruptible(&edge->new_channel_event);
@@ -1350,22 +1335,6 @@ static int qcom_smd_parse_edge(struct device *dev,
edge->of_node = of_node_get(node);
- irq = irq_of_parse_and_map(node, 0);
- if (irq < 0) {
- dev_err(dev, "required smd interrupt missing\n");
- return -EINVAL;
- }
-
- ret = devm_request_irq(dev, irq,
- qcom_smd_edge_intr, IRQF_TRIGGER_RISING,
- node->name, edge);
- if (ret) {
- dev_err(dev, "failed to request smd irq\n");
- return ret;
- }
-
- edge->irq = irq;
-
key = "qcom,smd-edge";
ret = of_property_read_u32(node, key, &edge->edge_id);
if (ret) {
@@ -1400,18 +1369,121 @@ static int qcom_smd_parse_edge(struct device *dev,
return -EINVAL;
}
+ irq = irq_of_parse_and_map(node, 0);
+ if (irq < 0) {
+ dev_err(dev, "required smd interrupt missing\n");
+ return -EINVAL;
+ }
+
+ ret = devm_request_irq(dev, irq,
+ qcom_smd_edge_intr, IRQF_TRIGGER_RISING,
+ node->name, edge);
+ if (ret) {
+ dev_err(dev, "failed to request smd irq\n");
+ return ret;
+ }
+
+ edge->irq = irq;
+
return 0;
}
-static int qcom_smd_probe(struct platform_device *pdev)
+/*
+ * Release function for an edge.
+ * Reset the state of each associated channel and free the edge context.
+ */
+static void qcom_smd_edge_release(struct device *dev)
+{
+ struct qcom_smd_channel *channel;
+ struct qcom_smd_edge *edge = to_smd_edge(dev);
+
+ list_for_each_entry(channel, &edge->channels, list) {
+ SET_RX_CHANNEL_INFO(channel, state, SMD_CHANNEL_CLOSED);
+ SET_RX_CHANNEL_INFO(channel, head, 0);
+ SET_RX_CHANNEL_INFO(channel, tail, 0);
+ }
+
+ kfree(edge);
+}
+
+/**
+ * qcom_smd_register_edge() - register an edge based on a device_node
+ * @parent: parent device for the edge
+ * @node: device_node describing the edge
+ *
+ * Returns an edge reference, or negative ERR_PTR() on failure.
+ */
+struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent,
+ struct device_node *node)
{
struct qcom_smd_edge *edge;
- struct device_node *node;
- struct qcom_smd *smd;
- size_t array_size;
- int num_edges;
int ret;
- int i = 0;
+
+ edge = kzalloc(sizeof(*edge), GFP_KERNEL);
+ if (!edge)
+ return ERR_PTR(-ENOMEM);
+
+ init_waitqueue_head(&edge->new_channel_event);
+
+ edge->dev.parent = parent;
+ edge->dev.release = qcom_smd_edge_release;
+ dev_set_name(&edge->dev, "%s:%s", dev_name(parent), node->name);
+ ret = device_register(&edge->dev);
+ if (ret) {
+ pr_err("failed to register smd edge\n");
+ return ERR_PTR(ret);
+ }
+
+ ret = qcom_smd_parse_edge(&edge->dev, node, edge);
+ if (ret) {
+ dev_err(&edge->dev, "failed to parse smd edge\n");
+ goto unregister_dev;
+ }
+
+ schedule_work(&edge->scan_work);
+
+ return edge;
+
+unregister_dev:
+ put_device(&edge->dev);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(qcom_smd_register_edge);
+
+static int qcom_smd_remove_device(struct device *dev, void *data)
+{
+ device_unregister(dev);
+ of_node_put(dev->of_node);
+ put_device(dev);
+
+ return 0;
+}
+
+/**
+ * qcom_smd_unregister_edge() - release an edge and its children
+ * @edge: edge reference acquired from qcom_smd_register_edge
+ */
+int qcom_smd_unregister_edge(struct qcom_smd_edge *edge)
+{
+ int ret;
+
+ disable_irq(edge->irq);
+ cancel_work_sync(&edge->scan_work);
+ cancel_work_sync(&edge->state_work);
+
+ ret = device_for_each_child(&edge->dev, NULL, qcom_smd_remove_device);
+ if (ret)
+ dev_warn(&edge->dev, "can't remove smd device: %d\n", ret);
+
+ device_unregister(&edge->dev);
+
+ return 0;
+}
+EXPORT_SYMBOL(qcom_smd_unregister_edge);
+
+static int qcom_smd_probe(struct platform_device *pdev)
+{
+ struct device_node *node;
void *p;
/* Wait for smem */
@@ -1419,29 +1491,17 @@ static int qcom_smd_probe(struct platform_device *pdev)
if (PTR_ERR(p) == -EPROBE_DEFER)
return PTR_ERR(p);
- num_edges = of_get_available_child_count(pdev->dev.of_node);
- array_size = sizeof(*smd) + num_edges * sizeof(struct qcom_smd_edge);
- smd = devm_kzalloc(&pdev->dev, array_size, GFP_KERNEL);
- if (!smd)
- return -ENOMEM;
- smd->dev = &pdev->dev;
-
- smd->num_edges = num_edges;
- for_each_available_child_of_node(pdev->dev.of_node, node) {
- edge = &smd->edges[i++];
- edge->smd = smd;
- init_waitqueue_head(&edge->new_channel_event);
-
- ret = qcom_smd_parse_edge(&pdev->dev, node, edge);
- if (ret)
- continue;
+ for_each_available_child_of_node(pdev->dev.of_node, node)
+ qcom_smd_register_edge(&pdev->dev, node);
- schedule_work(&edge->scan_work);
- }
+ return 0;
+}
- platform_set_drvdata(pdev, smd);
+static int qcom_smd_remove_edge(struct device *dev, void *data)
+{
+ struct qcom_smd_edge *edge = to_smd_edge(dev);
- return 0;
+ return qcom_smd_unregister_edge(edge);
}
/*
@@ -1450,28 +1510,13 @@ static int qcom_smd_probe(struct platform_device *pdev)
*/
static int qcom_smd_remove(struct platform_device *pdev)
{
- struct qcom_smd_channel *channel;
- struct qcom_smd_edge *edge;
- struct qcom_smd *smd = platform_get_drvdata(pdev);
- int i;
-
- for (i = 0; i < smd->num_edges; i++) {
- edge = &smd->edges[i];
-
- disable_irq(edge->irq);
- cancel_work_sync(&edge->scan_work);
- cancel_work_sync(&edge->state_work);
-
- /* No need to lock here, because the writer is gone */
- list_for_each_entry(channel, &edge->channels, list) {
- if (!channel->qsdev)
- continue;
+ int ret;
- qcom_smd_destroy_device(channel);
- }
- }
+ ret = device_for_each_child(&pdev->dev, NULL, qcom_smd_remove_edge);
+ if (ret)
+ dev_warn(&pdev->dev, "can't remove smd device: %d\n", ret);
- return 0;
+ return ret;
}
static const struct of_device_id qcom_smd_of_match[] = {
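
Taken together, the smd.c hunks above split edge handling out of the monolithic struct qcom_smd: each edge is now registered as its own device via qcom_smd_register_edge() and torn down with qcom_smd_unregister_edge(). Below is a minimal sketch of how another driver might consume the new API; the foo_* names and the "smd-edge" child-node name are hypothetical, and only the two edge calls and their signatures come from this patch (assuming the prototypes are exported through linux/soc/qcom/smd.h as in this series).

	/* Hypothetical consumer sketch -- only the edge calls are from this patch. */
	#include <linux/err.h>
	#include <linux/of.h>
	#include <linux/platform_device.h>
	#include <linux/slab.h>
	#include <linux/soc/qcom/smd.h>

	struct foo_ctx {
		struct qcom_smd_edge *edge;
	};

	static int foo_probe(struct platform_device *pdev)
	{
		struct foo_ctx *ctx;
		struct device_node *edge_node;

		ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
		if (!ctx)
			return -ENOMEM;

		/* Assume the edge is described by a child node of this device. */
		edge_node = of_get_child_by_name(pdev->dev.of_node, "smd-edge");
		if (!edge_node)
			return -ENODEV;

		ctx->edge = qcom_smd_register_edge(&pdev->dev, edge_node);
		of_node_put(edge_node);
		if (IS_ERR(ctx->edge))
			return PTR_ERR(ctx->edge);

		platform_set_drvdata(pdev, ctx);
		return 0;
	}

	static int foo_remove(struct platform_device *pdev)
	{
		struct foo_ctx *ctx = platform_get_drvdata(pdev);

		return qcom_smd_unregister_edge(ctx->edge);
	}

Keeping the edge lifetime behind device_register() and qcom_smd_edge_release() is what lets qcom_smd_probe() itself shrink to a plain loop over child nodes.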
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index 2e1aa9f130f4..18ec52f2078a 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -740,7 +740,8 @@ static int qcom_smem_probe(struct platform_device *pdev)
hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
if (hwlock_id < 0) {
- dev_err(&pdev->dev, "failed to retrieve hwlock\n");
+ if (hwlock_id != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to retrieve hwlock\n");
return hwlock_id;
}
diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c
index 44842a205e4b..7acd1517dd37 100644
--- a/drivers/soc/rockchip/pm_domains.c
+++ b/drivers/soc/rockchip/pm_domains.c
@@ -27,6 +27,7 @@ struct rockchip_domain_info {
int req_mask;
int idle_mask;
int ack_mask;
+ bool active_wakeup;
};
struct rockchip_pmu_info {
@@ -75,23 +76,24 @@ struct rockchip_pmu {
#define to_rockchip_pd(gpd) container_of(gpd, struct rockchip_pm_domain, genpd)
-#define DOMAIN(pwr, status, req, idle, ack) \
+#define DOMAIN(pwr, status, req, idle, ack, wakeup) \
{ \
.pwr_mask = (pwr >= 0) ? BIT(pwr) : 0, \
.status_mask = (status >= 0) ? BIT(status) : 0, \
.req_mask = (req >= 0) ? BIT(req) : 0, \
.idle_mask = (idle >= 0) ? BIT(idle) : 0, \
.ack_mask = (ack >= 0) ? BIT(ack) : 0, \
+ .active_wakeup = wakeup, \
}
-#define DOMAIN_RK3288(pwr, status, req) \
- DOMAIN(pwr, status, req, req, (req) + 16)
+#define DOMAIN_RK3288(pwr, status, req, wakeup) \
+ DOMAIN(pwr, status, req, req, (req) + 16, wakeup)
-#define DOMAIN_RK3368(pwr, status, req) \
- DOMAIN(pwr, status, req, (req) + 16, req)
+#define DOMAIN_RK3368(pwr, status, req, wakeup) \
+ DOMAIN(pwr, status, req, (req) + 16, req, wakeup)
-#define DOMAIN_RK3399(pwr, status, req) \
- DOMAIN(pwr, status, req, req, req)
+#define DOMAIN_RK3399(pwr, status, req, wakeup) \
+ DOMAIN(pwr, status, req, req, req, wakeup)
static bool rockchip_pmu_domain_is_idle(struct rockchip_pm_domain *pd)
{
@@ -295,6 +297,17 @@ static void rockchip_pd_detach_dev(struct generic_pm_domain *genpd,
pm_clk_destroy(dev);
}
+static bool rockchip_active_wakeup(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+ struct rockchip_pm_domain *pd;
+
+ genpd = pd_to_genpd(dev->pm_domain);
+ pd = container_of(genpd, struct rockchip_pm_domain, genpd);
+
+ return pd->info->active_wakeup;
+}
+
static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
struct device_node *node)
{
@@ -415,6 +428,7 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
pd->genpd.power_on = rockchip_pd_power_on;
pd->genpd.attach_dev = rockchip_pd_attach_dev;
pd->genpd.detach_dev = rockchip_pd_detach_dev;
+ pd->genpd.dev_ops.active_wakeup = rockchip_active_wakeup;
pd->genpd.flags = GENPD_FLAG_PM_CLK;
pm_genpd_init(&pd->genpd, NULL, false);
@@ -623,48 +637,48 @@ err_out:
}
static const struct rockchip_domain_info rk3288_pm_domains[] = {
- [RK3288_PD_VIO] = DOMAIN_RK3288(7, 7, 4),
- [RK3288_PD_HEVC] = DOMAIN_RK3288(14, 10, 9),
- [RK3288_PD_VIDEO] = DOMAIN_RK3288(8, 8, 3),
- [RK3288_PD_GPU] = DOMAIN_RK3288(9, 9, 2),
+ [RK3288_PD_VIO] = DOMAIN_RK3288(7, 7, 4, false),
+ [RK3288_PD_HEVC] = DOMAIN_RK3288(14, 10, 9, false),
+ [RK3288_PD_VIDEO] = DOMAIN_RK3288(8, 8, 3, false),
+ [RK3288_PD_GPU] = DOMAIN_RK3288(9, 9, 2, false),
};
static const struct rockchip_domain_info rk3368_pm_domains[] = {
- [RK3368_PD_PERI] = DOMAIN_RK3368(13, 12, 6),
- [RK3368_PD_VIO] = DOMAIN_RK3368(15, 14, 8),
- [RK3368_PD_VIDEO] = DOMAIN_RK3368(14, 13, 7),
- [RK3368_PD_GPU_0] = DOMAIN_RK3368(16, 15, 2),
- [RK3368_PD_GPU_1] = DOMAIN_RK3368(17, 16, 2),
+ [RK3368_PD_PERI] = DOMAIN_RK3368(13, 12, 6, true),
+ [RK3368_PD_VIO] = DOMAIN_RK3368(15, 14, 8, false),
+ [RK3368_PD_VIDEO] = DOMAIN_RK3368(14, 13, 7, false),
+ [RK3368_PD_GPU_0] = DOMAIN_RK3368(16, 15, 2, false),
+ [RK3368_PD_GPU_1] = DOMAIN_RK3368(17, 16, 2, false),
};
static const struct rockchip_domain_info rk3399_pm_domains[] = {
- [RK3399_PD_TCPD0] = DOMAIN_RK3399(8, 8, -1),
- [RK3399_PD_TCPD1] = DOMAIN_RK3399(9, 9, -1),
- [RK3399_PD_CCI] = DOMAIN_RK3399(10, 10, -1),
- [RK3399_PD_CCI0] = DOMAIN_RK3399(-1, -1, 15),
- [RK3399_PD_CCI1] = DOMAIN_RK3399(-1, -1, 16),
- [RK3399_PD_PERILP] = DOMAIN_RK3399(11, 11, 1),
- [RK3399_PD_PERIHP] = DOMAIN_RK3399(12, 12, 2),
- [RK3399_PD_CENTER] = DOMAIN_RK3399(13, 13, 14),
- [RK3399_PD_VIO] = DOMAIN_RK3399(14, 14, 17),
- [RK3399_PD_GPU] = DOMAIN_RK3399(15, 15, 0),
- [RK3399_PD_VCODEC] = DOMAIN_RK3399(16, 16, 3),
- [RK3399_PD_VDU] = DOMAIN_RK3399(17, 17, 4),
- [RK3399_PD_RGA] = DOMAIN_RK3399(18, 18, 5),
- [RK3399_PD_IEP] = DOMAIN_RK3399(19, 19, 6),
- [RK3399_PD_VO] = DOMAIN_RK3399(20, 20, -1),
- [RK3399_PD_VOPB] = DOMAIN_RK3399(-1, -1, 7),
- [RK3399_PD_VOPL] = DOMAIN_RK3399(-1, -1, 8),
- [RK3399_PD_ISP0] = DOMAIN_RK3399(22, 22, 9),
- [RK3399_PD_ISP1] = DOMAIN_RK3399(23, 23, 10),
- [RK3399_PD_HDCP] = DOMAIN_RK3399(24, 24, 11),
- [RK3399_PD_GMAC] = DOMAIN_RK3399(25, 25, 23),
- [RK3399_PD_EMMC] = DOMAIN_RK3399(26, 26, 24),
- [RK3399_PD_USB3] = DOMAIN_RK3399(27, 27, 12),
- [RK3399_PD_EDP] = DOMAIN_RK3399(28, 28, 22),
- [RK3399_PD_GIC] = DOMAIN_RK3399(29, 29, 27),
- [RK3399_PD_SD] = DOMAIN_RK3399(30, 30, 28),
- [RK3399_PD_SDIOAUDIO] = DOMAIN_RK3399(31, 31, 29),
+ [RK3399_PD_TCPD0] = DOMAIN_RK3399(8, 8, -1, false),
+ [RK3399_PD_TCPD1] = DOMAIN_RK3399(9, 9, -1, false),
+ [RK3399_PD_CCI] = DOMAIN_RK3399(10, 10, -1, true),
+ [RK3399_PD_CCI0] = DOMAIN_RK3399(-1, -1, 15, true),
+ [RK3399_PD_CCI1] = DOMAIN_RK3399(-1, -1, 16, true),
+ [RK3399_PD_PERILP] = DOMAIN_RK3399(11, 11, 1, true),
+ [RK3399_PD_PERIHP] = DOMAIN_RK3399(12, 12, 2, true),
+ [RK3399_PD_CENTER] = DOMAIN_RK3399(13, 13, 14, true),
+ [RK3399_PD_VIO] = DOMAIN_RK3399(14, 14, 17, false),
+ [RK3399_PD_GPU] = DOMAIN_RK3399(15, 15, 0, false),
+ [RK3399_PD_VCODEC] = DOMAIN_RK3399(16, 16, 3, false),
+ [RK3399_PD_VDU] = DOMAIN_RK3399(17, 17, 4, false),
+ [RK3399_PD_RGA] = DOMAIN_RK3399(18, 18, 5, false),
+ [RK3399_PD_IEP] = DOMAIN_RK3399(19, 19, 6, false),
+ [RK3399_PD_VO] = DOMAIN_RK3399(20, 20, -1, false),
+ [RK3399_PD_VOPB] = DOMAIN_RK3399(-1, -1, 7, false),
+ [RK3399_PD_VOPL] = DOMAIN_RK3399(-1, -1, 8, false),
+ [RK3399_PD_ISP0] = DOMAIN_RK3399(22, 22, 9, false),
+ [RK3399_PD_ISP1] = DOMAIN_RK3399(23, 23, 10, false),
+ [RK3399_PD_HDCP] = DOMAIN_RK3399(24, 24, 11, false),
+ [RK3399_PD_GMAC] = DOMAIN_RK3399(25, 25, 23, true),
+ [RK3399_PD_EMMC] = DOMAIN_RK3399(26, 26, 24, true),
+ [RK3399_PD_USB3] = DOMAIN_RK3399(27, 27, 12, true),
+ [RK3399_PD_EDP] = DOMAIN_RK3399(28, 28, 22, false),
+ [RK3399_PD_GIC] = DOMAIN_RK3399(29, 29, 27, true),
+ [RK3399_PD_SD] = DOMAIN_RK3399(30, 30, 28, true),
+ [RK3399_PD_SDIOAUDIO] = DOMAIN_RK3399(31, 31, 29, true),
};
static const struct rockchip_pmu_info rk3288_pmu = {
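
To make the new fourth argument concrete, here is what two of the rk3399 entries above evaluate to once the reworked DOMAIN()/DOMAIN_RK3399() macros from earlier in this diff are applied; this is purely a worked expansion, nothing beyond what the macros already state.

	/* [RK3399_PD_EMMC] = DOMAIN_RK3399(26, 26, 24, true) evaluates to: */
	{
		.pwr_mask      = BIT(26),
		.status_mask   = BIT(26),
		.req_mask      = BIT(24),
		.idle_mask     = BIT(24),	/* RK3399: idle reuses the req bit */
		.ack_mask      = BIT(24),	/* RK3399: ack reuses the req bit */
		.active_wakeup = true,
	},

	/* [RK3399_PD_VOPB] = DOMAIN_RK3399(-1, -1, 7, false) evaluates to: */
	{
		.pwr_mask      = 0,		/* -1 means "no such control bit" */
		.status_mask   = 0,
		.req_mask      = BIT(7),
		.idle_mask     = BIT(7),
		.ack_mask      = BIT(7),
		.active_wakeup = false,
	},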
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 71c834f3847e..7792ed88d80b 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -967,8 +967,8 @@ static void tegra_io_rail_unprepare(void)
int tegra_io_rail_power_on(unsigned int id)
{
- unsigned long request, status, value;
- unsigned int bit, mask;
+ unsigned long request, status;
+ unsigned int bit;
int err;
mutex_lock(&pmc->powergates_lock);
@@ -977,15 +977,9 @@ int tegra_io_rail_power_on(unsigned int id)
if (err)
goto error;
- mask = 1 << bit;
+ tegra_pmc_writel(IO_DPD_REQ_CODE_OFF | BIT(bit), request);
- value = tegra_pmc_readl(request);
- value |= mask;
- value &= ~IO_DPD_REQ_CODE_MASK;
- value |= IO_DPD_REQ_CODE_OFF;
- tegra_pmc_writel(value, request);
-
- err = tegra_io_rail_poll(status, mask, 0, 250);
+ err = tegra_io_rail_poll(status, BIT(bit), 0, 250);
if (err) {
pr_info("tegra_io_rail_poll() failed: %d\n", err);
goto error;
@@ -1002,8 +996,8 @@ EXPORT_SYMBOL(tegra_io_rail_power_on);
int tegra_io_rail_power_off(unsigned int id)
{
- unsigned long request, status, value;
- unsigned int bit, mask;
+ unsigned long request, status;
+ unsigned int bit;
int err;
mutex_lock(&pmc->powergates_lock);
@@ -1014,15 +1008,9 @@ int tegra_io_rail_power_off(unsigned int id)
goto error;
}
- mask = 1 << bit;
-
- value = tegra_pmc_readl(request);
- value |= mask;
- value &= ~IO_DPD_REQ_CODE_MASK;
- value |= IO_DPD_REQ_CODE_ON;
- tegra_pmc_writel(value, request);
+ tegra_pmc_writel(IO_DPD_REQ_CODE_ON | BIT(bit), request);
- err = tegra_io_rail_poll(status, mask, mask, 250);
+ err = tegra_io_rail_poll(status, BIT(bit), BIT(bit), 250);
if (err)
goto error;
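
For comparison, the two forms of the power-on path side by side; BIT(bit) is the usual (1UL << bit) from the kernel's bitops headers, and the IO_DPD_* request-code macros are the ones already defined in pmc.c (their values are not repeated here).

	/* Before: read-modify-write, preserving whatever else was in the register */
	mask = 1 << bit;
	value = tegra_pmc_readl(request);
	value |= mask;			/* select this rail's bit */
	value &= ~IO_DPD_REQ_CODE_MASK;	/* clear any stale request code */
	value |= IO_DPD_REQ_CODE_OFF;	/* ask for "deep power down off" */
	tegra_pmc_writel(value, request);

	/* After: compose the word directly and write it once */
	tegra_pmc_writel(IO_DPD_REQ_CODE_OFF | BIT(bit), request);

The visible behavioural difference is that the single write no longer carries over whatever unrelated bits a previous read of the request register happened to contain.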