author     Linus Torvalds <torvalds@linux-foundation.org>  2019-03-06 18:41:12 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-03-06 18:41:12 +0100
commit     384d11fa0e2ca15a3e7e52db34a4e43bedf0dc70 (patch)
tree       b68ecd9458fb77d6b80e1389fc3705ca360a7def /drivers
parent     Merge tag 'armsoc-dt' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc (diff)
parent     Merge tag 'soc-fsl-next-v5.1-4' of git://git.kernel.org/pub/scm/linux/kernel/... (diff)
Merge tag 'armsoc-drivers' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc
Pull ARM SoC driver updates from Arnd Bergmann:
 "As usual, the drivers/tee and drivers/reset subsystems get merged
  here, with the expected set of smaller updates and some new hardware
  support. The tee subsystem now supports device drivers to be attached
  to a tee, the first example here is a random number driver with its
  implementation in the secure world.

  Three new power domain drivers get added for specific chip families:

   - Broadcom BCM283x chips (used in Raspberry Pi)

   - Qualcomm Snapdragon phone chips

   - Xilinx ZynqMP FPGA SoCs

  One new driver is added to talk to the BPMP firmware on NVIDIA
  Tegra210

  Existing drivers are extended for new SoC variants from NXP, NVIDIA,
  Amlogic and Qualcomm"

* tag 'armsoc-drivers' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc: (113 commits)
  tee: optee: update optee_msg.h and optee_smc.h to dual license
  tee: add cancellation support to client interface
  dpaa2-eth: configure the cache stashing amount on a queue
  soc: fsl: dpio: configure cache stashing destination
  soc: fsl: dpio: enable frame data cache stashing per software portal
  soc: fsl: guts: make fsl_guts_get_svr() static
  hwrng: make symbol 'optee_rng_id_table' static
  tee: optee: Fix unsigned comparison with less than zero
  hwrng: Fix unsigned comparison with less than zero
  tee: fix possible error pointer ctx dereferencing
  hwrng: optee: Initialize some structs using memset instead of braces
  tee: optee: Initialize some structs using memset instead of braces
  soc: fsl: dpio: fix memory leak of a struct qbman on error exit path
  clk: tegra: dfll: Make symbol 'tegra210_cpu_cvb_tables' static
  soc: qcom: llcc-slice: Fix typos
  qcom: soc: llcc-slice: Consolidate some code
  qcom: soc: llcc-slice: Clear the global drv_data pointer on error
  drivers: soc: xilinx: Add ZynqMP power domain driver
  firmware: xilinx: Add APIs to control node status/power
  dt-bindings: power: Add ZynqMP power domain bindings
  ...
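As orientation for the tee changes pulled in here (the optee-rng driver and the tee-bus support in drivers/tee/optee/device.c appear in the diff below), a minimal sketch of what a device driver attached to the tee bus looks like. This is not part of the merge; the UUID, names and empty probe body are placeholders, modeled on the registration pattern of the optee-rng driver added by this pull:

// SPDX-License-Identifier: GPL-2.0
/* Illustrative tee-bus client skeleton; UUID and names are placeholders. */
#include <linux/module.h>
#include <linux/tee_drv.h>
#include <linux/uuid.h>

static int example_ta_probe(struct device *dev)
{
	/* A real driver opens a tee context/session here, as optee-rng does. */
	return 0;
}

static int example_ta_remove(struct device *dev)
{
	return 0;
}

/* Match on the Trusted Application UUID exported by the secure world. */
static const struct tee_client_device_id example_ta_id_table[] = {
	{ UUID_INIT(0x01234567, 0x89ab, 0xcdef,
		    0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef) },
	{ }
};
MODULE_DEVICE_TABLE(tee, example_ta_id_table);

static struct tee_client_driver example_ta_driver = {
	.id_table = example_ta_id_table,
	.driver = {
		.name = "example-ta",
		.bus = &tee_bus_type,
		.probe = example_ta_probe,
		.remove = example_ta_remove,
	},
};

static int __init example_ta_init(void)
{
	return driver_register(&example_ta_driver.driver);
}
module_init(example_ta_init);

static void __exit example_ta_exit(void)
{
	driver_unregister(&example_ta_driver.driver);
}
module_exit(example_ta_exit);

MODULE_LICENSE("GPL v2");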
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/bus/hisi_lpc.c | 5
-rw-r--r--  drivers/bus/imx-weim.c | 70
-rw-r--r--  drivers/char/hw_random/Kconfig | 15
-rw-r--r--  drivers/char/hw_random/Makefile | 1
-rw-r--r--  drivers/char/hw_random/optee-rng.c | 306
-rw-r--r--  drivers/clk/tegra/Kconfig | 5
-rw-r--r--  drivers/clk/tegra/Makefile | 2
-rw-r--r--  drivers/clk/tegra/clk-dfll.c | 459
-rw-r--r--  drivers/clk/tegra/clk-dfll.h | 6
-rw-r--r--  drivers/clk/tegra/clk-tegra124-dfll-fcpu.c | 520
-rw-r--r--  drivers/clk/tegra/cvb.c | 12
-rw-r--r--  drivers/clk/tegra/cvb.h | 7
-rw-r--r--  drivers/cpufreq/Kconfig.arm | 4
-rw-r--r--  drivers/cpufreq/cpufreq-dt-platdev.c | 1
-rw-r--r--  drivers/cpufreq/tegra124-cpufreq.c | 44
-rw-r--r--  drivers/firmware/imx/misc.c | 38
-rw-r--r--  drivers/firmware/imx/scu-pd.c | 1
-rw-r--r--  drivers/firmware/raspberrypi.c | 11
-rw-r--r--  drivers/firmware/tegra/Makefile | 3
-rw-r--r--  drivers/firmware/tegra/bpmp-private.h | 34
-rw-r--r--  drivers/firmware/tegra/bpmp-tegra186.c | 305
-rw-r--r--  drivers/firmware/tegra/bpmp-tegra210.c | 243
-rw-r--r--  drivers/firmware/tegra/bpmp.c | 376
-rw-r--r--  drivers/firmware/ti_sci.c | 21
-rw-r--r--  drivers/firmware/xilinx/Kconfig | 1
-rw-r--r--  drivers/firmware/xilinx/zynqmp.c | 166
-rw-r--r--  drivers/mfd/Makefile | 1
-rw-r--r--  drivers/mfd/bcm2835-pm.c | 92
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 7
-rw-r--r--  drivers/nvmem/Kconfig | 10
-rw-r--r--  drivers/nvmem/Makefile | 2
-rw-r--r--  drivers/nvmem/zynqmp_nvmem.c | 86
-rw-r--r--  drivers/opp/core.c | 18
-rw-r--r--  drivers/opp/of.c | 2
-rw-r--r--  drivers/opp/opp.h | 2
-rw-r--r--  drivers/reset/Kconfig | 12
-rw-r--r--  drivers/reset/Makefile | 2
-rw-r--r--  drivers/reset/reset-brcmstb.c | 132
-rw-r--r--  drivers/reset/reset-imx7.c | 172
-rw-r--r--  drivers/reset/reset-socfpga.c | 2
-rw-r--r--  drivers/reset/reset-sunxi.c | 1
-rw-r--r--  drivers/reset/reset-zynqmp.c | 114
-rw-r--r--  drivers/soc/amlogic/meson-canvas.c | 18
-rw-r--r--  drivers/soc/amlogic/meson-clk-measure.c | 196
-rw-r--r--  drivers/soc/bcm/Kconfig | 12
-rw-r--r--  drivers/soc/bcm/Makefile | 1
-rw-r--r--  drivers/soc/bcm/bcm2835-power.c | 661
-rw-r--r--  drivers/soc/fsl/Kconfig | 1
-rw-r--r--  drivers/soc/fsl/dpio/dpio-cmd.h | 5
-rw-r--r--  drivers/soc/fsl/dpio/dpio-driver.c | 54
-rw-r--r--  drivers/soc/fsl/dpio/dpio-service.c | 5
-rw-r--r--  drivers/soc/fsl/dpio/dpio.c | 16
-rw-r--r--  drivers/soc/fsl/dpio/dpio.h | 5
-rw-r--r--  drivers/soc/fsl/dpio/qbman-portal.c | 5
-rw-r--r--  drivers/soc/fsl/guts.c | 10
-rw-r--r--  drivers/soc/imx/Kconfig | 2
-rw-r--r--  drivers/soc/imx/gpcv2.c | 76
-rw-r--r--  drivers/soc/qcom/Kconfig | 18
-rw-r--r--  drivers/soc/qcom/Makefile | 2
-rw-r--r--  drivers/soc/qcom/llcc-sdm845.c | 6
-rw-r--r--  drivers/soc/qcom/llcc-slice.c | 101
-rw-r--r--  drivers/soc/qcom/qcom_gsbi.c | 7
-rw-r--r--  drivers/soc/qcom/rmtfs_mem.c | 32
-rw-r--r--  drivers/soc/qcom/rpmh.c | 37
-rw-r--r--  drivers/soc/qcom/rpmhpd.c | 406
-rw-r--r--  drivers/soc/qcom/rpmpd.c | 315
-rw-r--r--  drivers/soc/qcom/smd-rpm.c | 1
-rw-r--r--  drivers/soc/tegra/fuse/fuse-tegra.c | 12
-rw-r--r--  drivers/soc/tegra/fuse/speedo-tegra210.c | 2
-rw-r--r--  drivers/soc/tegra/pmc.c | 424
-rw-r--r--  drivers/soc/ti/knav_dma.c | 2
-rw-r--r--  drivers/soc/xilinx/Kconfig | 20
-rw-r--r--  drivers/soc/xilinx/Makefile | 2
-rw-r--r--  drivers/soc/xilinx/zynqmp_pm_domains.c | 321
-rw-r--r--  drivers/soc/xilinx/zynqmp_power.c | 178
-rw-r--r--  drivers/tee/optee/Makefile | 1
-rw-r--r--  drivers/tee/optee/core.c | 4
-rw-r--r--  drivers/tee/optee/device.c | 160
-rw-r--r--  drivers/tee/optee/optee_msg.h | 26
-rw-r--r--  drivers/tee/optee/optee_private.h | 3
-rw-r--r--  drivers/tee/optee/optee_smc.h | 26
-rw-r--r--  drivers/tee/optee/supp.c | 10
-rw-r--r--  drivers/tee/tee_core.c | 78
-rw-r--r--  drivers/watchdog/bcm2835_wdt.c | 26
84 files changed, 5895 insertions(+), 703 deletions(-)
diff --git a/drivers/bus/hisi_lpc.c b/drivers/bus/hisi_lpc.c
index d5f85455fa62..19d7b6ff2f17 100644
--- a/drivers/bus/hisi_lpc.c
+++ b/drivers/bus/hisi_lpc.c
@@ -522,10 +522,9 @@ static int hisi_lpc_acpi_probe(struct device *hostdev)
if (!found) {
dev_warn(hostdev,
- "could not find cell for child device (%s)\n",
+ "could not find cell for child device (%s), discarding\n",
hid);
- ret = -ENODEV;
- goto fail;
+ continue;
}
pdev = platform_device_alloc(cell->name, PLATFORM_DEVID_AUTO);
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
index d84996a4528e..db74334ca5ef 100644
--- a/drivers/bus/imx-weim.c
+++ b/drivers/bus/imx-weim.c
@@ -46,6 +46,17 @@ static const struct imx_weim_devtype imx51_weim_devtype = {
};
#define MAX_CS_REGS_COUNT 6
+#define MAX_CS_COUNT 6
+#define OF_REG_SIZE 3
+
+struct cs_timing {
+ bool is_applied;
+ u32 regs[MAX_CS_REGS_COUNT];
+};
+
+struct cs_timing_state {
+ struct cs_timing cs[MAX_CS_COUNT];
+};
static const struct of_device_id weim_id_table[] = {
/* i.MX1/21 */
@@ -111,21 +122,19 @@ err:
}
/* Parse and set the timing for this device. */
-static int __init weim_timing_setup(struct device_node *np, void __iomem *base,
- const struct imx_weim_devtype *devtype)
+static int __init weim_timing_setup(struct device *dev,
+ struct device_node *np, void __iomem *base,
+ const struct imx_weim_devtype *devtype,
+ struct cs_timing_state *ts)
{
u32 cs_idx, value[MAX_CS_REGS_COUNT];
int i, ret;
+ int reg_idx, num_regs;
+ struct cs_timing *cst;
if (WARN_ON(devtype->cs_regs_count > MAX_CS_REGS_COUNT))
return -EINVAL;
-
- /* get the CS index from this child node's "reg" property. */
- ret = of_property_read_u32(np, "reg", &cs_idx);
- if (ret)
- return ret;
-
- if (cs_idx >= devtype->cs_count)
+ if (WARN_ON(devtype->cs_count > MAX_CS_COUNT))
return -EINVAL;
ret = of_property_read_u32_array(np, "fsl,weim-cs-timing",
@@ -133,9 +142,43 @@ static int __init weim_timing_setup(struct device_node *np, void __iomem *base,
if (ret)
return ret;
- /* set the timing for WEIM */
- for (i = 0; i < devtype->cs_regs_count; i++)
- writel(value[i], base + cs_idx * devtype->cs_stride + i * 4);
+ /*
+ * The child node's "reg" property may contain multiple address ranges;
+ * extract the chip select for each.
+ */
+ num_regs = of_property_count_elems_of_size(np, "reg", OF_REG_SIZE);
+ if (num_regs < 0)
+ return num_regs;
+ if (!num_regs)
+ return -EINVAL;
+ for (reg_idx = 0; reg_idx < num_regs; reg_idx++) {
+ /* get the CS index from this child node's "reg" property. */
+ ret = of_property_read_u32_index(np, "reg",
+ reg_idx * OF_REG_SIZE, &cs_idx);
+ if (ret)
+ break;
+
+ if (cs_idx >= devtype->cs_count)
+ return -EINVAL;
+
+ /* prevent re-configuring a CS that's already been configured */
+ cst = &ts->cs[cs_idx];
+ if (cst->is_applied && memcmp(value, cst->regs,
+ devtype->cs_regs_count * sizeof(u32))) {
+ dev_err(dev, "fsl,weim-cs-timing conflict on %pOF", np);
+ return -EINVAL;
+ }
+
+ /* set the timing for WEIM */
+ for (i = 0; i < devtype->cs_regs_count; i++)
+ writel(value[i],
+ base + cs_idx * devtype->cs_stride + i * 4);
+ if (!cst->is_applied) {
+ cst->is_applied = true;
+ memcpy(cst->regs, value,
+ devtype->cs_regs_count * sizeof(u32));
+ }
+ }
return 0;
}
@@ -148,6 +191,7 @@ static int __init weim_parse_dt(struct platform_device *pdev,
const struct imx_weim_devtype *devtype = of_id->data;
struct device_node *child;
int ret, have_child = 0;
+ struct cs_timing_state ts = {};
if (devtype == &imx50_weim_devtype) {
ret = imx_weim_gpr_setup(pdev);
@@ -156,7 +200,7 @@ static int __init weim_parse_dt(struct platform_device *pdev,
}
for_each_available_child_of_node(pdev->dev.of_node, child) {
- ret = weim_timing_setup(child, base, devtype);
+ ret = weim_timing_setup(&pdev->dev, child, base, devtype, &ts);
if (ret)
dev_warn(&pdev->dev, "%pOF set timing failed.\n",
child);
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index dac895dc01b9..25a7d8ffdb5d 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -424,6 +424,21 @@ config HW_RANDOM_EXYNOS
will be called exynos-trng.
If unsure, say Y.
+
+config HW_RANDOM_OPTEE
+ tristate "OP-TEE based Random Number Generator support"
+ depends on OPTEE
+ default HW_RANDOM
+ help
+ This driver provides support for an OP-TEE based Random Number
+ Generator on ARM SoCs where hardware entropy sources are not
+ accessible to the normal world (Linux).
+
+ To compile this driver as a module, choose M here: the module
+ will be called optee-rng.
+
+ If unsure, say Y.
+
endif # HW_RANDOM
config UML_RANDOM
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index e35ec3ce3a20..7c9ef4a7667f 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -38,3 +38,4 @@ obj-$(CONFIG_HW_RANDOM_CAVIUM) += cavium-rng.o cavium-rng-vf.o
obj-$(CONFIG_HW_RANDOM_MTK) += mtk-rng.o
obj-$(CONFIG_HW_RANDOM_S390) += s390-trng.o
obj-$(CONFIG_HW_RANDOM_KEYSTONE) += ks-sa-rng.o
+obj-$(CONFIG_HW_RANDOM_OPTEE) += optee-rng.o
diff --git a/drivers/char/hw_random/optee-rng.c b/drivers/char/hw_random/optee-rng.c
new file mode 100644
index 000000000000..ddfbabaa5f8f
--- /dev/null
+++ b/drivers/char/hw_random/optee-rng.c
@@ -0,0 +1,306 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2019 Linaro Ltd.
+ */
+
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/hw_random.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include <linux/uuid.h>
+
+#define DRIVER_NAME "optee-rng"
+
+#define TEE_ERROR_HEALTH_TEST_FAIL 0x00000001
+
+/*
+ * TA_CMD_GET_ENTROPY - Get Entropy from RNG
+ *
+ * param[0] (inout memref) - Entropy buffer memory reference
+ * param[1] unused
+ * param[2] unused
+ * param[3] unused
+ *
+ * Result:
+ * TEE_SUCCESS - Invoke command success
+ * TEE_ERROR_BAD_PARAMETERS - Incorrect input param
+ * TEE_ERROR_NOT_SUPPORTED - Requested entropy size greater than size of pool
+ * TEE_ERROR_HEALTH_TEST_FAIL - Continuous health testing failed
+ */
+#define TA_CMD_GET_ENTROPY 0x0
+
+/*
+ * TA_CMD_GET_RNG_INFO - Get RNG information
+ *
+ * param[0] (out value) - value.a: RNG data-rate in bytes per second
+ * value.b: Quality/Entropy per 1024 bit of data
+ * param[1] unused
+ * param[2] unused
+ * param[3] unused
+ *
+ * Result:
+ * TEE_SUCCESS - Invoke command success
+ * TEE_ERROR_BAD_PARAMETERS - Incorrect input param
+ */
+#define TA_CMD_GET_RNG_INFO 0x1
+
+#define MAX_ENTROPY_REQ_SZ (4 * 1024)
+
+/**
+ * struct optee_rng_private - OP-TEE Random Number Generator private data
+ * @dev: OP-TEE based RNG device.
+ * @ctx: OP-TEE context handler.
+ * @session_id: RNG TA session identifier.
+ * @data_rate: RNG data rate.
+ * @entropy_shm_pool: Memory pool shared with RNG device.
+ * @optee_rng: OP-TEE RNG driver structure.
+ */
+struct optee_rng_private {
+ struct device *dev;
+ struct tee_context *ctx;
+ u32 session_id;
+ u32 data_rate;
+ struct tee_shm *entropy_shm_pool;
+ struct hwrng optee_rng;
+};
+
+#define to_optee_rng_private(r) \
+ container_of(r, struct optee_rng_private, optee_rng)
+
+static size_t get_optee_rng_data(struct optee_rng_private *pvt_data,
+ void *buf, size_t req_size)
+{
+ int ret = 0;
+ u8 *rng_data = NULL;
+ size_t rng_size = 0;
+ struct tee_ioctl_invoke_arg inv_arg;
+ struct tee_param param[4];
+
+ memset(&inv_arg, 0, sizeof(inv_arg));
+ memset(&param, 0, sizeof(param));
+
+ /* Invoke TA_CMD_GET_ENTROPY function of Trusted App */
+ inv_arg.func = TA_CMD_GET_ENTROPY;
+ inv_arg.session = pvt_data->session_id;
+ inv_arg.num_params = 4;
+
+ /* Fill invoke cmd params */
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT;
+ param[0].u.memref.shm = pvt_data->entropy_shm_pool;
+ param[0].u.memref.size = req_size;
+ param[0].u.memref.shm_offs = 0;
+
+ ret = tee_client_invoke_func(pvt_data->ctx, &inv_arg, param);
+ if ((ret < 0) || (inv_arg.ret != 0)) {
+ dev_err(pvt_data->dev, "TA_CMD_GET_ENTROPY invoke err: %x\n",
+ inv_arg.ret);
+ return 0;
+ }
+
+ rng_data = tee_shm_get_va(pvt_data->entropy_shm_pool, 0);
+ if (IS_ERR(rng_data)) {
+ dev_err(pvt_data->dev, "tee_shm_get_va failed\n");
+ return 0;
+ }
+
+ rng_size = param[0].u.memref.size;
+ memcpy(buf, rng_data, rng_size);
+
+ return rng_size;
+}
+
+static int optee_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct optee_rng_private *pvt_data = to_optee_rng_private(rng);
+ size_t read = 0, rng_size = 0;
+ int timeout = 1;
+ u8 *data = buf;
+
+ if (max > MAX_ENTROPY_REQ_SZ)
+ max = MAX_ENTROPY_REQ_SZ;
+
+ while (read == 0) {
+ rng_size = get_optee_rng_data(pvt_data, data, (max - read));
+
+ data += rng_size;
+ read += rng_size;
+
+ if (wait) {
+ if (timeout-- == 0)
+ return read;
+ msleep((1000 * (max - read)) / pvt_data->data_rate);
+ } else {
+ return read;
+ }
+ }
+
+ return read;
+}
+
+static int optee_rng_init(struct hwrng *rng)
+{
+ struct optee_rng_private *pvt_data = to_optee_rng_private(rng);
+ struct tee_shm *entropy_shm_pool = NULL;
+
+ entropy_shm_pool = tee_shm_alloc(pvt_data->ctx, MAX_ENTROPY_REQ_SZ,
+ TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
+ if (IS_ERR(entropy_shm_pool)) {
+ dev_err(pvt_data->dev, "tee_shm_alloc failed\n");
+ return PTR_ERR(entropy_shm_pool);
+ }
+
+ pvt_data->entropy_shm_pool = entropy_shm_pool;
+
+ return 0;
+}
+
+static void optee_rng_cleanup(struct hwrng *rng)
+{
+ struct optee_rng_private *pvt_data = to_optee_rng_private(rng);
+
+ tee_shm_free(pvt_data->entropy_shm_pool);
+}
+
+static struct optee_rng_private pvt_data = {
+ .optee_rng = {
+ .name = DRIVER_NAME,
+ .init = optee_rng_init,
+ .cleanup = optee_rng_cleanup,
+ .read = optee_rng_read,
+ }
+};
+
+static int get_optee_rng_info(struct device *dev)
+{
+ int ret = 0;
+ struct tee_ioctl_invoke_arg inv_arg;
+ struct tee_param param[4];
+
+ memset(&inv_arg, 0, sizeof(inv_arg));
+ memset(&param, 0, sizeof(param));
+
+ /* Invoke TA_CMD_GET_RNG_INFO function of Trusted App */
+ inv_arg.func = TA_CMD_GET_RNG_INFO;
+ inv_arg.session = pvt_data.session_id;
+ inv_arg.num_params = 4;
+
+ /* Fill invoke cmd params */
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT;
+
+ ret = tee_client_invoke_func(pvt_data.ctx, &inv_arg, param);
+ if ((ret < 0) || (inv_arg.ret != 0)) {
+ dev_err(dev, "TA_CMD_GET_RNG_INFO invoke err: %x\n",
+ inv_arg.ret);
+ return -EINVAL;
+ }
+
+ pvt_data.data_rate = param[0].u.value.a;
+ pvt_data.optee_rng.quality = param[0].u.value.b;
+
+ return 0;
+}
+
+static int optee_ctx_match(struct tee_ioctl_version_data *ver, const void *data)
+{
+ if (ver->impl_id == TEE_IMPL_ID_OPTEE)
+ return 1;
+ else
+ return 0;
+}
+
+static int optee_rng_probe(struct device *dev)
+{
+ struct tee_client_device *rng_device = to_tee_client_device(dev);
+ int ret = 0, err = -ENODEV;
+ struct tee_ioctl_open_session_arg sess_arg;
+
+ memset(&sess_arg, 0, sizeof(sess_arg));
+
+ /* Open context with TEE driver */
+ pvt_data.ctx = tee_client_open_context(NULL, optee_ctx_match, NULL,
+ NULL);
+ if (IS_ERR(pvt_data.ctx))
+ return -ENODEV;
+
+ /* Open session with hwrng Trusted App */
+ memcpy(sess_arg.uuid, rng_device->id.uuid.b, TEE_IOCTL_UUID_LEN);
+ sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
+ sess_arg.num_params = 0;
+
+ ret = tee_client_open_session(pvt_data.ctx, &sess_arg, NULL);
+ if ((ret < 0) || (sess_arg.ret != 0)) {
+ dev_err(dev, "tee_client_open_session failed, err: %x\n",
+ sess_arg.ret);
+ err = -EINVAL;
+ goto out_ctx;
+ }
+ pvt_data.session_id = sess_arg.session;
+
+ err = get_optee_rng_info(dev);
+ if (err)
+ goto out_sess;
+
+ err = hwrng_register(&pvt_data.optee_rng);
+ if (err) {
+ dev_err(dev, "hwrng registration failed (%d)\n", err);
+ goto out_sess;
+ }
+
+ pvt_data.dev = dev;
+
+ return 0;
+
+out_sess:
+ tee_client_close_session(pvt_data.ctx, pvt_data.session_id);
+out_ctx:
+ tee_client_close_context(pvt_data.ctx);
+
+ return err;
+}
+
+static int optee_rng_remove(struct device *dev)
+{
+ hwrng_unregister(&pvt_data.optee_rng);
+ tee_client_close_session(pvt_data.ctx, pvt_data.session_id);
+ tee_client_close_context(pvt_data.ctx);
+
+ return 0;
+}
+
+static const struct tee_client_device_id optee_rng_id_table[] = {
+ {UUID_INIT(0xab7a617c, 0xb8e7, 0x4d8f,
+ 0x83, 0x01, 0xd0, 0x9b, 0x61, 0x03, 0x6b, 0x64)},
+ {}
+};
+
+MODULE_DEVICE_TABLE(tee, optee_rng_id_table);
+
+static struct tee_client_driver optee_rng_driver = {
+ .id_table = optee_rng_id_table,
+ .driver = {
+ .name = DRIVER_NAME,
+ .bus = &tee_bus_type,
+ .probe = optee_rng_probe,
+ .remove = optee_rng_remove,
+ },
+};
+
+static int __init optee_rng_mod_init(void)
+{
+ return driver_register(&optee_rng_driver.driver);
+}
+
+static void __exit optee_rng_mod_exit(void)
+{
+ driver_unregister(&optee_rng_driver.driver);
+}
+
+module_init(optee_rng_mod_init);
+module_exit(optee_rng_mod_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Sumit Garg <sumit.garg@linaro.org>");
+MODULE_DESCRIPTION("OP-TEE based random number generator driver");
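Once this driver is bound, the hw_random core exposes it through the usual /dev/hwrng character device (and the rng_current/rng_available sysfs attributes). As a quick, hedged way to exercise it, a small userspace sketch that is not part of this merge and assumes the OP-TEE RNG TA is present and optee-rng is the active hwrng:

/* Userspace sketch: read 16 bytes from the kernel's current hwrng. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	ssize_t n;
	int fd = open("/dev/hwrng", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/hwrng");
		return 1;
	}
	n = read(fd, buf, sizeof(buf));
	if (n < 0) {
		perror("read");
		close(fd);
		return 1;
	}
	for (ssize_t i = 0; i < n; i++)
		printf("%02x", buf[i]);
	putchar('\n');
	close(fd);
	return 0;
}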
diff --git a/drivers/clk/tegra/Kconfig b/drivers/clk/tegra/Kconfig
index 7ddacae5d0b1..1adcccfa7829 100644
--- a/drivers/clk/tegra/Kconfig
+++ b/drivers/clk/tegra/Kconfig
@@ -5,3 +5,8 @@ config TEGRA_CLK_EMC
config CLK_TEGRA_BPMP
def_bool y
depends on TEGRA_BPMP
+
+config TEGRA_CLK_DFLL
+ depends on ARCH_TEGRA_124_SOC || ARCH_TEGRA_210_SOC
+ select PM_OPP
+ def_bool y
diff --git a/drivers/clk/tegra/Makefile b/drivers/clk/tegra/Makefile
index 6507acc843c7..4812e45c2214 100644
--- a/drivers/clk/tegra/Makefile
+++ b/drivers/clk/tegra/Makefile
@@ -20,7 +20,7 @@ obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += clk-tegra20.o
obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += clk-tegra30.o
obj-$(CONFIG_ARCH_TEGRA_114_SOC) += clk-tegra114.o
obj-$(CONFIG_ARCH_TEGRA_124_SOC) += clk-tegra124.o
-obj-$(CONFIG_ARCH_TEGRA_124_SOC) += clk-tegra124-dfll-fcpu.o
+obj-$(CONFIG_TEGRA_CLK_DFLL) += clk-tegra124-dfll-fcpu.o
obj-$(CONFIG_ARCH_TEGRA_132_SOC) += clk-tegra124.o
obj-y += cvb.o
obj-$(CONFIG_ARCH_TEGRA_210_SOC) += clk-tegra210.o
diff --git a/drivers/clk/tegra/clk-dfll.c b/drivers/clk/tegra/clk-dfll.c
index 609e363dabf8..0400e5b1d627 100644
--- a/drivers/clk/tegra/clk-dfll.c
+++ b/drivers/clk/tegra/clk-dfll.c
@@ -1,7 +1,7 @@
/*
* clk-dfll.c - Tegra DFLL clock source common code
*
- * Copyright (C) 2012-2014 NVIDIA Corporation. All rights reserved.
+ * Copyright (C) 2012-2019 NVIDIA Corporation. All rights reserved.
*
* Aleksandr Frid <afrid@nvidia.com>
* Paul Walmsley <pwalmsley@nvidia.com>
@@ -47,6 +47,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
@@ -243,6 +244,12 @@ enum dfll_tune_range {
DFLL_TUNE_LOW = 1,
};
+
+enum tegra_dfll_pmu_if {
+ TEGRA_DFLL_PMU_I2C = 0,
+ TEGRA_DFLL_PMU_PWM = 1,
+};
+
/**
* struct dfll_rate_req - target DFLL rate request data
* @rate: target frequency, after the postscaling
@@ -300,10 +307,19 @@ struct tegra_dfll {
u32 i2c_reg;
u32 i2c_slave_addr;
- /* i2c_lut array entries are regulator framework selectors */
- unsigned i2c_lut[MAX_DFLL_VOLTAGES];
- int i2c_lut_size;
- u8 lut_min, lut_max, lut_safe;
+ /* lut array entries are regulator framework selectors or PWM values */
+ unsigned lut[MAX_DFLL_VOLTAGES];
+ unsigned long lut_uv[MAX_DFLL_VOLTAGES];
+ int lut_size;
+ u8 lut_bottom, lut_min, lut_max, lut_safe;
+
+ /* PWM interface */
+ enum tegra_dfll_pmu_if pmu_if;
+ unsigned long pwm_rate;
+ struct pinctrl *pwm_pin;
+ struct pinctrl_state *pwm_enable_state;
+ struct pinctrl_state *pwm_disable_state;
+ u32 reg_init_uV;
};
#define clk_hw_to_dfll(_hw) container_of(_hw, struct tegra_dfll, dfll_clk_hw)
@@ -490,6 +506,34 @@ static void dfll_set_mode(struct tegra_dfll *td,
}
/*
+ * DVCO rate control
+ */
+
+static unsigned long get_dvco_rate_below(struct tegra_dfll *td, u8 out_min)
+{
+ struct dev_pm_opp *opp;
+ unsigned long rate, prev_rate;
+ unsigned long uv, min_uv;
+
+ min_uv = td->lut_uv[out_min];
+ for (rate = 0, prev_rate = 0; ; rate++) {
+ opp = dev_pm_opp_find_freq_ceil(td->soc->dev, &rate);
+ if (IS_ERR(opp))
+ break;
+
+ uv = dev_pm_opp_get_voltage(opp);
+ dev_pm_opp_put(opp);
+
+ if (uv && uv > min_uv)
+ return prev_rate;
+
+ prev_rate = rate;
+ }
+
+ return prev_rate;
+}
+
+/*
* DFLL-to-I2C controller interface
*/
@@ -518,6 +562,118 @@ static int dfll_i2c_set_output_enabled(struct tegra_dfll *td, bool enable)
return 0;
}
+
+/*
+ * DFLL-to-PWM controller interface
+ */
+
+/**
+ * dfll_pwm_set_output_enabled - enable/disable PWM voltage requests
+ * @td: DFLL instance
+ * @enable: whether to enable or disable the PWM voltage requests
+ *
+ * Set the master enable control for PWM control value updates. If disabled,
+ * then the PWM signal is not driven. Also configure the PWM output pad
+ * to the appropriate state.
+ */
+static int dfll_pwm_set_output_enabled(struct tegra_dfll *td, bool enable)
+{
+ int ret;
+ u32 val, div;
+
+ if (enable) {
+ ret = pinctrl_select_state(td->pwm_pin, td->pwm_enable_state);
+ if (ret < 0) {
+ dev_err(td->dev, "setting enable state failed\n");
+ return -EINVAL;
+ }
+ val = dfll_readl(td, DFLL_OUTPUT_CFG);
+ val &= ~DFLL_OUTPUT_CFG_PWM_DIV_MASK;
+ div = DIV_ROUND_UP(td->ref_rate, td->pwm_rate);
+ val |= (div << DFLL_OUTPUT_CFG_PWM_DIV_SHIFT)
+ & DFLL_OUTPUT_CFG_PWM_DIV_MASK;
+ dfll_writel(td, val, DFLL_OUTPUT_CFG);
+ dfll_wmb(td);
+
+ val |= DFLL_OUTPUT_CFG_PWM_ENABLE;
+ dfll_writel(td, val, DFLL_OUTPUT_CFG);
+ dfll_wmb(td);
+ } else {
+ ret = pinctrl_select_state(td->pwm_pin, td->pwm_disable_state);
+ if (ret < 0)
+ dev_warn(td->dev, "setting disable state failed\n");
+
+ val = dfll_readl(td, DFLL_OUTPUT_CFG);
+ val &= ~DFLL_OUTPUT_CFG_PWM_ENABLE;
+ dfll_writel(td, val, DFLL_OUTPUT_CFG);
+ dfll_wmb(td);
+ }
+
+ return 0;
+}
+
+/**
+ * dfll_set_force_output_value - set fixed value for force output
+ * @td: DFLL instance
+ * @out_val: value to force output
+ *
+ * Set the fixed value for force output; the DFLL will output this value when
+ * force output is enabled.
+ */
+static u32 dfll_set_force_output_value(struct tegra_dfll *td, u8 out_val)
+{
+ u32 val = dfll_readl(td, DFLL_OUTPUT_FORCE);
+
+ val = (val & DFLL_OUTPUT_FORCE_ENABLE) | (out_val & OUT_MASK);
+ dfll_writel(td, val, DFLL_OUTPUT_FORCE);
+ dfll_wmb(td);
+
+ return dfll_readl(td, DFLL_OUTPUT_FORCE);
+}
+
+/**
+ * dfll_set_force_output_enabled - enable/disable force output
+ * @td: DFLL instance
+ * @enable: whether to enable or disable the force output
+ *
+ * Set the enable control for force output with a fixed value.
+ */
+static void dfll_set_force_output_enabled(struct tegra_dfll *td, bool enable)
+{
+ u32 val = dfll_readl(td, DFLL_OUTPUT_FORCE);
+
+ if (enable)
+ val |= DFLL_OUTPUT_FORCE_ENABLE;
+ else
+ val &= ~DFLL_OUTPUT_FORCE_ENABLE;
+
+ dfll_writel(td, val, DFLL_OUTPUT_FORCE);
+ dfll_wmb(td);
+}
+
+/**
+ * dfll_force_output - force output a fixed value
+ * @td: DFLL instance
+ * @out_sel: value to force output
+ *
+ * Set the fixed value for force output; the DFLL will output this value.
+ */
+static int dfll_force_output(struct tegra_dfll *td, unsigned int out_sel)
+{
+ u32 val;
+
+ if (out_sel > OUT_MASK)
+ return -EINVAL;
+
+ val = dfll_set_force_output_value(td, out_sel);
+ if ((td->mode < DFLL_CLOSED_LOOP) &&
+ !(val & DFLL_OUTPUT_FORCE_ENABLE)) {
+ dfll_set_force_output_enabled(td, true);
+ }
+
+ return 0;
+}
+
/**
* dfll_load_lut - load the voltage lookup table
* @td: struct tegra_dfll *
@@ -539,7 +695,7 @@ static void dfll_load_i2c_lut(struct tegra_dfll *td)
lut_index = i;
val = regulator_list_hardware_vsel(td->vdd_reg,
- td->i2c_lut[lut_index]);
+ td->lut[lut_index]);
__raw_writel(val, td->lut_base + i * 4);
}
@@ -594,24 +750,41 @@ static void dfll_init_out_if(struct tegra_dfll *td)
{
u32 val;
- td->lut_min = 0;
- td->lut_max = td->i2c_lut_size - 1;
- td->lut_safe = td->lut_min + 1;
+ td->lut_min = td->lut_bottom;
+ td->lut_max = td->lut_size - 1;
+ td->lut_safe = td->lut_min + (td->lut_min < td->lut_max ? 1 : 0);
+
+ /* clear DFLL_OUTPUT_CFG before setting new value */
+ dfll_writel(td, 0, DFLL_OUTPUT_CFG);
+ dfll_wmb(td);
- dfll_i2c_writel(td, 0, DFLL_OUTPUT_CFG);
val = (td->lut_safe << DFLL_OUTPUT_CFG_SAFE_SHIFT) |
- (td->lut_max << DFLL_OUTPUT_CFG_MAX_SHIFT) |
- (td->lut_min << DFLL_OUTPUT_CFG_MIN_SHIFT);
- dfll_i2c_writel(td, val, DFLL_OUTPUT_CFG);
- dfll_i2c_wmb(td);
+ (td->lut_max << DFLL_OUTPUT_CFG_MAX_SHIFT) |
+ (td->lut_min << DFLL_OUTPUT_CFG_MIN_SHIFT);
+ dfll_writel(td, val, DFLL_OUTPUT_CFG);
+ dfll_wmb(td);
dfll_writel(td, 0, DFLL_OUTPUT_FORCE);
dfll_i2c_writel(td, 0, DFLL_INTR_EN);
dfll_i2c_writel(td, DFLL_INTR_MAX_MASK | DFLL_INTR_MIN_MASK,
DFLL_INTR_STS);
- dfll_load_i2c_lut(td);
- dfll_init_i2c_if(td);
+ if (td->pmu_if == TEGRA_DFLL_PMU_PWM) {
+ u32 vinit = td->reg_init_uV;
+ int vstep = td->soc->alignment.step_uv;
+ unsigned long vmin = td->lut_uv[0];
+
+ /* set initial voltage */
+ if ((vinit >= vmin) && vstep) {
+ unsigned int vsel;
+
+ vsel = DIV_ROUND_UP((vinit - vmin), vstep);
+ dfll_force_output(td, vsel);
+ }
+ } else {
+ dfll_load_i2c_lut(td);
+ dfll_init_i2c_if(td);
+ }
}
/*
@@ -631,17 +804,17 @@ static void dfll_init_out_if(struct tegra_dfll *td)
static int find_lut_index_for_rate(struct tegra_dfll *td, unsigned long rate)
{
struct dev_pm_opp *opp;
- int i, uv;
+ int i, align_step;
opp = dev_pm_opp_find_freq_ceil(td->soc->dev, &rate);
if (IS_ERR(opp))
return PTR_ERR(opp);
- uv = dev_pm_opp_get_voltage(opp);
+ align_step = dev_pm_opp_get_voltage(opp) / td->soc->alignment.step_uv;
dev_pm_opp_put(opp);
- for (i = 0; i < td->i2c_lut_size; i++) {
- if (regulator_list_voltage(td->vdd_reg, td->i2c_lut[i]) == uv)
+ for (i = td->lut_bottom; i < td->lut_size; i++) {
+ if ((td->lut_uv[i] / td->soc->alignment.step_uv) >= align_step)
return i;
}
@@ -863,9 +1036,14 @@ static int dfll_lock(struct tegra_dfll *td)
return -EINVAL;
}
- dfll_i2c_set_output_enabled(td, true);
+ if (td->pmu_if == TEGRA_DFLL_PMU_PWM)
+ dfll_pwm_set_output_enabled(td, true);
+ else
+ dfll_i2c_set_output_enabled(td, true);
+
dfll_set_mode(td, DFLL_CLOSED_LOOP);
dfll_set_frequency_request(td, req);
+ dfll_set_force_output_enabled(td, false);
return 0;
default:
@@ -889,7 +1067,10 @@ static int dfll_unlock(struct tegra_dfll *td)
case DFLL_CLOSED_LOOP:
dfll_set_open_loop_config(td);
dfll_set_mode(td, DFLL_OPEN_LOOP);
- dfll_i2c_set_output_enabled(td, false);
+ if (td->pmu_if == TEGRA_DFLL_PMU_PWM)
+ dfll_pwm_set_output_enabled(td, false);
+ else
+ dfll_i2c_set_output_enabled(td, false);
return 0;
case DFLL_OPEN_LOOP:
@@ -1171,15 +1352,17 @@ static int attr_registers_show(struct seq_file *s, void *data)
seq_printf(s, "[0x%02x] = 0x%08x\n", offs,
dfll_i2c_readl(td, offs));
- seq_puts(s, "\nINTEGRATED I2C CONTROLLER REGISTERS:\n");
- offs = DFLL_I2C_CLK_DIVISOR;
- seq_printf(s, "[0x%02x] = 0x%08x\n", offs,
- __raw_readl(td->i2c_controller_base + offs));
-
- seq_puts(s, "\nLUT:\n");
- for (offs = 0; offs < 4 * MAX_DFLL_VOLTAGES; offs += 4)
+ if (td->pmu_if == TEGRA_DFLL_PMU_I2C) {
+ seq_puts(s, "\nINTEGRATED I2C CONTROLLER REGISTERS:\n");
+ offs = DFLL_I2C_CLK_DIVISOR;
seq_printf(s, "[0x%02x] = 0x%08x\n", offs,
- __raw_readl(td->lut_base + offs));
+ __raw_readl(td->i2c_controller_base + offs));
+
+ seq_puts(s, "\nLUT:\n");
+ for (offs = 0; offs < 4 * MAX_DFLL_VOLTAGES; offs += 4)
+ seq_printf(s, "[0x%02x] = 0x%08x\n", offs,
+ __raw_readl(td->lut_base + offs));
+ }
return 0;
}
@@ -1349,15 +1532,21 @@ di_err1:
*/
static int find_vdd_map_entry_exact(struct tegra_dfll *td, int uV)
{
- int i, n_voltages, reg_uV;
+ int i, n_voltages, reg_uV, reg_volt_id, align_step;
+
+ if (WARN_ON(td->pmu_if == TEGRA_DFLL_PMU_PWM))
+ return -EINVAL;
+ align_step = uV / td->soc->alignment.step_uv;
n_voltages = regulator_count_voltages(td->vdd_reg);
for (i = 0; i < n_voltages; i++) {
reg_uV = regulator_list_voltage(td->vdd_reg, i);
if (reg_uV < 0)
break;
- if (uV == reg_uV)
+ reg_volt_id = reg_uV / td->soc->alignment.step_uv;
+
+ if (align_step == reg_volt_id)
return i;
}
@@ -1371,15 +1560,21 @@ static int find_vdd_map_entry_exact(struct tegra_dfll *td, int uV)
* */
static int find_vdd_map_entry_min(struct tegra_dfll *td, int uV)
{
- int i, n_voltages, reg_uV;
+ int i, n_voltages, reg_uV, reg_volt_id, align_step;
+ if (WARN_ON(td->pmu_if == TEGRA_DFLL_PMU_PWM))
+ return -EINVAL;
+
+ align_step = uV / td->soc->alignment.step_uv;
n_voltages = regulator_count_voltages(td->vdd_reg);
for (i = 0; i < n_voltages; i++) {
reg_uV = regulator_list_voltage(td->vdd_reg, i);
if (reg_uV < 0)
break;
- if (uV <= reg_uV)
+ reg_volt_id = reg_uV / td->soc->alignment.step_uv;
+
+ if (align_step <= reg_volt_id)
return i;
}
@@ -1387,9 +1582,61 @@ static int find_vdd_map_entry_min(struct tegra_dfll *td, int uV)
return -EINVAL;
}
+/*
+ * dfll_build_pwm_lut - build the PWM regulator lookup table
+ * @td: DFLL instance
+ * @v_max: Vmax from OPP table
+ *
+ * The look-up table in h/w is ignored when PWM is used as the DFLL interface
+ * to the PMIC. In that case the closed-loop output controls the duty cycle
+ * directly. The s/w look-up that maps PWM duty cycle to voltage is still built
+ * by this function.
+ */
+static int dfll_build_pwm_lut(struct tegra_dfll *td, unsigned long v_max)
+{
+ int i;
+ unsigned long rate, reg_volt;
+ u8 lut_bottom = MAX_DFLL_VOLTAGES;
+ int v_min = td->soc->cvb->min_millivolts * 1000;
+
+ for (i = 0; i < MAX_DFLL_VOLTAGES; i++) {
+ reg_volt = td->lut_uv[i];
+
+ /* since opp voltage is exact mv */
+ reg_volt = (reg_volt / 1000) * 1000;
+ if (reg_volt > v_max)
+ break;
+
+ td->lut[i] = i;
+ if ((lut_bottom == MAX_DFLL_VOLTAGES) && (reg_volt >= v_min))
+ lut_bottom = i;
+ }
+
+ /* determine voltage boundaries */
+ td->lut_size = i;
+ if ((lut_bottom == MAX_DFLL_VOLTAGES) ||
+ (lut_bottom + 1 >= td->lut_size)) {
+ dev_err(td->dev, "no voltage above DFLL minimum %d mV\n",
+ td->soc->cvb->min_millivolts);
+ return -EINVAL;
+ }
+ td->lut_bottom = lut_bottom;
+
+ /* determine rate boundaries */
+ rate = get_dvco_rate_below(td, td->lut_bottom);
+ if (!rate) {
+ dev_err(td->dev, "no opp below DFLL minimum voltage %d mV\n",
+ td->soc->cvb->min_millivolts);
+ return -EINVAL;
+ }
+ td->dvco_rate_min = rate;
+
+ return 0;
+}
+
/**
* dfll_build_i2c_lut - build the I2C voltage register lookup table
* @td: DFLL instance
+ * @v_max: Vmax from OPP table
*
* The DFLL hardware has 33 bytes of look-up table RAM that must be filled with
* PMIC voltage register values that span the entire DFLL operating range.
@@ -1397,33 +1644,24 @@ static int find_vdd_map_entry_min(struct tegra_dfll *td, int uV)
* the soc-specific platform driver (td->soc->opp_dev) and the PMIC
* register-to-voltage mapping queried from the regulator framework.
*
- * On success, fills in td->i2c_lut and returns 0, or -err on failure.
+ * On success, fills in td->lut and returns 0, or -err on failure.
*/
-static int dfll_build_i2c_lut(struct tegra_dfll *td)
+static int dfll_build_i2c_lut(struct tegra_dfll *td, unsigned long v_max)
{
+ unsigned long rate, v, v_opp;
int ret = -EINVAL;
- int j, v, v_max, v_opp;
- int selector;
- unsigned long rate;
- struct dev_pm_opp *opp;
- int lut;
-
- rate = ULONG_MAX;
- opp = dev_pm_opp_find_freq_floor(td->soc->dev, &rate);
- if (IS_ERR(opp)) {
- dev_err(td->dev, "couldn't get vmax opp, empty opp table?\n");
- goto out;
- }
- v_max = dev_pm_opp_get_voltage(opp);
- dev_pm_opp_put(opp);
+ int j, selector, lut;
v = td->soc->cvb->min_millivolts * 1000;
lut = find_vdd_map_entry_exact(td, v);
if (lut < 0)
goto out;
- td->i2c_lut[0] = lut;
+ td->lut[0] = lut;
+ td->lut_bottom = 0;
for (j = 1, rate = 0; ; rate++) {
+ struct dev_pm_opp *opp;
+
opp = dev_pm_opp_find_freq_ceil(td->soc->dev, &rate);
if (IS_ERR(opp))
break;
@@ -1435,39 +1673,64 @@ static int dfll_build_i2c_lut(struct tegra_dfll *td)
dev_pm_opp_put(opp);
for (;;) {
- v += max(1, (v_max - v) / (MAX_DFLL_VOLTAGES - j));
+ v += max(1UL, (v_max - v) / (MAX_DFLL_VOLTAGES - j));
if (v >= v_opp)
break;
selector = find_vdd_map_entry_min(td, v);
if (selector < 0)
goto out;
- if (selector != td->i2c_lut[j - 1])
- td->i2c_lut[j++] = selector;
+ if (selector != td->lut[j - 1])
+ td->lut[j++] = selector;
}
v = (j == MAX_DFLL_VOLTAGES - 1) ? v_max : v_opp;
selector = find_vdd_map_entry_exact(td, v);
if (selector < 0)
goto out;
- if (selector != td->i2c_lut[j - 1])
- td->i2c_lut[j++] = selector;
+ if (selector != td->lut[j - 1])
+ td->lut[j++] = selector;
if (v >= v_max)
break;
}
- td->i2c_lut_size = j;
+ td->lut_size = j;
if (!td->dvco_rate_min)
dev_err(td->dev, "no opp above DFLL minimum voltage %d mV\n",
td->soc->cvb->min_millivolts);
- else
+ else {
ret = 0;
+ for (j = 0; j < td->lut_size; j++)
+ td->lut_uv[j] =
+ regulator_list_voltage(td->vdd_reg,
+ td->lut[j]);
+ }
out:
return ret;
}
+static int dfll_build_lut(struct tegra_dfll *td)
+{
+ unsigned long rate, v_max;
+ struct dev_pm_opp *opp;
+
+ rate = ULONG_MAX;
+ opp = dev_pm_opp_find_freq_floor(td->soc->dev, &rate);
+ if (IS_ERR(opp)) {
+ dev_err(td->dev, "couldn't get vmax opp, empty opp table?\n");
+ return -EINVAL;
+ }
+ v_max = dev_pm_opp_get_voltage(opp);
+ dev_pm_opp_put(opp);
+
+ if (td->pmu_if == TEGRA_DFLL_PMU_PWM)
+ return dfll_build_pwm_lut(td, v_max);
+ else
+ return dfll_build_i2c_lut(td, v_max);
+}
+
/**
* read_dt_param - helper function for reading required parameters from the DT
* @td: DFLL instance
@@ -1526,11 +1789,56 @@ static int dfll_fetch_i2c_params(struct tegra_dfll *td)
}
td->i2c_reg = vsel_reg;
- ret = dfll_build_i2c_lut(td);
- if (ret) {
- dev_err(td->dev, "couldn't build I2C LUT\n");
+ return 0;
+}
+
+static int dfll_fetch_pwm_params(struct tegra_dfll *td)
+{
+ int ret, i;
+ u32 pwm_period;
+
+ if (!td->soc->alignment.step_uv || !td->soc->alignment.offset_uv) {
+ dev_err(td->dev,
+ "Missing step or alignment info for PWM regulator");
+ return -EINVAL;
+ }
+ for (i = 0; i < MAX_DFLL_VOLTAGES; i++)
+ td->lut_uv[i] = td->soc->alignment.offset_uv +
+ i * td->soc->alignment.step_uv;
+
+ ret = read_dt_param(td, "nvidia,pwm-tristate-microvolts",
+ &td->reg_init_uV);
+ if (!ret) {
+ dev_err(td->dev, "couldn't get initialized voltage\n");
+ return ret;
+ }
+
+ ret = read_dt_param(td, "nvidia,pwm-period-nanoseconds", &pwm_period);
+ if (!ret) {
+ dev_err(td->dev, "couldn't get PWM period\n");
return ret;
}
+ td->pwm_rate = (NSEC_PER_SEC / pwm_period) * (MAX_DFLL_VOLTAGES - 1);
+
+ td->pwm_pin = devm_pinctrl_get(td->dev);
+ if (IS_ERR(td->pwm_pin)) {
+ dev_err(td->dev, "DT: missing pinctrl device\n");
+ return PTR_ERR(td->pwm_pin);
+ }
+
+ td->pwm_enable_state = pinctrl_lookup_state(td->pwm_pin,
+ "dvfs_pwm_enable");
+ if (IS_ERR(td->pwm_enable_state)) {
+ dev_err(td->dev, "DT: missing pwm enabled state\n");
+ return PTR_ERR(td->pwm_enable_state);
+ }
+
+ td->pwm_disable_state = pinctrl_lookup_state(td->pwm_pin,
+ "dvfs_pwm_disable");
+ if (IS_ERR(td->pwm_disable_state)) {
+ dev_err(td->dev, "DT: missing pwm disabled state\n");
+ return PTR_ERR(td->pwm_disable_state);
+ }
return 0;
}
@@ -1597,16 +1905,6 @@ int tegra_dfll_register(struct platform_device *pdev,
td->soc = soc;
- td->vdd_reg = devm_regulator_get(td->dev, "vdd-cpu");
- if (IS_ERR(td->vdd_reg)) {
- ret = PTR_ERR(td->vdd_reg);
- if (ret != -EPROBE_DEFER)
- dev_err(td->dev, "couldn't get vdd_cpu regulator: %d\n",
- ret);
-
- return ret;
- }
-
td->dvco_rst = devm_reset_control_get(td->dev, "dvco");
if (IS_ERR(td->dvco_rst)) {
dev_err(td->dev, "couldn't get dvco reset\n");
@@ -1619,10 +1917,27 @@ int tegra_dfll_register(struct platform_device *pdev,
return ret;
}
- ret = dfll_fetch_i2c_params(td);
+ if (of_property_read_bool(td->dev->of_node, "nvidia,pwm-to-pmic")) {
+ td->pmu_if = TEGRA_DFLL_PMU_PWM;
+ ret = dfll_fetch_pwm_params(td);
+ } else {
+ td->vdd_reg = devm_regulator_get(td->dev, "vdd-cpu");
+ if (IS_ERR(td->vdd_reg)) {
+ dev_err(td->dev, "couldn't get vdd_cpu regulator\n");
+ return PTR_ERR(td->vdd_reg);
+ }
+ td->pmu_if = TEGRA_DFLL_PMU_I2C;
+ ret = dfll_fetch_i2c_params(td);
+ }
if (ret)
return ret;
+ ret = dfll_build_lut(td);
+ if (ret) {
+ dev_err(td->dev, "couldn't build LUT\n");
+ return ret;
+ }
+
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem) {
dev_err(td->dev, "no control register resource\n");
diff --git a/drivers/clk/tegra/clk-dfll.h b/drivers/clk/tegra/clk-dfll.h
index 83352c8078f2..85d0d95223f3 100644
--- a/drivers/clk/tegra/clk-dfll.h
+++ b/drivers/clk/tegra/clk-dfll.h
@@ -1,6 +1,6 @@
/*
* clk-dfll.h - prototypes and macros for the Tegra DFLL clocksource driver
- * Copyright (C) 2013 NVIDIA Corporation. All rights reserved.
+ * Copyright (C) 2013-2019 NVIDIA Corporation. All rights reserved.
*
* Aleksandr Frid <afrid@nvidia.com>
* Paul Walmsley <pwalmsley@nvidia.com>
@@ -22,11 +22,14 @@
#include <linux/reset.h>
#include <linux/types.h>
+#include "cvb.h"
+
/**
* struct tegra_dfll_soc_data - SoC-specific hooks/integration for the DFLL driver
* @dev: struct device * that holds the OPP table for the DFLL
* @max_freq: maximum frequency supported on this SoC
* @cvb: CPU frequency table for this SoC
+ * @alignment: parameters of the regulator step and offset
* @init_clock_trimmers: callback to initialize clock trimmers
* @set_clock_trimmers_high: callback to tune clock trimmers for high voltage
* @set_clock_trimmers_low: callback to tune clock trimmers for low voltage
@@ -35,6 +38,7 @@ struct tegra_dfll_soc_data {
struct device *dev;
unsigned long max_freq;
const struct cvb_table *cvb;
+ struct rail_alignment alignment;
void (*init_clock_trimmers)(void);
void (*set_clock_trimmers_high)(void);
diff --git a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
index edc31bb56674..e8ec42bf8638 100644
--- a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
+++ b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
@@ -1,7 +1,7 @@
/*
* Tegra124 DFLL FCPU clock source driver
*
- * Copyright (C) 2012-2014 NVIDIA Corporation. All rights reserved.
+ * Copyright (C) 2012-2019 NVIDIA Corporation. All rights reserved.
*
* Aleksandr Frid <afrid@nvidia.com>
* Paul Walmsley <pwalmsley@nvidia.com>
@@ -21,15 +21,24 @@
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
#include <soc/tegra/fuse.h>
#include "clk.h"
#include "clk-dfll.h"
#include "cvb.h"
+struct dfll_fcpu_data {
+ const unsigned long *cpu_max_freq_table;
+ unsigned int cpu_max_freq_table_size;
+ const struct cvb_table *cpu_cvb_tables;
+ unsigned int cpu_cvb_tables_size;
+};
+
/* Maximum CPU frequency, indexed by CPU speedo id */
-static const unsigned long cpu_max_freq_table[] = {
+static const unsigned long tegra124_cpu_max_freq_table[] = {
[0] = 2014500000UL,
[1] = 2320500000UL,
[2] = 2116500000UL,
@@ -42,9 +51,6 @@ static const struct cvb_table tegra124_cpu_cvb_tables[] = {
.process_id = -1,
.min_millivolts = 900,
.max_millivolts = 1260,
- .alignment = {
- .step_uv = 10000, /* 10mV */
- },
.speedo_scale = 100,
.voltage_scale = 1000,
.entries = {
@@ -82,16 +88,493 @@ static const struct cvb_table tegra124_cpu_cvb_tables[] = {
},
};
+static const unsigned long tegra210_cpu_max_freq_table[] = {
+ [0] = 1912500000UL,
+ [1] = 1912500000UL,
+ [2] = 2218500000UL,
+ [3] = 1785000000UL,
+ [4] = 1632000000UL,
+ [5] = 1912500000UL,
+ [6] = 2014500000UL,
+ [7] = 1734000000UL,
+ [8] = 1683000000UL,
+ [9] = 1555500000UL,
+ [10] = 1504500000UL,
+};
+
+#define CPU_CVB_TABLE \
+ .speedo_scale = 100, \
+ .voltage_scale = 1000, \
+ .entries = { \
+ { 204000000UL, { 1007452, -23865, 370 } }, \
+ { 306000000UL, { 1052709, -24875, 370 } }, \
+ { 408000000UL, { 1099069, -25895, 370 } }, \
+ { 510000000UL, { 1146534, -26905, 370 } }, \
+ { 612000000UL, { 1195102, -27915, 370 } }, \
+ { 714000000UL, { 1244773, -28925, 370 } }, \
+ { 816000000UL, { 1295549, -29935, 370 } }, \
+ { 918000000UL, { 1347428, -30955, 370 } }, \
+ { 1020000000UL, { 1400411, -31965, 370 } }, \
+ { 1122000000UL, { 1454497, -32975, 370 } }, \
+ { 1224000000UL, { 1509687, -33985, 370 } }, \
+ { 1326000000UL, { 1565981, -35005, 370 } }, \
+ { 1428000000UL, { 1623379, -36015, 370 } }, \
+ { 1530000000UL, { 1681880, -37025, 370 } }, \
+ { 1632000000UL, { 1741485, -38035, 370 } }, \
+ { 1734000000UL, { 1802194, -39055, 370 } }, \
+ { 1836000000UL, { 1864006, -40065, 370 } }, \
+ { 1912500000UL, { 1910780, -40815, 370 } }, \
+ { 2014500000UL, { 1227000, 0, 0 } }, \
+ { 2218500000UL, { 1227000, 0, 0 } }, \
+ { 0UL, { 0, 0, 0 } }, \
+ }
+
+#define CPU_CVB_TABLE_XA \
+ .speedo_scale = 100, \
+ .voltage_scale = 1000, \
+ .entries = { \
+ { 204000000UL, { 1250024, -39785, 565 } }, \
+ { 306000000UL, { 1297556, -41145, 565 } }, \
+ { 408000000UL, { 1346718, -42505, 565 } }, \
+ { 510000000UL, { 1397511, -43855, 565 } }, \
+ { 612000000UL, { 1449933, -45215, 565 } }, \
+ { 714000000UL, { 1503986, -46575, 565 } }, \
+ { 816000000UL, { 1559669, -47935, 565 } }, \
+ { 918000000UL, { 1616982, -49295, 565 } }, \
+ { 1020000000UL, { 1675926, -50645, 565 } }, \
+ { 1122000000UL, { 1736500, -52005, 565 } }, \
+ { 1224000000UL, { 1798704, -53365, 565 } }, \
+ { 1326000000UL, { 1862538, -54725, 565 } }, \
+ { 1428000000UL, { 1928003, -56085, 565 } }, \
+ { 1530000000UL, { 1995097, -57435, 565 } }, \
+ { 1606500000UL, { 2046149, -58445, 565 } }, \
+ { 1632000000UL, { 2063822, -58795, 565 } }, \
+ { 0UL, { 0, 0, 0 } }, \
+ }
+
+#define CPU_CVB_TABLE_EUCM1 \
+ .speedo_scale = 100, \
+ .voltage_scale = 1000, \
+ .entries = { \
+ { 204000000UL, { 734429, 0, 0 } }, \
+ { 306000000UL, { 768191, 0, 0 } }, \
+ { 408000000UL, { 801953, 0, 0 } }, \
+ { 510000000UL, { 835715, 0, 0 } }, \
+ { 612000000UL, { 869477, 0, 0 } }, \
+ { 714000000UL, { 903239, 0, 0 } }, \
+ { 816000000UL, { 937001, 0, 0 } }, \
+ { 918000000UL, { 970763, 0, 0 } }, \
+ { 1020000000UL, { 1004525, 0, 0 } }, \
+ { 1122000000UL, { 1038287, 0, 0 } }, \
+ { 1224000000UL, { 1072049, 0, 0 } }, \
+ { 1326000000UL, { 1105811, 0, 0 } }, \
+ { 1428000000UL, { 1130000, 0, 0 } }, \
+ { 1555500000UL, { 1130000, 0, 0 } }, \
+ { 1632000000UL, { 1170000, 0, 0 } }, \
+ { 1734000000UL, { 1227500, 0, 0 } }, \
+ { 0UL, { 0, 0, 0 } }, \
+ }
+
+#define CPU_CVB_TABLE_EUCM2 \
+ .speedo_scale = 100, \
+ .voltage_scale = 1000, \
+ .entries = { \
+ { 204000000UL, { 742283, 0, 0 } }, \
+ { 306000000UL, { 776249, 0, 0 } }, \
+ { 408000000UL, { 810215, 0, 0 } }, \
+ { 510000000UL, { 844181, 0, 0 } }, \
+ { 612000000UL, { 878147, 0, 0 } }, \
+ { 714000000UL, { 912113, 0, 0 } }, \
+ { 816000000UL, { 946079, 0, 0 } }, \
+ { 918000000UL, { 980045, 0, 0 } }, \
+ { 1020000000UL, { 1014011, 0, 0 } }, \
+ { 1122000000UL, { 1047977, 0, 0 } }, \
+ { 1224000000UL, { 1081943, 0, 0 } }, \
+ { 1326000000UL, { 1090000, 0, 0 } }, \
+ { 1479000000UL, { 1090000, 0, 0 } }, \
+ { 1555500000UL, { 1162000, 0, 0 } }, \
+ { 1683000000UL, { 1195000, 0, 0 } }, \
+ { 0UL, { 0, 0, 0 } }, \
+ }
+
+#define CPU_CVB_TABLE_EUCM2_JOINT_RAIL \
+ .speedo_scale = 100, \
+ .voltage_scale = 1000, \
+ .entries = { \
+ { 204000000UL, { 742283, 0, 0 } }, \
+ { 306000000UL, { 776249, 0, 0 } }, \
+ { 408000000UL, { 810215, 0, 0 } }, \
+ { 510000000UL, { 844181, 0, 0 } }, \
+ { 612000000UL, { 878147, 0, 0 } }, \
+ { 714000000UL, { 912113, 0, 0 } }, \
+ { 816000000UL, { 946079, 0, 0 } }, \
+ { 918000000UL, { 980045, 0, 0 } }, \
+ { 1020000000UL, { 1014011, 0, 0 } }, \
+ { 1122000000UL, { 1047977, 0, 0 } }, \
+ { 1224000000UL, { 1081943, 0, 0 } }, \
+ { 1326000000UL, { 1090000, 0, 0 } }, \
+ { 1479000000UL, { 1090000, 0, 0 } }, \
+ { 1504500000UL, { 1120000, 0, 0 } }, \
+ { 0UL, { 0, 0, 0 } }, \
+ }
+
+#define CPU_CVB_TABLE_ODN \
+ .speedo_scale = 100, \
+ .voltage_scale = 1000, \
+ .entries = { \
+ { 204000000UL, { 721094, 0, 0 } }, \
+ { 306000000UL, { 754040, 0, 0 } }, \
+ { 408000000UL, { 786986, 0, 0 } }, \
+ { 510000000UL, { 819932, 0, 0 } }, \
+ { 612000000UL, { 852878, 0, 0 } }, \
+ { 714000000UL, { 885824, 0, 0 } }, \
+ { 816000000UL, { 918770, 0, 0 } }, \
+ { 918000000UL, { 915716, 0, 0 } }, \
+ { 1020000000UL, { 984662, 0, 0 } }, \
+ { 1122000000UL, { 1017608, 0, 0 } }, \
+ { 1224000000UL, { 1050554, 0, 0 } }, \
+ { 1326000000UL, { 1083500, 0, 0 } }, \
+ { 1428000000UL, { 1116446, 0, 0 } }, \
+ { 1581000000UL, { 1130000, 0, 0 } }, \
+ { 1683000000UL, { 1168000, 0, 0 } }, \
+ { 1785000000UL, { 1227500, 0, 0 } }, \
+ { 0UL, { 0, 0, 0 } }, \
+ }
+
+static struct cvb_table tegra210_cpu_cvb_tables[] = {
+ {
+ .speedo_id = 10,
+ .process_id = 0,
+ .min_millivolts = 840,
+ .max_millivolts = 1120,
+ CPU_CVB_TABLE_EUCM2_JOINT_RAIL,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x20091d9,
+ .tune_high_min_millivolts = 864,
+ }
+ },
+ {
+ .speedo_id = 10,
+ .process_id = 1,
+ .min_millivolts = 840,
+ .max_millivolts = 1120,
+ CPU_CVB_TABLE_EUCM2_JOINT_RAIL,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x20091d9,
+ .tune_high_min_millivolts = 864,
+ }
+ },
+ {
+ .speedo_id = 9,
+ .process_id = 0,
+ .min_millivolts = 900,
+ .max_millivolts = 1162,
+ CPU_CVB_TABLE_EUCM2,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x20091d9,
+ }
+ },
+ {
+ .speedo_id = 9,
+ .process_id = 1,
+ .min_millivolts = 900,
+ .max_millivolts = 1162,
+ CPU_CVB_TABLE_EUCM2,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x20091d9,
+ }
+ },
+ {
+ .speedo_id = 8,
+ .process_id = 0,
+ .min_millivolts = 900,
+ .max_millivolts = 1195,
+ CPU_CVB_TABLE_EUCM2,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x20091d9,
+ }
+ },
+ {
+ .speedo_id = 8,
+ .process_id = 1,
+ .min_millivolts = 900,
+ .max_millivolts = 1195,
+ CPU_CVB_TABLE_EUCM2,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x20091d9,
+ }
+ },
+ {
+ .speedo_id = 7,
+ .process_id = 0,
+ .min_millivolts = 841,
+ .max_millivolts = 1227,
+ CPU_CVB_TABLE_EUCM1,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x20091d9,
+ .tune_high_min_millivolts = 864,
+ }
+ },
+ {
+ .speedo_id = 7,
+ .process_id = 1,
+ .min_millivolts = 841,
+ .max_millivolts = 1227,
+ CPU_CVB_TABLE_EUCM1,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x20091d9,
+ .tune_high_min_millivolts = 864,
+ }
+ },
+ {
+ .speedo_id = 6,
+ .process_id = 0,
+ .min_millivolts = 870,
+ .max_millivolts = 1150,
+ CPU_CVB_TABLE,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune1 = 0x20091d9,
+ }
+ },
+ {
+ .speedo_id = 6,
+ .process_id = 1,
+ .min_millivolts = 870,
+ .max_millivolts = 1150,
+ CPU_CVB_TABLE,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune1 = 0x25501d0,
+ }
+ },
+ {
+ .speedo_id = 5,
+ .process_id = 0,
+ .min_millivolts = 818,
+ .max_millivolts = 1227,
+ CPU_CVB_TABLE,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x20091d9,
+ .tune_high_min_millivolts = 864,
+ }
+ },
+ {
+ .speedo_id = 5,
+ .process_id = 1,
+ .min_millivolts = 818,
+ .max_millivolts = 1227,
+ CPU_CVB_TABLE,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x25501d0,
+ .tune_high_min_millivolts = 864,
+ }
+ },
+ {
+ .speedo_id = 4,
+ .process_id = -1,
+ .min_millivolts = 918,
+ .max_millivolts = 1113,
+ CPU_CVB_TABLE_XA,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune1 = 0x17711BD,
+ }
+ },
+ {
+ .speedo_id = 3,
+ .process_id = 0,
+ .min_millivolts = 825,
+ .max_millivolts = 1227,
+ CPU_CVB_TABLE_ODN,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x20091d9,
+ .tune_high_min_millivolts = 864,
+ }
+ },
+ {
+ .speedo_id = 3,
+ .process_id = 1,
+ .min_millivolts = 825,
+ .max_millivolts = 1227,
+ CPU_CVB_TABLE_ODN,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x25501d0,
+ .tune_high_min_millivolts = 864,
+ }
+ },
+ {
+ .speedo_id = 2,
+ .process_id = 0,
+ .min_millivolts = 870,
+ .max_millivolts = 1227,
+ CPU_CVB_TABLE,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune1 = 0x20091d9,
+ }
+ },
+ {
+ .speedo_id = 2,
+ .process_id = 1,
+ .min_millivolts = 870,
+ .max_millivolts = 1227,
+ CPU_CVB_TABLE,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune1 = 0x25501d0,
+ }
+ },
+ {
+ .speedo_id = 1,
+ .process_id = 0,
+ .min_millivolts = 837,
+ .max_millivolts = 1227,
+ CPU_CVB_TABLE,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x20091d9,
+ .tune_high_min_millivolts = 864,
+ }
+ },
+ {
+ .speedo_id = 1,
+ .process_id = 1,
+ .min_millivolts = 837,
+ .max_millivolts = 1227,
+ CPU_CVB_TABLE,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x25501d0,
+ .tune_high_min_millivolts = 864,
+ }
+ },
+ {
+ .speedo_id = 0,
+ .process_id = 0,
+ .min_millivolts = 850,
+ .max_millivolts = 1170,
+ CPU_CVB_TABLE,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x20091d9,
+ .tune_high_min_millivolts = 864,
+ }
+ },
+ {
+ .speedo_id = 0,
+ .process_id = 1,
+ .min_millivolts = 850,
+ .max_millivolts = 1170,
+ CPU_CVB_TABLE,
+ .cpu_dfll_data = {
+ .tune0_low = 0xffead0ff,
+ .tune0_high = 0xffead0ff,
+ .tune1 = 0x25501d0,
+ .tune_high_min_millivolts = 864,
+ }
+ },
+};
+
+static const struct dfll_fcpu_data tegra124_dfll_fcpu_data = {
+ .cpu_max_freq_table = tegra124_cpu_max_freq_table,
+ .cpu_max_freq_table_size = ARRAY_SIZE(tegra124_cpu_max_freq_table),
+ .cpu_cvb_tables = tegra124_cpu_cvb_tables,
+ .cpu_cvb_tables_size = ARRAY_SIZE(tegra124_cpu_cvb_tables)
+};
+
+static const struct dfll_fcpu_data tegra210_dfll_fcpu_data = {
+ .cpu_max_freq_table = tegra210_cpu_max_freq_table,
+ .cpu_max_freq_table_size = ARRAY_SIZE(tegra210_cpu_max_freq_table),
+ .cpu_cvb_tables = tegra210_cpu_cvb_tables,
+ .cpu_cvb_tables_size = ARRAY_SIZE(tegra210_cpu_cvb_tables),
+};
+
+static const struct of_device_id tegra124_dfll_fcpu_of_match[] = {
+ {
+ .compatible = "nvidia,tegra124-dfll",
+ .data = &tegra124_dfll_fcpu_data,
+ },
+ {
+ .compatible = "nvidia,tegra210-dfll",
+ .data = &tegra210_dfll_fcpu_data
+ },
+ { },
+};
+
+static void get_alignment_from_dt(struct device *dev,
+ struct rail_alignment *align)
+{
+ if (of_property_read_u32(dev->of_node,
+ "nvidia,pwm-voltage-step-microvolts",
+ &align->step_uv))
+ align->step_uv = 0;
+
+ if (of_property_read_u32(dev->of_node,
+ "nvidia,pwm-min-microvolts",
+ &align->offset_uv))
+ align->offset_uv = 0;
+}
+
+static int get_alignment_from_regulator(struct device *dev,
+ struct rail_alignment *align)
+{
+ struct regulator *reg = devm_regulator_get(dev, "vdd-cpu");
+
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ align->offset_uv = regulator_list_voltage(reg, 0);
+ align->step_uv = regulator_get_linear_step(reg);
+
+ devm_regulator_put(reg);
+
+ return 0;
+}
+
static int tegra124_dfll_fcpu_probe(struct platform_device *pdev)
{
int process_id, speedo_id, speedo_value, err;
struct tegra_dfll_soc_data *soc;
+ const struct dfll_fcpu_data *fcpu_data;
+ struct rail_alignment align;
+
+ fcpu_data = of_device_get_match_data(&pdev->dev);
+ if (!fcpu_data)
+ return -ENODEV;
process_id = tegra_sku_info.cpu_process_id;
speedo_id = tegra_sku_info.cpu_speedo_id;
speedo_value = tegra_sku_info.cpu_speedo_value;
- if (speedo_id >= ARRAY_SIZE(cpu_max_freq_table)) {
+ if (speedo_id >= fcpu_data->cpu_max_freq_table_size) {
dev_err(&pdev->dev, "unknown max CPU freq for speedo_id=%d\n",
speedo_id);
return -ENODEV;
@@ -107,12 +590,22 @@ static int tegra124_dfll_fcpu_probe(struct platform_device *pdev)
return -ENODEV;
}
- soc->max_freq = cpu_max_freq_table[speedo_id];
+ if (of_property_read_bool(pdev->dev.of_node, "nvidia,pwm-to-pmic")) {
+ get_alignment_from_dt(&pdev->dev, &align);
+ } else {
+ err = get_alignment_from_regulator(&pdev->dev, &align);
+ if (err)
+ return err;
+ }
+
+ soc->max_freq = fcpu_data->cpu_max_freq_table[speedo_id];
+
+ soc->cvb = tegra_cvb_add_opp_table(soc->dev, fcpu_data->cpu_cvb_tables,
+ fcpu_data->cpu_cvb_tables_size,
+ &align, process_id, speedo_id,
+ speedo_value, soc->max_freq);
+ soc->alignment = align;
- soc->cvb = tegra_cvb_add_opp_table(soc->dev, tegra124_cpu_cvb_tables,
- ARRAY_SIZE(tegra124_cpu_cvb_tables),
- process_id, speedo_id, speedo_value,
- soc->max_freq);
if (IS_ERR(soc->cvb)) {
dev_err(&pdev->dev, "couldn't add OPP table: %ld\n",
PTR_ERR(soc->cvb));
@@ -144,11 +637,6 @@ static int tegra124_dfll_fcpu_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id tegra124_dfll_fcpu_of_match[] = {
- { .compatible = "nvidia,tegra124-dfll", },
- { },
-};
-
static const struct dev_pm_ops tegra124_dfll_pm_ops = {
SET_RUNTIME_PM_OPS(tegra_dfll_runtime_suspend,
tegra_dfll_runtime_resume, NULL)
diff --git a/drivers/clk/tegra/cvb.c b/drivers/clk/tegra/cvb.c
index da9e8e7b5ce5..35eeb6adc68e 100644
--- a/drivers/clk/tegra/cvb.c
+++ b/drivers/clk/tegra/cvb.c
@@ -1,7 +1,7 @@
/*
* Utility functions for parsing Tegra CVB voltage tables
*
- * Copyright (C) 2012-2014 NVIDIA Corporation. All rights reserved.
+ * Copyright (C) 2012-2019 NVIDIA Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -62,9 +62,9 @@ static int round_voltage(int mv, const struct rail_alignment *align, int up)
}
static int build_opp_table(struct device *dev, const struct cvb_table *table,
+ struct rail_alignment *align,
int speedo_value, unsigned long max_freq)
{
- const struct rail_alignment *align = &table->alignment;
int i, ret, dfll_mv, min_mv, max_mv;
min_mv = round_voltage(table->min_millivolts, align, UP);
@@ -109,8 +109,9 @@ static int build_opp_table(struct device *dev, const struct cvb_table *table,
*/
const struct cvb_table *
tegra_cvb_add_opp_table(struct device *dev, const struct cvb_table *tables,
- size_t count, int process_id, int speedo_id,
- int speedo_value, unsigned long max_freq)
+ size_t count, struct rail_alignment *align,
+ int process_id, int speedo_id, int speedo_value,
+ unsigned long max_freq)
{
size_t i;
int ret;
@@ -124,7 +125,8 @@ tegra_cvb_add_opp_table(struct device *dev, const struct cvb_table *tables,
if (table->process_id != -1 && table->process_id != process_id)
continue;
- ret = build_opp_table(dev, table, speedo_value, max_freq);
+ ret = build_opp_table(dev, table, align, speedo_value,
+ max_freq);
return ret ? ERR_PTR(ret) : table;
}
diff --git a/drivers/clk/tegra/cvb.h b/drivers/clk/tegra/cvb.h
index c1f077993b2a..91a1941c21ef 100644
--- a/drivers/clk/tegra/cvb.h
+++ b/drivers/clk/tegra/cvb.h
@@ -41,6 +41,7 @@ struct cvb_cpu_dfll_data {
u32 tune0_low;
u32 tune0_high;
u32 tune1;
+ unsigned int tune_high_min_millivolts;
};
struct cvb_table {
@@ -49,7 +50,6 @@ struct cvb_table {
int min_millivolts;
int max_millivolts;
- struct rail_alignment alignment;
int speedo_scale;
int voltage_scale;
@@ -59,8 +59,9 @@ struct cvb_table {
const struct cvb_table *
tegra_cvb_add_opp_table(struct device *dev, const struct cvb_table *cvb_tables,
- size_t count, int process_id, int speedo_id,
- int speedo_value, unsigned long max_freq);
+ size_t count, struct rail_alignment *align,
+ int process_id, int speedo_id, int speedo_value,
+ unsigned long max_freq);
void tegra_cvb_remove_opp_table(struct device *dev,
const struct cvb_table *table,
unsigned long max_freq);
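Since the rail alignment no longer lives in struct cvb_table, every caller of tegra_cvb_add_opp_table() now has to supply one explicitly, as the probe path above does. A minimal sketch of the new call shape, assuming the offset_uv/step_uv fields declared in cvb.h and using illustrative values rather than anything read from a PMIC or device tree:

/* sketch only: alignment values are placeholders, not real Tegra rail data */
static int example_add_cpu_opps(struct device *dev,
                                const struct cvb_table *tables, size_t count,
                                int process_id, int speedo_id,
                                int speedo_value, unsigned long max_freq)
{
        struct rail_alignment align = {
                .offset_uv = 708000,    /* hypothetical base offset, in uV */
                .step_uv = 10000,       /* hypothetical regulator step, in uV */
        };
        const struct cvb_table *cvb;

        cvb = tegra_cvb_add_opp_table(dev, tables, count, &align, process_id,
                                      speedo_id, speedo_value, max_freq);

        return PTR_ERR_OR_ZERO(cvb);
}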
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 688f10227793..1a6778e81f90 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -272,8 +272,8 @@ config ARM_TEGRA20_CPUFREQ
This adds the CPUFreq driver support for Tegra20 SOCs.
config ARM_TEGRA124_CPUFREQ
- tristate "Tegra124 CPUFreq support"
- depends on ARCH_TEGRA && CPUFREQ_DT && REGULATOR
+ bool "Tegra124 CPUFreq support"
+ depends on ARCH_TEGRA && CPUFREQ_DT
default y
help
This adds the CPUFreq driver support for Tegra124 SOCs.
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index b1c5468dca16..47729a22c159 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -119,6 +119,7 @@ static const struct of_device_id blacklist[] __initconst = {
{ .compatible = "mediatek,mt8176", },
{ .compatible = "nvidia,tegra124", },
+ { .compatible = "nvidia,tegra210", },
{ .compatible = "qcom,apq8096", },
{ .compatible = "qcom,msm8996", },
diff --git a/drivers/cpufreq/tegra124-cpufreq.c b/drivers/cpufreq/tegra124-cpufreq.c
index 43530254201a..ba3795e13ac6 100644
--- a/drivers/cpufreq/tegra124-cpufreq.c
+++ b/drivers/cpufreq/tegra124-cpufreq.c
@@ -22,11 +22,9 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
-#include <linux/regulator/consumer.h>
#include <linux/types.h>
struct tegra124_cpufreq_priv {
- struct regulator *vdd_cpu_reg;
struct clk *cpu_clk;
struct clk *pllp_clk;
struct clk *pllx_clk;
@@ -60,14 +58,6 @@ out:
return ret;
}
-static void tegra124_cpu_switch_to_pllx(struct tegra124_cpufreq_priv *priv)
-{
- clk_set_parent(priv->cpu_clk, priv->pllp_clk);
- clk_disable_unprepare(priv->dfll_clk);
- regulator_sync_voltage(priv->vdd_cpu_reg);
- clk_set_parent(priv->cpu_clk, priv->pllx_clk);
-}
-
static int tegra124_cpufreq_probe(struct platform_device *pdev)
{
struct tegra124_cpufreq_priv *priv;
@@ -88,16 +78,10 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev)
if (!np)
return -ENODEV;
- priv->vdd_cpu_reg = regulator_get(cpu_dev, "vdd-cpu");
- if (IS_ERR(priv->vdd_cpu_reg)) {
- ret = PTR_ERR(priv->vdd_cpu_reg);
- goto out_put_np;
- }
-
priv->cpu_clk = of_clk_get_by_name(np, "cpu_g");
if (IS_ERR(priv->cpu_clk)) {
ret = PTR_ERR(priv->cpu_clk);
- goto out_put_vdd_cpu_reg;
+ goto out_put_np;
}
priv->dfll_clk = of_clk_get_by_name(np, "dfll");
@@ -129,15 +113,13 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev)
platform_device_register_full(&cpufreq_dt_devinfo);
if (IS_ERR(priv->cpufreq_dt_pdev)) {
ret = PTR_ERR(priv->cpufreq_dt_pdev);
- goto out_switch_to_pllx;
+ goto out_put_pllp_clk;
}
platform_set_drvdata(pdev, priv);
return 0;
-out_switch_to_pllx:
- tegra124_cpu_switch_to_pllx(priv);
out_put_pllp_clk:
clk_put(priv->pllp_clk);
out_put_pllx_clk:
@@ -146,34 +128,15 @@ out_put_dfll_clk:
clk_put(priv->dfll_clk);
out_put_cpu_clk:
clk_put(priv->cpu_clk);
-out_put_vdd_cpu_reg:
- regulator_put(priv->vdd_cpu_reg);
out_put_np:
of_node_put(np);
return ret;
}
-static int tegra124_cpufreq_remove(struct platform_device *pdev)
-{
- struct tegra124_cpufreq_priv *priv = platform_get_drvdata(pdev);
-
- platform_device_unregister(priv->cpufreq_dt_pdev);
- tegra124_cpu_switch_to_pllx(priv);
-
- clk_put(priv->pllp_clk);
- clk_put(priv->pllx_clk);
- clk_put(priv->dfll_clk);
- clk_put(priv->cpu_clk);
- regulator_put(priv->vdd_cpu_reg);
-
- return 0;
-}
-
static struct platform_driver tegra124_cpufreq_platdrv = {
.driver.name = "cpufreq-tegra124",
.probe = tegra124_cpufreq_probe,
- .remove = tegra124_cpufreq_remove,
};
static int __init tegra_cpufreq_init(void)
@@ -181,7 +144,8 @@ static int __init tegra_cpufreq_init(void)
int ret;
struct platform_device *pdev;
- if (!of_machine_is_compatible("nvidia,tegra124"))
+ if (!(of_machine_is_compatible("nvidia,tegra124") ||
+ of_machine_is_compatible("nvidia,tegra210")))
return -ENODEV;
/*
diff --git a/drivers/firmware/imx/misc.c b/drivers/firmware/imx/misc.c
index 97f5424dbac9..4b56a587dacd 100644
--- a/drivers/firmware/imx/misc.c
+++ b/drivers/firmware/imx/misc.c
@@ -18,6 +18,14 @@ struct imx_sc_msg_req_misc_set_ctrl {
u16 resource;
} __packed;
+struct imx_sc_msg_req_cpu_start {
+ struct imx_sc_rpc_msg hdr;
+ u32 address_hi;
+ u32 address_lo;
+ u16 resource;
+ u8 enable;
+} __packed;
+
struct imx_sc_msg_req_misc_get_ctrl {
struct imx_sc_rpc_msg hdr;
u32 ctrl;
@@ -97,3 +105,33 @@ int imx_sc_misc_get_control(struct imx_sc_ipc *ipc, u32 resource,
return 0;
}
EXPORT_SYMBOL(imx_sc_misc_get_control);
+
+/*
+ * This function starts/stops a CPU identified by @resource
+ *
+ * @param[in] ipc IPC handle
+ * @param[in] resource resource the control is associated with
+ * @param[in] enable true for start, false for stop
+ * @param[in] phys_addr initial instruction address to be executed
+ *
+ * @return Returns 0 for success and < 0 for errors.
+ */
+int imx_sc_pm_cpu_start(struct imx_sc_ipc *ipc, u32 resource,
+ bool enable, u64 phys_addr)
+{
+ struct imx_sc_msg_req_cpu_start msg;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = IMX_SC_RPC_SVC_PM;
+ hdr->func = IMX_SC_PM_FUNC_CPU_START;
+ hdr->size = 4;
+
+ msg.address_hi = phys_addr >> 32;
+ msg.address_lo = phys_addr;
+ msg.resource = resource;
+ msg.enable = enable;
+
+ return imx_scu_call_rpc(ipc, &msg, true);
+}
+EXPORT_SYMBOL(imx_sc_pm_cpu_start);
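The new imx_sc_pm_cpu_start() export is aimed at callers that need to kick an auxiliary core over the SCU. A hedged sketch of such a caller, assuming the handle comes from imx_scu_get_handle() and that IMX_SC_R_M4_0_PID0 from the dt-bindings headers names the core; neither assumption is part of this patch:

#include <linux/firmware/imx/sci.h>
#include <dt-bindings/firmware/imx/rsrc.h>

static int example_start_m4(u64 boot_addr)
{
        struct imx_sc_ipc *ipc;
        int ret;

        ret = imx_scu_get_handle(&ipc);
        if (ret)
                return ret;

        /* start the Cortex-M4 at boot_addr; pass false to stop it again */
        return imx_sc_pm_cpu_start(ipc, IMX_SC_R_M4_0_PID0, true, boot_addr);
}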
diff --git a/drivers/firmware/imx/scu-pd.c b/drivers/firmware/imx/scu-pd.c
index 407245f2efd0..39a94c7177fc 100644
--- a/drivers/firmware/imx/scu-pd.c
+++ b/drivers/firmware/imx/scu-pd.c
@@ -322,6 +322,7 @@ static int imx_sc_pd_probe(struct platform_device *pdev)
static const struct of_device_id imx_sc_pd_match[] = {
{ .compatible = "fsl,imx8qxp-scu-pd", &imx8qxp_scu_pd},
+ { .compatible = "fsl,scu-pd", &imx8qxp_scu_pd},
{ /* sentinel */ }
};
diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c
index a13558154ac3..61be15d9df7d 100644
--- a/drivers/firmware/raspberrypi.c
+++ b/drivers/firmware/raspberrypi.c
@@ -238,6 +238,16 @@ static int rpi_firmware_probe(struct platform_device *pdev)
return 0;
}
+static void rpi_firmware_shutdown(struct platform_device *pdev)
+{
+ struct rpi_firmware *fw = platform_get_drvdata(pdev);
+
+ if (!fw)
+ return;
+
+ rpi_firmware_property(fw, RPI_FIRMWARE_NOTIFY_REBOOT, NULL, 0);
+}
+
static int rpi_firmware_remove(struct platform_device *pdev)
{
struct rpi_firmware *fw = platform_get_drvdata(pdev);
@@ -278,6 +288,7 @@ static struct platform_driver rpi_firmware_driver = {
.of_match_table = rpi_firmware_of_match,
},
.probe = rpi_firmware_probe,
+ .shutdown = rpi_firmware_shutdown,
.remove = rpi_firmware_remove,
};
module_platform_driver(rpi_firmware_driver);
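The shutdown hook above is a one-tag transaction over the existing mailbox interface. For comparison, a sketch of the same helper used from a consumer driver, assuming the firmware node has been resolved elsewhere and that the revision tag returns a single u32:

#include <soc/bcm2835/raspberrypi-firmware.h>

static int example_get_fw_revision(struct device_node *fw_node, u32 *rev)
{
        struct rpi_firmware *fw;

        fw = rpi_firmware_get(fw_node);
        if (!fw)
                return -EPROBE_DEFER;

        /* one-off property transaction: tag in, u32 revision out */
        return rpi_firmware_property(fw, RPI_FIRMWARE_GET_FIRMWARE_REVISION,
                                     rev, sizeof(*rev));
}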
diff --git a/drivers/firmware/tegra/Makefile b/drivers/firmware/tegra/Makefile
index 1b826dcca719..676b01caff05 100644
--- a/drivers/firmware/tegra/Makefile
+++ b/drivers/firmware/tegra/Makefile
@@ -1,4 +1,7 @@
tegra-bpmp-y = bpmp.o
+tegra-bpmp-$(CONFIG_ARCH_TEGRA_210_SOC) += bpmp-tegra210.o
+tegra-bpmp-$(CONFIG_ARCH_TEGRA_186_SOC) += bpmp-tegra186.o
+tegra-bpmp-$(CONFIG_ARCH_TEGRA_194_SOC) += bpmp-tegra186.o
tegra-bpmp-$(CONFIG_DEBUG_FS) += bpmp-debugfs.o
obj-$(CONFIG_TEGRA_BPMP) += tegra-bpmp.o
obj-$(CONFIG_TEGRA_IVC) += ivc.o
diff --git a/drivers/firmware/tegra/bpmp-private.h b/drivers/firmware/tegra/bpmp-private.h
new file mode 100644
index 000000000000..54d560c48398
--- /dev/null
+++ b/drivers/firmware/tegra/bpmp-private.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, NVIDIA CORPORATION.
+ */
+
+#ifndef __FIRMWARE_TEGRA_BPMP_PRIVATE_H
+#define __FIRMWARE_TEGRA_BPMP_PRIVATE_H
+
+#include <soc/tegra/bpmp.h>
+
+struct tegra_bpmp_ops {
+ int (*init)(struct tegra_bpmp *bpmp);
+ void (*deinit)(struct tegra_bpmp *bpmp);
+ bool (*is_response_ready)(struct tegra_bpmp_channel *channel);
+ bool (*is_request_ready)(struct tegra_bpmp_channel *channel);
+ int (*ack_response)(struct tegra_bpmp_channel *channel);
+ int (*ack_request)(struct tegra_bpmp_channel *channel);
+ bool (*is_response_channel_free)(struct tegra_bpmp_channel *channel);
+ bool (*is_request_channel_free)(struct tegra_bpmp_channel *channel);
+ int (*post_response)(struct tegra_bpmp_channel *channel);
+ int (*post_request)(struct tegra_bpmp_channel *channel);
+ int (*ring_doorbell)(struct tegra_bpmp *bpmp);
+ int (*resume)(struct tegra_bpmp *bpmp);
+};
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
+ IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
+extern const struct tegra_bpmp_ops tegra186_bpmp_ops;
+#endif
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+extern const struct tegra_bpmp_ops tegra210_bpmp_ops;
+#endif
+
+#endif
diff --git a/drivers/firmware/tegra/bpmp-tegra186.c b/drivers/firmware/tegra/bpmp-tegra186.c
new file mode 100644
index 000000000000..ea308751635f
--- /dev/null
+++ b/drivers/firmware/tegra/bpmp-tegra186.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, NVIDIA CORPORATION.
+ */
+
+#include <linux/genalloc.h>
+#include <linux/mailbox_client.h>
+#include <linux/platform_device.h>
+
+#include <soc/tegra/bpmp.h>
+#include <soc/tegra/bpmp-abi.h>
+#include <soc/tegra/ivc.h>
+
+#include "bpmp-private.h"
+
+struct tegra186_bpmp {
+ struct tegra_bpmp *parent;
+
+ struct {
+ struct gen_pool *pool;
+ dma_addr_t phys;
+ void *virt;
+ } tx, rx;
+
+ struct {
+ struct mbox_client client;
+ struct mbox_chan *channel;
+ } mbox;
+};
+
+static inline struct tegra_bpmp *
+mbox_client_to_bpmp(struct mbox_client *client)
+{
+ struct tegra186_bpmp *priv;
+
+ priv = container_of(client, struct tegra186_bpmp, mbox.client);
+
+ return priv->parent;
+}
+
+static bool tegra186_bpmp_is_message_ready(struct tegra_bpmp_channel *channel)
+{
+ void *frame;
+
+ frame = tegra_ivc_read_get_next_frame(channel->ivc);
+ if (IS_ERR(frame)) {
+ channel->ib = NULL;
+ return false;
+ }
+
+ channel->ib = frame;
+
+ return true;
+}
+
+static bool tegra186_bpmp_is_channel_free(struct tegra_bpmp_channel *channel)
+{
+ void *frame;
+
+ frame = tegra_ivc_write_get_next_frame(channel->ivc);
+ if (IS_ERR(frame)) {
+ channel->ob = NULL;
+ return false;
+ }
+
+ channel->ob = frame;
+
+ return true;
+}
+
+static int tegra186_bpmp_ack_message(struct tegra_bpmp_channel *channel)
+{
+ return tegra_ivc_read_advance(channel->ivc);
+}
+
+static int tegra186_bpmp_post_message(struct tegra_bpmp_channel *channel)
+{
+ return tegra_ivc_write_advance(channel->ivc);
+}
+
+static int tegra186_bpmp_ring_doorbell(struct tegra_bpmp *bpmp)
+{
+ struct tegra186_bpmp *priv = bpmp->priv;
+ int err;
+
+ err = mbox_send_message(priv->mbox.channel, NULL);
+ if (err < 0)
+ return err;
+
+ mbox_client_txdone(priv->mbox.channel, 0);
+
+ return 0;
+}
+
+static void tegra186_bpmp_ivc_notify(struct tegra_ivc *ivc, void *data)
+{
+ struct tegra_bpmp *bpmp = data;
+ struct tegra186_bpmp *priv = bpmp->priv;
+
+ if (WARN_ON(priv->mbox.channel == NULL))
+ return;
+
+ tegra186_bpmp_ring_doorbell(bpmp);
+}
+
+static int tegra186_bpmp_channel_init(struct tegra_bpmp_channel *channel,
+ struct tegra_bpmp *bpmp,
+ unsigned int index)
+{
+ struct tegra186_bpmp *priv = bpmp->priv;
+ size_t message_size, queue_size;
+ unsigned int offset;
+ int err;
+
+ channel->ivc = devm_kzalloc(bpmp->dev, sizeof(*channel->ivc),
+ GFP_KERNEL);
+ if (!channel->ivc)
+ return -ENOMEM;
+
+ message_size = tegra_ivc_align(MSG_MIN_SZ);
+ queue_size = tegra_ivc_total_queue_size(message_size);
+ offset = queue_size * index;
+
+ err = tegra_ivc_init(channel->ivc, NULL,
+ priv->rx.virt + offset, priv->rx.phys + offset,
+ priv->tx.virt + offset, priv->tx.phys + offset,
+ 1, message_size, tegra186_bpmp_ivc_notify,
+ bpmp);
+ if (err < 0) {
+ dev_err(bpmp->dev, "failed to setup IVC for channel %u: %d\n",
+ index, err);
+ return err;
+ }
+
+ init_completion(&channel->completion);
+ channel->bpmp = bpmp;
+
+ return 0;
+}
+
+static void tegra186_bpmp_channel_reset(struct tegra_bpmp_channel *channel)
+{
+ /* reset the channel state */
+ tegra_ivc_reset(channel->ivc);
+
+ /* sync the channel state with BPMP */
+ while (tegra_ivc_notified(channel->ivc))
+ ;
+}
+
+static void tegra186_bpmp_channel_cleanup(struct tegra_bpmp_channel *channel)
+{
+ tegra_ivc_cleanup(channel->ivc);
+}
+
+static void mbox_handle_rx(struct mbox_client *client, void *data)
+{
+ struct tegra_bpmp *bpmp = mbox_client_to_bpmp(client);
+
+ tegra_bpmp_handle_rx(bpmp);
+}
+
+static int tegra186_bpmp_init(struct tegra_bpmp *bpmp)
+{
+ struct tegra186_bpmp *priv;
+ unsigned int i;
+ int err;
+
+ priv = devm_kzalloc(bpmp->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ bpmp->priv = priv;
+ priv->parent = bpmp;
+
+ priv->tx.pool = of_gen_pool_get(bpmp->dev->of_node, "shmem", 0);
+ if (!priv->tx.pool) {
+ dev_err(bpmp->dev, "TX shmem pool not found\n");
+ return -ENOMEM;
+ }
+
+ priv->tx.virt = gen_pool_dma_alloc(priv->tx.pool, 4096, &priv->tx.phys);
+ if (!priv->tx.virt) {
+ dev_err(bpmp->dev, "failed to allocate from TX pool\n");
+ return -ENOMEM;
+ }
+
+ priv->rx.pool = of_gen_pool_get(bpmp->dev->of_node, "shmem", 1);
+ if (!priv->rx.pool) {
+ dev_err(bpmp->dev, "RX shmem pool not found\n");
+ err = -ENOMEM;
+ goto free_tx;
+ }
+
+ priv->rx.virt = gen_pool_dma_alloc(priv->rx.pool, 4096, &priv->rx.phys);
+ if (!priv->rx.virt) {
+ dev_err(bpmp->dev, "failed to allocate from RX pool\n");
+ err = -ENOMEM;
+ goto free_tx;
+ }
+
+ err = tegra186_bpmp_channel_init(bpmp->tx_channel, bpmp,
+ bpmp->soc->channels.cpu_tx.offset);
+ if (err < 0)
+ goto free_rx;
+
+ err = tegra186_bpmp_channel_init(bpmp->rx_channel, bpmp,
+ bpmp->soc->channels.cpu_rx.offset);
+ if (err < 0)
+ goto cleanup_tx_channel;
+
+ for (i = 0; i < bpmp->threaded.count; i++) {
+ unsigned int index = bpmp->soc->channels.thread.offset + i;
+
+ err = tegra186_bpmp_channel_init(&bpmp->threaded_channels[i],
+ bpmp, index);
+ if (err < 0)
+ goto cleanup_channels;
+ }
+
+ /* mbox registration */
+ priv->mbox.client.dev = bpmp->dev;
+ priv->mbox.client.rx_callback = mbox_handle_rx;
+ priv->mbox.client.tx_block = false;
+ priv->mbox.client.knows_txdone = false;
+
+ priv->mbox.channel = mbox_request_channel(&priv->mbox.client, 0);
+ if (IS_ERR(priv->mbox.channel)) {
+ err = PTR_ERR(priv->mbox.channel);
+ dev_err(bpmp->dev, "failed to get HSP mailbox: %d\n", err);
+ goto cleanup_channels;
+ }
+
+ tegra186_bpmp_channel_reset(bpmp->tx_channel);
+ tegra186_bpmp_channel_reset(bpmp->rx_channel);
+
+ for (i = 0; i < bpmp->threaded.count; i++)
+ tegra186_bpmp_channel_reset(&bpmp->threaded_channels[i]);
+
+ return 0;
+
+cleanup_channels:
+ for (i = 0; i < bpmp->threaded.count; i++) {
+ if (!bpmp->threaded_channels[i].bpmp)
+ continue;
+
+ tegra186_bpmp_channel_cleanup(&bpmp->threaded_channels[i]);
+ }
+
+ tegra186_bpmp_channel_cleanup(bpmp->rx_channel);
+cleanup_tx_channel:
+ tegra186_bpmp_channel_cleanup(bpmp->tx_channel);
+free_rx:
+ gen_pool_free(priv->rx.pool, (unsigned long)priv->rx.virt, 4096);
+free_tx:
+ gen_pool_free(priv->tx.pool, (unsigned long)priv->tx.virt, 4096);
+
+ return err;
+}
+
+static void tegra186_bpmp_deinit(struct tegra_bpmp *bpmp)
+{
+ struct tegra186_bpmp *priv = bpmp->priv;
+ unsigned int i;
+
+ mbox_free_channel(priv->mbox.channel);
+
+ for (i = 0; i < bpmp->threaded.count; i++)
+ tegra186_bpmp_channel_cleanup(&bpmp->threaded_channels[i]);
+
+ tegra186_bpmp_channel_cleanup(bpmp->rx_channel);
+ tegra186_bpmp_channel_cleanup(bpmp->tx_channel);
+
+ gen_pool_free(priv->rx.pool, (unsigned long)priv->rx.virt, 4096);
+ gen_pool_free(priv->tx.pool, (unsigned long)priv->tx.virt, 4096);
+}
+
+static int tegra186_bpmp_resume(struct tegra_bpmp *bpmp)
+{
+ unsigned int i;
+
+ /* reset message channels */
+ tegra186_bpmp_channel_reset(bpmp->tx_channel);
+ tegra186_bpmp_channel_reset(bpmp->rx_channel);
+
+ for (i = 0; i < bpmp->threaded.count; i++)
+ tegra186_bpmp_channel_reset(&bpmp->threaded_channels[i]);
+
+ return 0;
+}
+
+const struct tegra_bpmp_ops tegra186_bpmp_ops = {
+ .init = tegra186_bpmp_init,
+ .deinit = tegra186_bpmp_deinit,
+ .is_response_ready = tegra186_bpmp_is_message_ready,
+ .is_request_ready = tegra186_bpmp_is_message_ready,
+ .ack_response = tegra186_bpmp_ack_message,
+ .ack_request = tegra186_bpmp_ack_message,
+ .is_response_channel_free = tegra186_bpmp_is_channel_free,
+ .is_request_channel_free = tegra186_bpmp_is_channel_free,
+ .post_response = tegra186_bpmp_post_message,
+ .post_request = tegra186_bpmp_post_message,
+ .ring_doorbell = tegra186_bpmp_ring_doorbell,
+ .resume = tegra186_bpmp_resume,
+};
diff --git a/drivers/firmware/tegra/bpmp-tegra210.c b/drivers/firmware/tegra/bpmp-tegra210.c
new file mode 100644
index 000000000000..ae15940a078e
--- /dev/null
+++ b/drivers/firmware/tegra/bpmp-tegra210.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, NVIDIA CORPORATION.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <soc/tegra/bpmp.h>
+
+#include "bpmp-private.h"
+
+#define TRIGGER_OFFSET 0x000
+#define RESULT_OFFSET(id) (0xc00 + id * 4)
+#define TRIGGER_ID_SHIFT 16
+#define TRIGGER_CMD_GET 4
+
+#define STA_OFFSET 0
+#define SET_OFFSET 4
+#define CLR_OFFSET 8
+
+#define CH_MASK(ch) (0x3 << ((ch) * 2))
+#define SL_SIGL(ch) (0x0 << ((ch) * 2))
+#define SL_QUED(ch) (0x1 << ((ch) * 2))
+#define MA_FREE(ch) (0x2 << ((ch) * 2))
+#define MA_ACKD(ch) (0x3 << ((ch) * 2))
+
+struct tegra210_bpmp {
+ void __iomem *atomics;
+ void __iomem *arb_sema;
+ struct irq_data *tx_irq_data;
+};
+
+static u32 bpmp_channel_status(struct tegra_bpmp *bpmp, unsigned int index)
+{
+ struct tegra210_bpmp *priv = bpmp->priv;
+
+ return __raw_readl(priv->arb_sema + STA_OFFSET) & CH_MASK(index);
+}
+
+static bool tegra210_bpmp_is_response_ready(struct tegra_bpmp_channel *channel)
+{
+ unsigned int index = channel->index;
+
+ return bpmp_channel_status(channel->bpmp, index) == MA_ACKD(index);
+}
+
+static bool tegra210_bpmp_is_request_ready(struct tegra_bpmp_channel *channel)
+{
+ unsigned int index = channel->index;
+
+ return bpmp_channel_status(channel->bpmp, index) == SL_SIGL(index);
+}
+
+static bool
+tegra210_bpmp_is_request_channel_free(struct tegra_bpmp_channel *channel)
+{
+ unsigned int index = channel->index;
+
+ return bpmp_channel_status(channel->bpmp, index) == MA_FREE(index);
+}
+
+static bool
+tegra210_bpmp_is_response_channel_free(struct tegra_bpmp_channel *channel)
+{
+ unsigned int index = channel->index;
+
+ return bpmp_channel_status(channel->bpmp, index) == SL_QUED(index);
+}
+
+static int tegra210_bpmp_post_request(struct tegra_bpmp_channel *channel)
+{
+ struct tegra210_bpmp *priv = channel->bpmp->priv;
+
+ __raw_writel(CH_MASK(channel->index), priv->arb_sema + CLR_OFFSET);
+
+ return 0;
+}
+
+static int tegra210_bpmp_post_response(struct tegra_bpmp_channel *channel)
+{
+ struct tegra210_bpmp *priv = channel->bpmp->priv;
+
+ __raw_writel(MA_ACKD(channel->index), priv->arb_sema + SET_OFFSET);
+
+ return 0;
+}
+
+static int tegra210_bpmp_ack_response(struct tegra_bpmp_channel *channel)
+{
+ struct tegra210_bpmp *priv = channel->bpmp->priv;
+
+ __raw_writel(MA_ACKD(channel->index) ^ MA_FREE(channel->index),
+ priv->arb_sema + CLR_OFFSET);
+
+ return 0;
+}
+
+static int tegra210_bpmp_ack_request(struct tegra_bpmp_channel *channel)
+{
+ struct tegra210_bpmp *priv = channel->bpmp->priv;
+
+ __raw_writel(SL_QUED(channel->index), priv->arb_sema + SET_OFFSET);
+
+ return 0;
+}
+
+static int tegra210_bpmp_ring_doorbell(struct tegra_bpmp *bpmp)
+{
+ struct tegra210_bpmp *priv = bpmp->priv;
+ struct irq_data *irq_data = priv->tx_irq_data;
+
+ /*
+ * Tegra Legacy Interrupt Controller (LIC) is used to notify BPMP of
+ * available messages
+ */
+ if (irq_data->chip->irq_retrigger)
+ return irq_data->chip->irq_retrigger(irq_data);
+
+ return -EINVAL;
+}
+
+static irqreturn_t rx_irq(int irq, void *data)
+{
+ struct tegra_bpmp *bpmp = data;
+
+ tegra_bpmp_handle_rx(bpmp);
+
+ return IRQ_HANDLED;
+}
+
+static int tegra210_bpmp_channel_init(struct tegra_bpmp_channel *channel,
+ struct tegra_bpmp *bpmp,
+ unsigned int index)
+{
+ struct tegra210_bpmp *priv = bpmp->priv;
+ u32 address;
+ void *p;
+
+ /* Retrieve channel base address from BPMP */
+ writel(index << TRIGGER_ID_SHIFT | TRIGGER_CMD_GET,
+ priv->atomics + TRIGGER_OFFSET);
+ address = readl(priv->atomics + RESULT_OFFSET(index));
+
+ p = devm_ioremap(bpmp->dev, address, 0x80);
+ if (!p)
+ return -ENOMEM;
+
+ channel->ib = p;
+ channel->ob = p;
+ channel->index = index;
+ init_completion(&channel->completion);
+ channel->bpmp = bpmp;
+
+ return 0;
+}
+
+static int tegra210_bpmp_init(struct tegra_bpmp *bpmp)
+{
+ struct platform_device *pdev = to_platform_device(bpmp->dev);
+ struct tegra210_bpmp *priv;
+ struct resource *res;
+ unsigned int i;
+ int err;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ bpmp->priv = priv;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->atomics = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->atomics))
+ return PTR_ERR(priv->atomics);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ priv->arb_sema = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->arb_sema))
+ return PTR_ERR(priv->arb_sema);
+
+ err = tegra210_bpmp_channel_init(bpmp->tx_channel, bpmp,
+ bpmp->soc->channels.cpu_tx.offset);
+ if (err < 0)
+ return err;
+
+ err = tegra210_bpmp_channel_init(bpmp->rx_channel, bpmp,
+ bpmp->soc->channels.cpu_rx.offset);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < bpmp->threaded.count; i++) {
+ unsigned int index = bpmp->soc->channels.thread.offset + i;
+
+ err = tegra210_bpmp_channel_init(&bpmp->threaded_channels[i],
+ bpmp, index);
+ if (err < 0)
+ return err;
+ }
+
+ err = platform_get_irq_byname(pdev, "tx");
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to get TX IRQ: %d\n", err);
+ return err;
+ }
+
+ priv->tx_irq_data = irq_get_irq_data(err);
+ if (!priv->tx_irq_data) {
+ dev_err(&pdev->dev, "failed to get IRQ data for TX IRQ\n");
+ return -ENOENT;

+ }
+
+ err = platform_get_irq_byname(pdev, "rx");
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to get RX IRQ: %d\n", err);
+ return err;
+ }
+
+ err = devm_request_irq(&pdev->dev, err, rx_irq,
+ IRQF_NO_SUSPEND, dev_name(&pdev->dev), bpmp);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+const struct tegra_bpmp_ops tegra210_bpmp_ops = {
+ .init = tegra210_bpmp_init,
+ .is_response_ready = tegra210_bpmp_is_response_ready,
+ .is_request_ready = tegra210_bpmp_is_request_ready,
+ .ack_response = tegra210_bpmp_ack_response,
+ .ack_request = tegra210_bpmp_ack_request,
+ .is_response_channel_free = tegra210_bpmp_is_response_channel_free,
+ .is_request_channel_free = tegra210_bpmp_is_request_channel_free,
+ .post_response = tegra210_bpmp_post_response,
+ .post_request = tegra210_bpmp_post_request,
+ .ring_doorbell = tegra210_bpmp_ring_doorbell,
+};
diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c
index 689478b92bce..dd775e8ba5a0 100644
--- a/drivers/firmware/tegra/bpmp.c
+++ b/drivers/firmware/tegra/bpmp.c
@@ -26,6 +26,8 @@
#include <soc/tegra/bpmp-abi.h>
#include <soc/tegra/ivc.h>
+#include "bpmp-private.h"
+
#define MSG_ACK BIT(0)
#define MSG_RING BIT(1)
#define TAG_SZ 32
@@ -36,6 +38,14 @@ mbox_client_to_bpmp(struct mbox_client *client)
return container_of(client, struct tegra_bpmp, mbox.client);
}
+static inline const struct tegra_bpmp_ops *
+channel_to_ops(struct tegra_bpmp_channel *channel)
+{
+ struct tegra_bpmp *bpmp = channel->bpmp;
+
+ return bpmp->soc->ops;
+}
+
struct tegra_bpmp *tegra_bpmp_get(struct device *dev)
{
struct platform_device *pdev;
@@ -96,22 +106,21 @@ static bool tegra_bpmp_message_valid(const struct tegra_bpmp_message *msg)
(msg->rx.size == 0 || msg->rx.data);
}
-static bool tegra_bpmp_master_acked(struct tegra_bpmp_channel *channel)
+static bool tegra_bpmp_is_response_ready(struct tegra_bpmp_channel *channel)
{
- void *frame;
+ const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
- frame = tegra_ivc_read_get_next_frame(channel->ivc);
- if (IS_ERR(frame)) {
- channel->ib = NULL;
- return false;
- }
+ return ops->is_response_ready(channel);
+}
- channel->ib = frame;
+static bool tegra_bpmp_is_request_ready(struct tegra_bpmp_channel *channel)
+{
+ const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
- return true;
+ return ops->is_request_ready(channel);
}
-static int tegra_bpmp_wait_ack(struct tegra_bpmp_channel *channel)
+static int tegra_bpmp_wait_response(struct tegra_bpmp_channel *channel)
{
unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
ktime_t end;
@@ -119,29 +128,45 @@ static int tegra_bpmp_wait_ack(struct tegra_bpmp_channel *channel)
end = ktime_add_us(ktime_get(), timeout);
do {
- if (tegra_bpmp_master_acked(channel))
+ if (tegra_bpmp_is_response_ready(channel))
return 0;
} while (ktime_before(ktime_get(), end));
return -ETIMEDOUT;
}
-static bool tegra_bpmp_master_free(struct tegra_bpmp_channel *channel)
+static int tegra_bpmp_ack_response(struct tegra_bpmp_channel *channel)
{
- void *frame;
+ const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
- frame = tegra_ivc_write_get_next_frame(channel->ivc);
- if (IS_ERR(frame)) {
- channel->ob = NULL;
- return false;
- }
+ return ops->ack_response(channel);
+}
- channel->ob = frame;
+static int tegra_bpmp_ack_request(struct tegra_bpmp_channel *channel)
+{
+ const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
- return true;
+ return ops->ack_request(channel);
}
-static int tegra_bpmp_wait_master_free(struct tegra_bpmp_channel *channel)
+static bool
+tegra_bpmp_is_request_channel_free(struct tegra_bpmp_channel *channel)
+{
+ const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
+
+ return ops->is_request_channel_free(channel);
+}
+
+static bool
+tegra_bpmp_is_response_channel_free(struct tegra_bpmp_channel *channel)
+{
+ const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
+
+ return ops->is_response_channel_free(channel);
+}
+
+static int
+tegra_bpmp_wait_request_channel_free(struct tegra_bpmp_channel *channel)
{
unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
ktime_t start, now;
@@ -149,7 +174,7 @@ static int tegra_bpmp_wait_master_free(struct tegra_bpmp_channel *channel)
start = ns_to_ktime(local_clock());
do {
- if (tegra_bpmp_master_free(channel))
+ if (tegra_bpmp_is_request_channel_free(channel))
return 0;
now = ns_to_ktime(local_clock());
@@ -158,6 +183,25 @@ static int tegra_bpmp_wait_master_free(struct tegra_bpmp_channel *channel)
return -ETIMEDOUT;
}
+static int tegra_bpmp_post_request(struct tegra_bpmp_channel *channel)
+{
+ const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
+
+ return ops->post_request(channel);
+}
+
+static int tegra_bpmp_post_response(struct tegra_bpmp_channel *channel)
+{
+ const struct tegra_bpmp_ops *ops = channel_to_ops(channel);
+
+ return ops->post_response(channel);
+}
+
+static int tegra_bpmp_ring_doorbell(struct tegra_bpmp *bpmp)
+{
+ return bpmp->soc->ops->ring_doorbell(bpmp);
+}
+
static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
void *data, size_t size, int *ret)
{
@@ -166,7 +210,7 @@ static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
if (data && size > 0)
memcpy(data, channel->ib->data, size);
- err = tegra_ivc_read_advance(channel->ivc);
+ err = tegra_bpmp_ack_response(channel);
if (err < 0)
return err;
@@ -210,7 +254,7 @@ static ssize_t __tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
if (data && size > 0)
memcpy(channel->ob->data, data, size);
- return tegra_ivc_write_advance(channel->ivc);
+ return tegra_bpmp_post_request(channel);
}
static struct tegra_bpmp_channel *
@@ -238,7 +282,7 @@ tegra_bpmp_write_threaded(struct tegra_bpmp *bpmp, unsigned int mrq,
channel = &bpmp->threaded_channels[index];
- if (!tegra_bpmp_master_free(channel)) {
+ if (!tegra_bpmp_is_request_channel_free(channel)) {
err = -EBUSY;
goto unlock;
}
@@ -270,7 +314,7 @@ static ssize_t tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
{
int err;
- err = tegra_bpmp_wait_master_free(channel);
+ err = tegra_bpmp_wait_request_channel_free(channel);
if (err < 0)
return err;
@@ -302,13 +346,11 @@ int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp,
spin_unlock(&bpmp->atomic_tx_lock);
- err = mbox_send_message(bpmp->mbox.channel, NULL);
+ err = tegra_bpmp_ring_doorbell(bpmp);
if (err < 0)
return err;
- mbox_client_txdone(bpmp->mbox.channel, 0);
-
- err = tegra_bpmp_wait_ack(channel);
+ err = tegra_bpmp_wait_response(channel);
if (err < 0)
return err;
@@ -335,12 +377,10 @@ int tegra_bpmp_transfer(struct tegra_bpmp *bpmp,
if (IS_ERR(channel))
return PTR_ERR(channel);
- err = mbox_send_message(bpmp->mbox.channel, NULL);
+ err = tegra_bpmp_ring_doorbell(bpmp);
if (err < 0)
return err;
- mbox_client_txdone(bpmp->mbox.channel, 0);
-
timeout = usecs_to_jiffies(bpmp->soc->channels.thread.timeout);
err = wait_for_completion_timeout(&channel->completion, timeout);
@@ -369,38 +409,34 @@ void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel, int code,
{
unsigned long flags = channel->ib->flags;
struct tegra_bpmp *bpmp = channel->bpmp;
- struct tegra_bpmp_mb_data *frame;
int err;
if (WARN_ON(size > MSG_DATA_MIN_SZ))
return;
- err = tegra_ivc_read_advance(channel->ivc);
+ err = tegra_bpmp_ack_request(channel);
if (WARN_ON(err < 0))
return;
if ((flags & MSG_ACK) == 0)
return;
- frame = tegra_ivc_write_get_next_frame(channel->ivc);
- if (WARN_ON(IS_ERR(frame)))
+ if (WARN_ON(!tegra_bpmp_is_response_channel_free(channel)))
return;
- frame->code = code;
+ channel->ob->code = code;
if (data && size > 0)
- memcpy(frame->data, data, size);
+ memcpy(channel->ob->data, data, size);
- err = tegra_ivc_write_advance(channel->ivc);
+ err = tegra_bpmp_post_response(channel);
if (WARN_ON(err < 0))
return;
if (flags & MSG_RING) {
- err = mbox_send_message(bpmp->mbox.channel, NULL);
+ err = tegra_bpmp_ring_doorbell(bpmp);
if (WARN_ON(err < 0))
return;
-
- mbox_client_txdone(bpmp->mbox.channel, 0);
}
}
EXPORT_SYMBOL_GPL(tegra_bpmp_mrq_return);
@@ -627,9 +663,8 @@ static void tegra_bpmp_channel_signal(struct tegra_bpmp_channel *channel)
complete(&channel->completion);
}
-static void tegra_bpmp_handle_rx(struct mbox_client *client, void *data)
+void tegra_bpmp_handle_rx(struct tegra_bpmp *bpmp)
{
- struct tegra_bpmp *bpmp = mbox_client_to_bpmp(client);
struct tegra_bpmp_channel *channel;
unsigned int i, count;
unsigned long *busy;
@@ -638,7 +673,7 @@ static void tegra_bpmp_handle_rx(struct mbox_client *client, void *data)
count = bpmp->soc->channels.thread.count;
busy = bpmp->threaded.busy;
- if (tegra_bpmp_master_acked(channel))
+ if (tegra_bpmp_is_request_ready(channel))
tegra_bpmp_handle_mrq(bpmp, channel->ib->code, channel);
spin_lock(&bpmp->lock);
@@ -648,7 +683,7 @@ static void tegra_bpmp_handle_rx(struct mbox_client *client, void *data)
channel = &bpmp->threaded_channels[i];
- if (tegra_bpmp_master_acked(channel)) {
+ if (tegra_bpmp_is_response_ready(channel)) {
tegra_bpmp_channel_signal(channel);
clear_bit(i, busy);
}
@@ -657,74 +692,9 @@ static void tegra_bpmp_handle_rx(struct mbox_client *client, void *data)
spin_unlock(&bpmp->lock);
}
-static void tegra_bpmp_ivc_notify(struct tegra_ivc *ivc, void *data)
-{
- struct tegra_bpmp *bpmp = data;
- int err;
-
- if (WARN_ON(bpmp->mbox.channel == NULL))
- return;
-
- err = mbox_send_message(bpmp->mbox.channel, NULL);
- if (err < 0)
- return;
-
- mbox_client_txdone(bpmp->mbox.channel, 0);
-}
-
-static int tegra_bpmp_channel_init(struct tegra_bpmp_channel *channel,
- struct tegra_bpmp *bpmp,
- unsigned int index)
-{
- size_t message_size, queue_size;
- unsigned int offset;
- int err;
-
- channel->ivc = devm_kzalloc(bpmp->dev, sizeof(*channel->ivc),
- GFP_KERNEL);
- if (!channel->ivc)
- return -ENOMEM;
-
- message_size = tegra_ivc_align(MSG_MIN_SZ);
- queue_size = tegra_ivc_total_queue_size(message_size);
- offset = queue_size * index;
-
- err = tegra_ivc_init(channel->ivc, NULL,
- bpmp->rx.virt + offset, bpmp->rx.phys + offset,
- bpmp->tx.virt + offset, bpmp->tx.phys + offset,
- 1, message_size, tegra_bpmp_ivc_notify,
- bpmp);
- if (err < 0) {
- dev_err(bpmp->dev, "failed to setup IVC for channel %u: %d\n",
- index, err);
- return err;
- }
-
- init_completion(&channel->completion);
- channel->bpmp = bpmp;
-
- return 0;
-}
-
-static void tegra_bpmp_channel_reset(struct tegra_bpmp_channel *channel)
-{
- /* reset the channel state */
- tegra_ivc_reset(channel->ivc);
-
- /* sync the channel state with BPMP */
- while (tegra_ivc_notified(channel->ivc))
- ;
-}
-
-static void tegra_bpmp_channel_cleanup(struct tegra_bpmp_channel *channel)
-{
- tegra_ivc_cleanup(channel->ivc);
-}
-
static int tegra_bpmp_probe(struct platform_device *pdev)
{
struct tegra_bpmp *bpmp;
- unsigned int i;
char tag[TAG_SZ];
size_t size;
int err;
@@ -736,32 +706,6 @@ static int tegra_bpmp_probe(struct platform_device *pdev)
bpmp->soc = of_device_get_match_data(&pdev->dev);
bpmp->dev = &pdev->dev;
- bpmp->tx.pool = of_gen_pool_get(pdev->dev.of_node, "shmem", 0);
- if (!bpmp->tx.pool) {
- dev_err(&pdev->dev, "TX shmem pool not found\n");
- return -ENOMEM;
- }
-
- bpmp->tx.virt = gen_pool_dma_alloc(bpmp->tx.pool, 4096, &bpmp->tx.phys);
- if (!bpmp->tx.virt) {
- dev_err(&pdev->dev, "failed to allocate from TX pool\n");
- return -ENOMEM;
- }
-
- bpmp->rx.pool = of_gen_pool_get(pdev->dev.of_node, "shmem", 1);
- if (!bpmp->rx.pool) {
- dev_err(&pdev->dev, "RX shmem pool not found\n");
- err = -ENOMEM;
- goto free_tx;
- }
-
- bpmp->rx.virt = gen_pool_dma_alloc(bpmp->rx.pool, 4096, &bpmp->rx.phys);
- if (!bpmp->rx.virt) {
- dev_err(&pdev->dev, "failed to allocate from RX pool\n");
- err = -ENOMEM;
- goto free_tx;
- }
-
INIT_LIST_HEAD(&bpmp->mrqs);
spin_lock_init(&bpmp->lock);
@@ -771,81 +715,38 @@ static int tegra_bpmp_probe(struct platform_device *pdev)
size = BITS_TO_LONGS(bpmp->threaded.count) * sizeof(long);
bpmp->threaded.allocated = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
- if (!bpmp->threaded.allocated) {
- err = -ENOMEM;
- goto free_rx;
- }
+ if (!bpmp->threaded.allocated)
+ return -ENOMEM;
bpmp->threaded.busy = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
- if (!bpmp->threaded.busy) {
- err = -ENOMEM;
- goto free_rx;
- }
+ if (!bpmp->threaded.busy)
+ return -ENOMEM;
spin_lock_init(&bpmp->atomic_tx_lock);
bpmp->tx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->tx_channel),
GFP_KERNEL);
- if (!bpmp->tx_channel) {
- err = -ENOMEM;
- goto free_rx;
- }
+ if (!bpmp->tx_channel)
+ return -ENOMEM;
bpmp->rx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->rx_channel),
GFP_KERNEL);
- if (!bpmp->rx_channel) {
- err = -ENOMEM;
- goto free_rx;
- }
+ if (!bpmp->rx_channel)
+ return -ENOMEM;
bpmp->threaded_channels = devm_kcalloc(&pdev->dev, bpmp->threaded.count,
sizeof(*bpmp->threaded_channels),
GFP_KERNEL);
- if (!bpmp->threaded_channels) {
- err = -ENOMEM;
- goto free_rx;
- }
-
- err = tegra_bpmp_channel_init(bpmp->tx_channel, bpmp,
- bpmp->soc->channels.cpu_tx.offset);
- if (err < 0)
- goto free_rx;
+ if (!bpmp->threaded_channels)
+ return -ENOMEM;
- err = tegra_bpmp_channel_init(bpmp->rx_channel, bpmp,
- bpmp->soc->channels.cpu_rx.offset);
+ err = bpmp->soc->ops->init(bpmp);
if (err < 0)
- goto cleanup_tx_channel;
-
- for (i = 0; i < bpmp->threaded.count; i++) {
- err = tegra_bpmp_channel_init(
- &bpmp->threaded_channels[i], bpmp,
- bpmp->soc->channels.thread.offset + i);
- if (err < 0)
- goto cleanup_threaded_channels;
- }
-
- /* mbox registration */
- bpmp->mbox.client.dev = &pdev->dev;
- bpmp->mbox.client.rx_callback = tegra_bpmp_handle_rx;
- bpmp->mbox.client.tx_block = false;
- bpmp->mbox.client.knows_txdone = false;
-
- bpmp->mbox.channel = mbox_request_channel(&bpmp->mbox.client, 0);
- if (IS_ERR(bpmp->mbox.channel)) {
- err = PTR_ERR(bpmp->mbox.channel);
- dev_err(&pdev->dev, "failed to get HSP mailbox: %d\n", err);
- goto cleanup_threaded_channels;
- }
-
- /* reset message channels */
- tegra_bpmp_channel_reset(bpmp->tx_channel);
- tegra_bpmp_channel_reset(bpmp->rx_channel);
- for (i = 0; i < bpmp->threaded.count; i++)
- tegra_bpmp_channel_reset(&bpmp->threaded_channels[i]);
+ return err;
err = tegra_bpmp_request_mrq(bpmp, MRQ_PING,
tegra_bpmp_mrq_handle_ping, bpmp);
if (err < 0)
- goto free_mbox;
+ goto deinit;
err = tegra_bpmp_ping(bpmp);
if (err < 0) {
@@ -867,17 +768,23 @@ static int tegra_bpmp_probe(struct platform_device *pdev)
if (err < 0)
goto free_mrq;
- err = tegra_bpmp_init_clocks(bpmp);
- if (err < 0)
- goto free_mrq;
+ if (of_find_property(pdev->dev.of_node, "#clock-cells", NULL)) {
+ err = tegra_bpmp_init_clocks(bpmp);
+ if (err < 0)
+ goto free_mrq;
+ }
- err = tegra_bpmp_init_resets(bpmp);
- if (err < 0)
- goto free_mrq;
+ if (of_find_property(pdev->dev.of_node, "#reset-cells", NULL)) {
+ err = tegra_bpmp_init_resets(bpmp);
+ if (err < 0)
+ goto free_mrq;
+ }
- err = tegra_bpmp_init_powergates(bpmp);
- if (err < 0)
- goto free_mrq;
+ if (of_find_property(pdev->dev.of_node, "#power-domain-cells", NULL)) {
+ err = tegra_bpmp_init_powergates(bpmp);
+ if (err < 0)
+ goto free_mrq;
+ }
err = tegra_bpmp_init_debugfs(bpmp);
if (err < 0)
@@ -887,41 +794,27 @@ static int tegra_bpmp_probe(struct platform_device *pdev)
free_mrq:
tegra_bpmp_free_mrq(bpmp, MRQ_PING, bpmp);
-free_mbox:
- mbox_free_channel(bpmp->mbox.channel);
-cleanup_threaded_channels:
- for (i = 0; i < bpmp->threaded.count; i++) {
- if (bpmp->threaded_channels[i].bpmp)
- tegra_bpmp_channel_cleanup(&bpmp->threaded_channels[i]);
- }
+deinit:
+ if (bpmp->soc->ops->deinit)
+ bpmp->soc->ops->deinit(bpmp);
- tegra_bpmp_channel_cleanup(bpmp->rx_channel);
-cleanup_tx_channel:
- tegra_bpmp_channel_cleanup(bpmp->tx_channel);
-free_rx:
- gen_pool_free(bpmp->rx.pool, (unsigned long)bpmp->rx.virt, 4096);
-free_tx:
- gen_pool_free(bpmp->tx.pool, (unsigned long)bpmp->tx.virt, 4096);
return err;
}
static int __maybe_unused tegra_bpmp_resume(struct device *dev)
{
struct tegra_bpmp *bpmp = dev_get_drvdata(dev);
- unsigned int i;
-
- /* reset message channels */
- tegra_bpmp_channel_reset(bpmp->tx_channel);
- tegra_bpmp_channel_reset(bpmp->rx_channel);
-
- for (i = 0; i < bpmp->threaded.count; i++)
- tegra_bpmp_channel_reset(&bpmp->threaded_channels[i]);
- return 0;
+ if (bpmp->soc->ops->resume)
+ return bpmp->soc->ops->resume(bpmp);
+ else
+ return 0;
}
static SIMPLE_DEV_PM_OPS(tegra_bpmp_pm_ops, NULL, tegra_bpmp_resume);
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
+ IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
static const struct tegra_bpmp_soc tegra186_soc = {
.channels = {
.cpu_tx = {
@@ -938,11 +831,42 @@ static const struct tegra_bpmp_soc tegra186_soc = {
.timeout = 0,
},
},
+ .ops = &tegra186_bpmp_ops,
.num_resets = 193,
};
+#endif
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+static const struct tegra_bpmp_soc tegra210_soc = {
+ .channels = {
+ .cpu_tx = {
+ .offset = 0,
+ .count = 1,
+ .timeout = 60 * USEC_PER_SEC,
+ },
+ .thread = {
+ .offset = 4,
+ .count = 1,
+ .timeout = 600 * USEC_PER_SEC,
+ },
+ .cpu_rx = {
+ .offset = 8,
+ .count = 1,
+ .timeout = 0,
+ },
+ },
+ .ops = &tegra210_bpmp_ops,
+};
+#endif
static const struct of_device_id tegra_bpmp_match[] = {
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
+ IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
{ .compatible = "nvidia,tegra186-bpmp", .data = &tegra186_soc },
+#endif
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+ { .compatible = "nvidia,tegra210-bpmp", .data = &tegra210_soc },
+#endif
{ }
};
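None of this changes the consumer-facing API: clients still obtain a handle with tegra_bpmp_get() and issue MRQs with tegra_bpmp_transfer(), while the doorbell and acknowledge details stay behind the new ops. A minimal ping round-trip, assuming the mrq_ping_request/mrq_ping_response layout from bpmp-abi.h:

#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>

static int example_bpmp_ping(struct device *dev)
{
        struct mrq_ping_request request = { .challenge = 42 };
        struct mrq_ping_response response;
        struct tegra_bpmp_message msg = {
                .mrq = MRQ_PING,
                .tx = { .data = &request, .size = sizeof(request) },
                .rx = { .data = &response, .size = sizeof(response) },
        };
        struct tegra_bpmp *bpmp;
        int err;

        bpmp = tegra_bpmp_get(dev);
        if (IS_ERR(bpmp))
                return PTR_ERR(bpmp);

        /* blocks until the firmware answers or the channel times out */
        err = tegra_bpmp_transfer(bpmp, &msg);

        tegra_bpmp_put(bpmp);

        return err;
}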
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
index 69ed1464175c..3fbbb61012c4 100644
--- a/drivers/firmware/ti_sci.c
+++ b/drivers/firmware/ti_sci.c
@@ -146,25 +146,8 @@ static int ti_sci_debug_show(struct seq_file *s, void *unused)
return 0;
}
-/**
- * ti_sci_debug_open() - debug file open
- * @inode: inode pointer
- * @file: file pointer
- *
- * Return: result of single_open
- */
-static int ti_sci_debug_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ti_sci_debug_show, inode->i_private);
-}
-
-/* log file operations */
-static const struct file_operations ti_sci_debug_fops = {
- .open = ti_sci_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+/* Provide the log file operations interface */
+DEFINE_SHOW_ATTRIBUTE(ti_sci_debug);
/**
* ti_sci_debugfs_create() - Create log debug file
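DEFINE_SHOW_ATTRIBUTE() (from linux/seq_file.h) generates exactly the open/fops boilerplate deleted above from a <name>_show() function, here ti_sci_debug_show(). A generic sketch of the pattern with hypothetical names:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

/* hypothetical state exposed through debugfs */
static int example_show(struct seq_file *s, void *unused)
{
        seq_printf(s, "value: %d\n", 42);
        return 0;
}
DEFINE_SHOW_ATTRIBUTE(example);

static void example_debugfs_create(struct dentry *parent)
{
        /* read-only file backed by the generated example_fops */
        debugfs_create_file("example", 0444, parent, NULL, &example_fops);
}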
diff --git a/drivers/firmware/xilinx/Kconfig b/drivers/firmware/xilinx/Kconfig
index 8f44b9cd295a..bd33bbf70daf 100644
--- a/drivers/firmware/xilinx/Kconfig
+++ b/drivers/firmware/xilinx/Kconfig
@@ -6,6 +6,7 @@ menu "Zynq MPSoC Firmware Drivers"
config ZYNQMP_FIRMWARE
bool "Enable Xilinx Zynq MPSoC firmware interface"
+ select MFD_CORE
help
Firmware interface driver is used by different
drivers to communicate with the firmware for
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 9a1c72a9280f..98f936125643 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -14,6 +14,7 @@
#include <linux/compiler.h>
#include <linux/device.h>
#include <linux/init.h>
+#include <linux/mfd/core.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -23,6 +24,12 @@
#include <linux/firmware/xlnx-zynqmp.h>
#include "zynqmp-debug.h"
+static const struct mfd_cell firmware_devs[] = {
+ {
+ .name = "zynqmp_power_controller",
+ },
+};
+
/**
* zynqmp_pm_ret_code() - Convert PMU-FW error codes to Linux error codes
* @ret_status: PMUFW return code
@@ -187,6 +194,29 @@ static int zynqmp_pm_get_api_version(u32 *version)
}
/**
+ * zynqmp_pm_get_chipid() - Get silicon ID registers
+ * @idcode: IDCODE register
+ * @version: version register
+ *
+ * Return: Returns the status of the operation and the idcode and version
+ * registers in @idcode and @version.
+ */
+static int zynqmp_pm_get_chipid(u32 *idcode, u32 *version)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!idcode || !version)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_GET_CHIPID, 0, 0, 0, 0, ret_payload);
+ *idcode = ret_payload[1];
+ *version = ret_payload[2];
+
+ return ret;
+}
+
+/**
* zynqmp_pm_get_trustzone_version() - Get secure trustzone firmware version
* @version: Returned version value
*
@@ -469,8 +499,129 @@ static int zynqmp_pm_ioctl(u32 node_id, u32 ioctl_id, u32 arg1, u32 arg2,
arg1, arg2, out);
}
+/**
+ * zynqmp_pm_reset_assert() - Request setting of reset (1 - assert, 0 - release)
+ * @reset: Reset to be configured
+ * @assert_flag: Flag stating should reset be asserted (1) or
+ * released (0)
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
+ const enum zynqmp_pm_reset_action assert_flag)
+{
+ return zynqmp_pm_invoke_fn(PM_RESET_ASSERT, reset, assert_flag,
+ 0, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_reset_get_status() - Get status of the reset
+ * @reset: Reset whose status should be returned
+ * @status: Returned status
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset,
+ u32 *status)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!status)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_RESET_GET_STATUS, reset, 0,
+ 0, 0, ret_payload);
+ *status = ret_payload[1];
+
+ return ret;
+}
+
+/**
+ * zynqmp_pm_init_finalize() - PM call to inform firmware that the caller
+ * master has initialized its own power management
+ *
+ * This API function is used to notify the power management controller
+ * about the completed power management initialization.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_init_finalize(void)
+{
+ return zynqmp_pm_invoke_fn(PM_PM_INIT_FINALIZE, 0, 0, 0, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_set_suspend_mode() - Set system suspend mode
+ * @mode: Mode to set for system suspend
+ *
+ * This API function is used to set mode of system suspend.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_set_suspend_mode(u32 mode)
+{
+ return zynqmp_pm_invoke_fn(PM_SET_SUSPEND_MODE, mode, 0, 0, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_request_node() - Request a node with specific capabilities
+ * @node: Node ID of the slave
+ * @capabilities: Requested capabilities of the slave
+ * @qos: Quality of service (not supported)
+ * @ack: Flag to specify whether acknowledge is requested
+ *
+ * This function is used by a master to request a particular node from the
+ * firmware. Every master must request a node before using it.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_request_node(const u32 node, const u32 capabilities,
+ const u32 qos,
+ const enum zynqmp_pm_request_ack ack)
+{
+ return zynqmp_pm_invoke_fn(PM_REQUEST_NODE, node, capabilities,
+ qos, ack, NULL);
+}
+
+/**
+ * zynqmp_pm_release_node() - Release a node
+ * @node: Node ID of the slave
+ *
+ * This function is used by a master to inform the firmware that the master
+ * has released a node. Once released, the master must not use that node
+ * again without re-requesting it.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_release_node(const u32 node)
+{
+ return zynqmp_pm_invoke_fn(PM_RELEASE_NODE, node, 0, 0, 0, NULL);
+}
+
+/**
+ * zynqmp_pm_set_requirement() - PM call to set requirement for PM slaves
+ * @node: Node ID of the slave
+ * @capabilities: Requested capabilities of the slave
+ * @qos: Quality of service (not supported)
+ * @ack: Flag to specify whether acknowledge is requested
+ *
+ * This API function is to be used for slaves that a PU has already requested,
+ * in order to change their capabilities.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities,
+ const u32 qos,
+ const enum zynqmp_pm_request_ack ack)
+{
+ return zynqmp_pm_invoke_fn(PM_SET_REQUIREMENT, node, capabilities,
+ qos, ack, NULL);
+}
+
static const struct zynqmp_eemi_ops eemi_ops = {
.get_api_version = zynqmp_pm_get_api_version,
+ .get_chipid = zynqmp_pm_get_chipid,
.query_data = zynqmp_pm_query_data,
.clock_enable = zynqmp_pm_clock_enable,
.clock_disable = zynqmp_pm_clock_disable,
@@ -482,6 +633,13 @@ static const struct zynqmp_eemi_ops eemi_ops = {
.clock_setparent = zynqmp_pm_clock_setparent,
.clock_getparent = zynqmp_pm_clock_getparent,
.ioctl = zynqmp_pm_ioctl,
+ .reset_assert = zynqmp_pm_reset_assert,
+ .reset_get_status = zynqmp_pm_reset_get_status,
+ .init_finalize = zynqmp_pm_init_finalize,
+ .set_suspend_mode = zynqmp_pm_set_suspend_mode,
+ .request_node = zynqmp_pm_request_node,
+ .release_node = zynqmp_pm_release_node,
+ .set_requirement = zynqmp_pm_set_requirement,
};
/**
@@ -538,11 +696,19 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
zynqmp_pm_api_debugfs_init();
+ ret = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE, firmware_devs,
+ ARRAY_SIZE(firmware_devs), NULL, 0, NULL);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add MFD devices %d\n", ret);
+ return ret;
+ }
+
return of_platform_populate(dev->of_node, NULL, NULL, dev);
}
static int zynqmp_firmware_remove(struct platform_device *pdev)
{
+ mfd_remove_devices(&pdev->dev);
zynqmp_pm_api_debugfs_exit();
return 0;
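Consumers reach these new EEMI hooks through zynqmp_pm_get_eemi_ops(), as the ZynqMP reset and nvmem drivers elsewhere in this pull do. A hedged sketch of a reset round-trip; the reset id is taken as a parameter and the PM_RESET_ACTION_* names are assumed from xlnx-zynqmp.h rather than shown in this hunk:

#include <linux/firmware/xlnx-zynqmp.h>

static int example_zynqmp_reset(enum zynqmp_pm_reset reset, bool assert)
{
        const struct zynqmp_eemi_ops *ops = zynqmp_pm_get_eemi_ops();
        u32 status;
        int ret;

        if (!ops || !ops->reset_assert || !ops->reset_get_status)
                return -ENXIO;

        ret = ops->reset_assert(reset, assert ? PM_RESET_ACTION_ASSERT :
                                                PM_RESET_ACTION_RELEASE);
        if (ret)
                return ret;

        /* read back the reset status reported by the PMU firmware */
        return ops->reset_get_status(reset, &status);
}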
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 12980a4ad460..ee6fb6af655e 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_MFD_88PM805) += 88pm805.o 88pm80x.o
obj-$(CONFIG_MFD_ACT8945A) += act8945a.o
obj-$(CONFIG_MFD_SM501) += sm501.o
obj-$(CONFIG_MFD_ASIC3) += asic3.o tmio_core.o
+obj-$(CONFIG_ARCH_BCM2835) += bcm2835-pm.o
obj-$(CONFIG_MFD_BCM590XX) += bcm590xx.o
obj-$(CONFIG_MFD_BD9571MWV) += bd9571mwv.o
cros_ec_core-objs := cros_ec.o
diff --git a/drivers/mfd/bcm2835-pm.c b/drivers/mfd/bcm2835-pm.c
new file mode 100644
index 000000000000..42fe67f1538e
--- /dev/null
+++ b/drivers/mfd/bcm2835-pm.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * PM MFD driver for Broadcom BCM2835
+ *
+ * This driver binds to the PM block and creates the MFD device for
+ * the WDT and power drivers.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/mfd/bcm2835-pm.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/watchdog.h>
+
+static const struct mfd_cell bcm2835_pm_devs[] = {
+ { .name = "bcm2835-wdt" },
+};
+
+static const struct mfd_cell bcm2835_power_devs[] = {
+ { .name = "bcm2835-power" },
+};
+
+static int bcm2835_pm_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ struct bcm2835_pm *pm;
+ int ret;
+
+ pm = devm_kzalloc(dev, sizeof(*pm), GFP_KERNEL);
+ if (!pm)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, pm);
+
+ pm->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pm->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pm->base))
+ return PTR_ERR(pm->base);
+
+ ret = devm_mfd_add_devices(dev, -1,
+ bcm2835_pm_devs, ARRAY_SIZE(bcm2835_pm_devs),
+ NULL, 0, NULL);
+ if (ret)
+ return ret;
+
+ /* We'll use the presence of the AXI ASB regs in the
+ * bcm2835-pm binding as the key for whether we can reference
+ * the full PM register range and support power domains.
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res) {
+ pm->asb = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pm->asb))
+ return PTR_ERR(pm->asb);
+
+ ret = devm_mfd_add_devices(dev, -1,
+ bcm2835_power_devs,
+ ARRAY_SIZE(bcm2835_power_devs),
+ NULL, 0, NULL);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id bcm2835_pm_of_match[] = {
+ { .compatible = "brcm,bcm2835-pm-wdt", },
+ { .compatible = "brcm,bcm2835-pm", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bcm2835_pm_of_match);
+
+static struct platform_driver bcm2835_pm_driver = {
+ .probe = bcm2835_pm_probe,
+ .driver = {
+ .name = "bcm2835-pm",
+ .of_match_table = bcm2835_pm_of_match,
+ },
+};
+module_platform_driver(bcm2835_pm_driver);
+
+MODULE_AUTHOR("Eric Anholt <eric@anholt.net>");
+MODULE_DESCRIPTION("Driver for Broadcom BCM2835 PM MFD");
+MODULE_LICENSE("GPL");
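The wdt and power cells created here do not map their own registers; they borrow the parent's mapping through drvdata. A minimal sketch of how a cell driver picks that up, with hypothetical function names:

#include <linux/mfd/bcm2835-pm.h>
#include <linux/platform_device.h>

static int example_cell_probe(struct platform_device *pdev)
{
        /* the MFD core registered us as a child of the bcm2835-pm device */
        struct bcm2835_pm *pm = dev_get_drvdata(pdev->dev.parent);

        if (!pm || !pm->base)
                return -ENODEV;

        /* pm->base (and pm->asb, when present) are shared with siblings */
        dev_info(&pdev->dev, "PM block mapped at %p\n", pm->base);

        return 0;
}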
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 02bf20f51349..2ba49e959c3f 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -2468,9 +2468,14 @@ static int setup_rx_flow(struct dpaa2_eth_priv *priv,
queue.destination.type = DPNI_DEST_DPCON;
queue.destination.priority = 1;
queue.user_context = (u64)(uintptr_t)fq;
+ queue.flc.stash_control = 1;
+ queue.flc.value &= 0xFFFFFFFFFFFFFFC0;
+ /* 01 01 00 - data, annotation, flow context */
+ queue.flc.value |= 0x14;
err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
DPNI_QUEUE_RX, 0, fq->flowid,
- DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
+ DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST |
+ DPNI_QUEUE_OPT_FLC,
&queue);
if (err) {
dev_err(dev, "dpni_set_queue(RX) failed\n");
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index 0a7a470ee859..4ad846ceac7c 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -192,4 +192,14 @@ config SC27XX_EFUSE
This driver can also be built as a module. If so, the module
will be called nvmem-sc27xx-efuse.
+config NVMEM_ZYNQMP
+ bool "Xilinx ZYNQMP SoC nvmem firmware support"
+ depends on ARCH_ZYNQMP
+ help
+ This is a driver to access hardware-related data such as
+ the SoC revision and IDCODE through the firmware
+ interface.
+
+ If sure, say yes. If unsure, say no.
+
endif
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index 4e8c61628f1a..2ece8ffffdda 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -41,3 +41,5 @@ obj-$(CONFIG_RAVE_SP_EEPROM) += nvmem-rave-sp-eeprom.o
nvmem-rave-sp-eeprom-y := rave-sp-eeprom.o
obj-$(CONFIG_SC27XX_EFUSE) += nvmem-sc27xx-efuse.o
nvmem-sc27xx-efuse-y := sc27xx-efuse.o
+obj-$(CONFIG_NVMEM_ZYNQMP) += nvmem_zynqmp_nvmem.o
+nvmem_zynqmp_nvmem-y := zynqmp_nvmem.o
diff --git a/drivers/nvmem/zynqmp_nvmem.c b/drivers/nvmem/zynqmp_nvmem.c
new file mode 100644
index 000000000000..490c8fcaec80
--- /dev/null
+++ b/drivers/nvmem/zynqmp_nvmem.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 Xilinx, Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+
+#define SILICON_REVISION_MASK 0xF
+
+struct zynqmp_nvmem_data {
+ struct device *dev;
+ struct nvmem_device *nvmem;
+};
+
+static int zynqmp_nvmem_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ int ret;
+ int idcode, version;
+ struct zynqmp_nvmem_data *priv = context;
+
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+
+ if (!eemi_ops || !eemi_ops->get_chipid)
+ return -ENXIO;
+
+ ret = eemi_ops->get_chipid(&idcode, &version);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(priv->dev, "Read chipid val %x %x\n", idcode, version);
+ *(int *)val = version & SILICON_REVISION_MASK;
+
+ return 0;
+}
+
+static struct nvmem_config econfig = {
+ .name = "zynqmp-nvmem",
+ .owner = THIS_MODULE,
+ .word_size = 1,
+ .size = 1,
+ .read_only = true,
+};
+
+static const struct of_device_id zynqmp_nvmem_match[] = {
+ { .compatible = "xlnx,zynqmp-nvmem-fw", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, zynqmp_nvmem_match);
+
+static int zynqmp_nvmem_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct zynqmp_nvmem_data *priv;
+
+ priv = devm_kzalloc(dev, sizeof(struct zynqmp_nvmem_data), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+ econfig.dev = dev;
+ econfig.reg_read = zynqmp_nvmem_read;
+ econfig.priv = priv;
+
+ priv->nvmem = devm_nvmem_register(dev, &econfig);
+
+ return PTR_ERR_OR_ZERO(priv->nvmem);
+}
+
+static struct platform_driver zynqmp_nvmem_driver = {
+ .probe = zynqmp_nvmem_probe,
+ .driver = {
+ .name = "zynqmp-nvmem",
+ .of_match_table = zynqmp_nvmem_match,
+ },
+};
+
+module_platform_driver(zynqmp_nvmem_driver);
+
+MODULE_AUTHOR("Michal Simek <michal.simek@xilinx.com>, Nava kishore Manne <navam@xilinx.com>");
+MODULE_DESCRIPTION("ZynqMP NVMEM driver");
+MODULE_LICENSE("GPL");
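The single byte exported here is consumed through the ordinary nvmem API. A hedged sketch of a reader, assuming the device can be looked up by the "zynqmp-nvmem" name registered above:

#include <linux/nvmem-consumer.h>

static int example_read_soc_revision(struct device *dev, u8 *revision)
{
        struct nvmem_device *nvmem;
        int ret;

        nvmem = nvmem_device_get(dev, "zynqmp-nvmem");
        if (IS_ERR(nvmem))
                return PTR_ERR(nvmem);

        /* single read-only byte: the masked silicon revision */
        ret = nvmem_device_read(nvmem, 0, 1, revision);
        nvmem_device_put(nvmem);

        return ret < 0 ? ret : 0;
}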
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 18f1639dbc4a..e06a0ab05ad6 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -131,6 +131,24 @@ unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
/**
+ * dev_pm_opp_get_level() - Gets the level corresponding to an available opp
+ * @opp: opp for which the level value has to be returned
+ *
+ * Return: level read from device tree corresponding to the opp, else
+ * return 0.
+ */
+unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp)
+{
+ if (IS_ERR_OR_NULL(opp) || !opp->available) {
+ pr_err("%s: Invalid parameters\n", __func__);
+ return 0;
+ }
+
+ return opp->level;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_level);
+
+/**
* dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
* @opp: opp for which turbo mode is being verified
*
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index 06f0f632ec47..1779f2c93291 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -594,6 +594,8 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
new_opp->rate = (unsigned long)rate;
}
+ of_property_read_u32(np, "opp-level", &new_opp->level);
+
/* Check if the OPP supports hardware's hierarchy of versions or not */
if (!_opp_is_supported(dev, opp_table, np)) {
dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h
index e24d81497375..4458175aa661 100644
--- a/drivers/opp/opp.h
+++ b/drivers/opp/opp.h
@@ -60,6 +60,7 @@ extern struct list_head opp_tables;
* @suspend: true if suspend OPP
* @pstate: Device's power domain's performance state.
* @rate: Frequency in hertz
+ * @level: Performance level
* @supplies: Power supplies voltage/current values
* @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's
* frequency from any other OPP's frequency.
@@ -80,6 +81,7 @@ struct dev_pm_opp {
bool suspend;
unsigned int pstate;
unsigned long rate;
+ unsigned int level;
struct dev_pm_opp_supply *supplies;
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 2e01bd833ffd..2c8c23db92fb 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -40,6 +40,14 @@ config RESET_BERLIN
help
This enables the reset controller driver for Marvell Berlin SoCs.
+config RESET_BRCMSTB
+ tristate "Broadcom STB reset controller"
+ depends on ARCH_BRCMSTB || COMPILE_TEST
+ default ARCH_BRCMSTB
+ help
+ This enables the reset controller driver for Broadcom STB SoCs using
+ a SUN_TOP_CTRL_SW_INIT style controller.
+
config RESET_HSDK
bool "Synopsys HSDK Reset Driver"
depends on HAS_IOMEM
@@ -48,9 +56,9 @@ config RESET_HSDK
This enables the reset controller driver for HSDK board.
config RESET_IMX7
- bool "i.MX7 Reset Driver" if COMPILE_TEST
+ bool "i.MX7/8 Reset Driver" if COMPILE_TEST
depends on HAS_IOMEM
- default SOC_IMX7D
+ default SOC_IMX7D || (ARM64 && ARCH_MXC)
select MFD_SYSCON
help
This enables the reset controller driver for i.MX7 SoCs.
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index dc7874df78d9..61456b8f659c 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_RESET_A10SR) += reset-a10sr.o
obj-$(CONFIG_RESET_ATH79) += reset-ath79.o
obj-$(CONFIG_RESET_AXS10X) += reset-axs10x.o
obj-$(CONFIG_RESET_BERLIN) += reset-berlin.o
+obj-$(CONFIG_RESET_BRCMSTB) += reset-brcmstb.o
obj-$(CONFIG_RESET_HSDK) += reset-hsdk.o
obj-$(CONFIG_RESET_IMX7) += reset-imx7.o
obj-$(CONFIG_RESET_LANTIQ) += reset-lantiq.o
@@ -26,4 +27,5 @@ obj-$(CONFIG_RESET_TI_SYSCON) += reset-ti-syscon.o
obj-$(CONFIG_RESET_UNIPHIER) += reset-uniphier.o
obj-$(CONFIG_RESET_UNIPHIER_GLUE) += reset-uniphier-glue.o
obj-$(CONFIG_RESET_ZYNQ) += reset-zynq.o
+obj-$(CONFIG_ARCH_ZYNQMP) += reset-zynqmp.o
diff --git a/drivers/reset/reset-brcmstb.c b/drivers/reset/reset-brcmstb.c
new file mode 100644
index 000000000000..a608f445dad6
--- /dev/null
+++ b/drivers/reset/reset-brcmstb.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Broadcom STB generic reset controller for SW_INIT style reset controller
+ *
+ * Author: Florian Fainelli <f.fainelli@gmail.com>
+ * Copyright (C) 2018 Broadcom
+ */
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/types.h>
+
+struct brcmstb_reset {
+ void __iomem *base;
+ struct reset_controller_dev rcdev;
+};
+
+#define SW_INIT_SET 0x00
+#define SW_INIT_CLEAR 0x04
+#define SW_INIT_STATUS 0x08
+
+#define SW_INIT_BIT(id) BIT((id) & 0x1f)
+#define SW_INIT_BANK(id) ((id) >> 5)
+
+/* A full bank contains extra registers that we are not utilizing but
+ * which still count towards the size of a single bank.
+ */
+#define SW_INIT_BANK_SIZE 0x18
+
+static inline
+struct brcmstb_reset *to_brcmstb(struct reset_controller_dev *rcdev)
+{
+ return container_of(rcdev, struct brcmstb_reset, rcdev);
+}
+
+static int brcmstb_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ unsigned int off = SW_INIT_BANK(id) * SW_INIT_BANK_SIZE;
+ struct brcmstb_reset *priv = to_brcmstb(rcdev);
+
+ writel_relaxed(SW_INIT_BIT(id), priv->base + off + SW_INIT_SET);
+
+ return 0;
+}
+
+static int brcmstb_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ unsigned int off = SW_INIT_BANK(id) * SW_INIT_BANK_SIZE;
+ struct brcmstb_reset *priv = to_brcmstb(rcdev);
+
+ writel_relaxed(SW_INIT_BIT(id), priv->base + off + SW_INIT_CLEAR);
+	/* The maximum delay between de-asserting a line and seeing the
+	 * block operational is typically 14us in the worst case; build
+	 * in some slack here.
+	 */
+ usleep_range(100, 200);
+
+ return 0;
+}
+
+static int brcmstb_reset_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ unsigned int off = SW_INIT_BANK(id) * SW_INIT_BANK_SIZE;
+ struct brcmstb_reset *priv = to_brcmstb(rcdev);
+
+ return readl_relaxed(priv->base + off + SW_INIT_STATUS) &
+ SW_INIT_BIT(id);
+}
+
+static const struct reset_control_ops brcmstb_reset_ops = {
+ .assert = brcmstb_reset_assert,
+ .deassert = brcmstb_reset_deassert,
+ .status = brcmstb_reset_status,
+};
+
+static int brcmstb_reset_probe(struct platform_device *pdev)
+{
+ struct device *kdev = &pdev->dev;
+ struct brcmstb_reset *priv;
+ struct resource *res;
+
+ priv = devm_kzalloc(kdev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!IS_ALIGNED(res->start, SW_INIT_BANK_SIZE) ||
+ !IS_ALIGNED(resource_size(res), SW_INIT_BANK_SIZE)) {
+ dev_err(kdev, "incorrect register range\n");
+ return -EINVAL;
+ }
+
+ priv->base = devm_ioremap_resource(kdev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ dev_set_drvdata(kdev, priv);
+
+ priv->rcdev.owner = THIS_MODULE;
+ priv->rcdev.nr_resets = DIV_ROUND_DOWN_ULL(resource_size(res),
+ SW_INIT_BANK_SIZE) * 32;
+ priv->rcdev.ops = &brcmstb_reset_ops;
+ priv->rcdev.of_node = kdev->of_node;
+ /* Use defaults: 1 cell and simple xlate function */
+
+ return devm_reset_controller_register(kdev, &priv->rcdev);
+}
+
+static const struct of_device_id brcmstb_reset_of_match[] = {
+ { .compatible = "brcm,brcmstb-reset" },
+ { /* sentinel */ }
+};
+
+static struct platform_driver brcmstb_reset_driver = {
+ .probe = brcmstb_reset_probe,
+ .driver = {
+ .name = "brcmstb-reset",
+ .of_match_table = brcmstb_reset_of_match,
+ },
+};
+module_platform_driver(brcmstb_reset_driver);
+
+MODULE_AUTHOR("Broadcom");
+MODULE_DESCRIPTION("Broadcom STB reset controller");
+MODULE_LICENSE("GPL");
diff --git a/drivers/reset/reset-imx7.c b/drivers/reset/reset-imx7.c
index 77911fa8f31d..aed76e33a0a9 100644
--- a/drivers/reset/reset-imx7.c
+++ b/drivers/reset/reset-imx7.c
@@ -17,14 +17,27 @@
#include <linux/mfd/syscon.h>
#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/regmap.h>
#include <dt-bindings/reset/imx7-reset.h>
+#include <dt-bindings/reset/imx8mq-reset.h>
+
+struct imx7_src_signal {
+ unsigned int offset, bit;
+};
+
+struct imx7_src_variant {
+ const struct imx7_src_signal *signals;
+ unsigned int signals_num;
+ struct reset_control_ops ops;
+};
struct imx7_src {
struct reset_controller_dev rcdev;
struct regmap *regmap;
+ const struct imx7_src_signal *signals;
};
enum imx7_src_registers {
@@ -39,9 +52,14 @@ enum imx7_src_registers {
SRC_DDRC_RCR = 0x1000,
};
-struct imx7_src_signal {
- unsigned int offset, bit;
-};
+static int imx7_reset_update(struct imx7_src *imx7src,
+ unsigned long id, unsigned int value)
+{
+ const struct imx7_src_signal *signal = &imx7src->signals[id];
+
+ return regmap_update_bits(imx7src->regmap,
+ signal->offset, signal->bit, value);
+}
static const struct imx7_src_signal imx7_src_signals[IMX7_RESET_NUM] = {
[IMX7_RESET_A7_CORE_POR_RESET0] = { SRC_A7RCR0, BIT(0) },
@@ -81,8 +99,8 @@ static int imx7_reset_set(struct reset_controller_dev *rcdev,
unsigned long id, bool assert)
{
struct imx7_src *imx7src = to_imx7_src(rcdev);
- const struct imx7_src_signal *signal = &imx7_src_signals[id];
- unsigned int value = assert ? signal->bit : 0;
+ const unsigned int bit = imx7src->signals[id].bit;
+ unsigned int value = assert ? bit : 0;
switch (id) {
case IMX7_RESET_PCIEPHY:
@@ -95,12 +113,11 @@ static int imx7_reset_set(struct reset_controller_dev *rcdev,
break;
case IMX7_RESET_PCIE_CTRL_APPS_EN:
- value = (assert) ? 0 : signal->bit;
+ value = assert ? 0 : bit;
break;
}
- return regmap_update_bits(imx7src->regmap,
- signal->offset, signal->bit, value);
+ return imx7_reset_update(imx7src, id, value);
}
static int imx7_reset_assert(struct reset_controller_dev *rcdev,
@@ -115,9 +132,133 @@ static int imx7_reset_deassert(struct reset_controller_dev *rcdev,
return imx7_reset_set(rcdev, id, false);
}
-static const struct reset_control_ops imx7_reset_ops = {
- .assert = imx7_reset_assert,
- .deassert = imx7_reset_deassert,
+static const struct imx7_src_variant variant_imx7 = {
+ .signals = imx7_src_signals,
+ .signals_num = ARRAY_SIZE(imx7_src_signals),
+ .ops = {
+ .assert = imx7_reset_assert,
+ .deassert = imx7_reset_deassert,
+ },
+};
+
+enum imx8mq_src_registers {
+ SRC_A53RCR0 = 0x0004,
+ SRC_HDMI_RCR = 0x0030,
+ SRC_DISP_RCR = 0x0034,
+ SRC_GPU_RCR = 0x0040,
+ SRC_VPU_RCR = 0x0044,
+ SRC_PCIE2_RCR = 0x0048,
+ SRC_MIPIPHY1_RCR = 0x004c,
+ SRC_MIPIPHY2_RCR = 0x0050,
+ SRC_DDRC2_RCR = 0x1004,
+};
+
+static const struct imx7_src_signal imx8mq_src_signals[IMX8MQ_RESET_NUM] = {
+ [IMX8MQ_RESET_A53_CORE_POR_RESET0] = { SRC_A53RCR0, BIT(0) },
+ [IMX8MQ_RESET_A53_CORE_POR_RESET1] = { SRC_A53RCR0, BIT(1) },
+ [IMX8MQ_RESET_A53_CORE_POR_RESET2] = { SRC_A53RCR0, BIT(2) },
+ [IMX8MQ_RESET_A53_CORE_POR_RESET3] = { SRC_A53RCR0, BIT(3) },
+ [IMX8MQ_RESET_A53_CORE_RESET0] = { SRC_A53RCR0, BIT(4) },
+ [IMX8MQ_RESET_A53_CORE_RESET1] = { SRC_A53RCR0, BIT(5) },
+ [IMX8MQ_RESET_A53_CORE_RESET2] = { SRC_A53RCR0, BIT(6) },
+ [IMX8MQ_RESET_A53_CORE_RESET3] = { SRC_A53RCR0, BIT(7) },
+ [IMX8MQ_RESET_A53_DBG_RESET0] = { SRC_A53RCR0, BIT(8) },
+ [IMX8MQ_RESET_A53_DBG_RESET1] = { SRC_A53RCR0, BIT(9) },
+ [IMX8MQ_RESET_A53_DBG_RESET2] = { SRC_A53RCR0, BIT(10) },
+ [IMX8MQ_RESET_A53_DBG_RESET3] = { SRC_A53RCR0, BIT(11) },
+ [IMX8MQ_RESET_A53_ETM_RESET0] = { SRC_A53RCR0, BIT(12) },
+ [IMX8MQ_RESET_A53_ETM_RESET1] = { SRC_A53RCR0, BIT(13) },
+ [IMX8MQ_RESET_A53_ETM_RESET2] = { SRC_A53RCR0, BIT(14) },
+ [IMX8MQ_RESET_A53_ETM_RESET3] = { SRC_A53RCR0, BIT(15) },
+ [IMX8MQ_RESET_A53_SOC_DBG_RESET] = { SRC_A53RCR0, BIT(20) },
+ [IMX8MQ_RESET_A53_L2RESET] = { SRC_A53RCR0, BIT(21) },
+ [IMX8MQ_RESET_SW_NON_SCLR_M4C_RST] = { SRC_M4RCR, BIT(0) },
+ [IMX8MQ_RESET_OTG1_PHY_RESET] = { SRC_USBOPHY1_RCR, BIT(0) },
+ [IMX8MQ_RESET_OTG2_PHY_RESET] = { SRC_USBOPHY2_RCR, BIT(0) },
+ [IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N] = { SRC_MIPIPHY_RCR, BIT(1) },
+ [IMX8MQ_RESET_MIPI_DSI_RESET_N] = { SRC_MIPIPHY_RCR, BIT(2) },
+ [IMX8MQ_RESET_MIPI_DIS_DPI_RESET_N] = { SRC_MIPIPHY_RCR, BIT(3) },
+ [IMX8MQ_RESET_MIPI_DIS_ESC_RESET_N] = { SRC_MIPIPHY_RCR, BIT(4) },
+ [IMX8MQ_RESET_MIPI_DIS_PCLK_RESET_N] = { SRC_MIPIPHY_RCR, BIT(5) },
+ [IMX8MQ_RESET_PCIEPHY] = { SRC_PCIEPHY_RCR,
+ BIT(2) | BIT(1) },
+ [IMX8MQ_RESET_PCIEPHY_PERST] = { SRC_PCIEPHY_RCR, BIT(3) },
+ [IMX8MQ_RESET_PCIE_CTRL_APPS_EN] = { SRC_PCIEPHY_RCR, BIT(6) },
+ [IMX8MQ_RESET_PCIE_CTRL_APPS_TURNOFF] = { SRC_PCIEPHY_RCR, BIT(11) },
+ [IMX8MQ_RESET_HDMI_PHY_APB_RESET] = { SRC_HDMI_RCR, BIT(0) },
+ [IMX8MQ_RESET_DISP_RESET] = { SRC_DISP_RCR, BIT(0) },
+ [IMX8MQ_RESET_GPU_RESET] = { SRC_GPU_RCR, BIT(0) },
+ [IMX8MQ_RESET_VPU_RESET] = { SRC_VPU_RCR, BIT(0) },
+ [IMX8MQ_RESET_PCIEPHY2] = { SRC_PCIE2_RCR,
+ BIT(2) | BIT(1) },
+ [IMX8MQ_RESET_PCIEPHY2_PERST] = { SRC_PCIE2_RCR, BIT(3) },
+ [IMX8MQ_RESET_PCIE2_CTRL_APPS_EN] = { SRC_PCIE2_RCR, BIT(6) },
+ [IMX8MQ_RESET_PCIE2_CTRL_APPS_TURNOFF] = { SRC_PCIE2_RCR, BIT(11) },
+ [IMX8MQ_RESET_MIPI_CSI1_CORE_RESET] = { SRC_MIPIPHY1_RCR, BIT(0) },
+ [IMX8MQ_RESET_MIPI_CSI1_PHY_REF_RESET] = { SRC_MIPIPHY1_RCR, BIT(1) },
+ [IMX8MQ_RESET_MIPI_CSI1_ESC_RESET] = { SRC_MIPIPHY1_RCR, BIT(2) },
+ [IMX8MQ_RESET_MIPI_CSI2_CORE_RESET] = { SRC_MIPIPHY2_RCR, BIT(0) },
+ [IMX8MQ_RESET_MIPI_CSI2_PHY_REF_RESET] = { SRC_MIPIPHY2_RCR, BIT(1) },
+ [IMX8MQ_RESET_MIPI_CSI2_ESC_RESET] = { SRC_MIPIPHY2_RCR, BIT(2) },
+ [IMX8MQ_RESET_DDRC1_PRST] = { SRC_DDRC_RCR, BIT(0) },
+ [IMX8MQ_RESET_DDRC1_CORE_RESET] = { SRC_DDRC_RCR, BIT(1) },
+ [IMX8MQ_RESET_DDRC1_PHY_RESET] = { SRC_DDRC_RCR, BIT(2) },
+ [IMX8MQ_RESET_DDRC2_PHY_RESET] = { SRC_DDRC2_RCR, BIT(0) },
+ [IMX8MQ_RESET_DDRC2_CORE_RESET] = { SRC_DDRC2_RCR, BIT(1) },
+ [IMX8MQ_RESET_DDRC2_PRST] = { SRC_DDRC2_RCR, BIT(2) },
+};
+
+static int imx8mq_reset_set(struct reset_controller_dev *rcdev,
+ unsigned long id, bool assert)
+{
+ struct imx7_src *imx7src = to_imx7_src(rcdev);
+ const unsigned int bit = imx7src->signals[id].bit;
+ unsigned int value = assert ? bit : 0;
+
+ switch (id) {
+ case IMX8MQ_RESET_PCIEPHY:
+ case IMX8MQ_RESET_PCIEPHY2: /* fallthrough */
+		/*
+		 * Wait for more than 10us before releasing the PHY
+		 * g_rst and btnrst.
+		 */
+ if (!assert)
+ udelay(10);
+ break;
+
+ case IMX8MQ_RESET_PCIE_CTRL_APPS_EN:
+ case IMX8MQ_RESET_PCIE2_CTRL_APPS_EN: /* fallthrough */
+ case IMX8MQ_RESET_MIPI_DIS_PCLK_RESET_N: /* fallthrough */
+ case IMX8MQ_RESET_MIPI_DIS_ESC_RESET_N: /* fallthrough */
+ case IMX8MQ_RESET_MIPI_DIS_DPI_RESET_N: /* fallthrough */
+ case IMX8MQ_RESET_MIPI_DSI_RESET_N: /* fallthrough */
+ case IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N: /* fallthrough */
+ value = assert ? 0 : bit;
+ break;
+ }
+
+ return imx7_reset_update(imx7src, id, value);
+}
+
+static int imx8mq_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return imx8mq_reset_set(rcdev, id, true);
+}
+
+static int imx8mq_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return imx8mq_reset_set(rcdev, id, false);
+}
+
+static const struct imx7_src_variant variant_imx8mq = {
+ .signals = imx8mq_src_signals,
+ .signals_num = ARRAY_SIZE(imx8mq_src_signals),
+ .ops = {
+ .assert = imx8mq_reset_assert,
+ .deassert = imx8mq_reset_deassert,
+ },
};
static int imx7_reset_probe(struct platform_device *pdev)
@@ -125,11 +266,13 @@ static int imx7_reset_probe(struct platform_device *pdev)
struct imx7_src *imx7src;
struct device *dev = &pdev->dev;
struct regmap_config config = { .name = "src" };
+ const struct imx7_src_variant *variant = of_device_get_match_data(dev);
imx7src = devm_kzalloc(dev, sizeof(*imx7src), GFP_KERNEL);
if (!imx7src)
return -ENOMEM;
+ imx7src->signals = variant->signals;
imx7src->regmap = syscon_node_to_regmap(dev->of_node);
if (IS_ERR(imx7src->regmap)) {
dev_err(dev, "Unable to get imx7-src regmap");
@@ -138,15 +281,16 @@ static int imx7_reset_probe(struct platform_device *pdev)
regmap_attach_dev(dev, imx7src->regmap, &config);
imx7src->rcdev.owner = THIS_MODULE;
- imx7src->rcdev.nr_resets = IMX7_RESET_NUM;
- imx7src->rcdev.ops = &imx7_reset_ops;
+ imx7src->rcdev.nr_resets = variant->signals_num;
+ imx7src->rcdev.ops = &variant->ops;
imx7src->rcdev.of_node = dev->of_node;
return devm_reset_controller_register(dev, &imx7src->rcdev);
}
static const struct of_device_id imx7_reset_dt_ids[] = {
- { .compatible = "fsl,imx7d-src", },
+ { .compatible = "fsl,imx7d-src", .data = &variant_imx7 },
+ { .compatible = "fsl,imx8mq-src", .data = &variant_imx8mq },
{ /* sentinel */ },
};
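
A hedged sketch of consuming one of the new i.MX8MQ lines, for example the PCIe PHY reset; the "pciephy" lookup name and the surrounding host driver are assumptions rather than something this patch defines:

#include <linux/reset.h>

/* Sketch: release the assumed "pciephy" reset from a PCIe host driver. */
static int example_release_pciephy(struct device *dev)
{
	struct reset_control *pciephy_reset;

	pciephy_reset = devm_reset_control_get_exclusive(dev, "pciephy");
	if (IS_ERR(pciephy_reset))
		return PTR_ERR(pciephy_reset);

	/* imx8mq_reset_set() inserts the >10us delay before the release. */
	return reset_control_deassert(pciephy_reset);
}
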
diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c
index 318cfc51c441..96953992c2bb 100644
--- a/drivers/reset/reset-socfpga.c
+++ b/drivers/reset/reset-socfpga.c
@@ -11,6 +11,7 @@
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
+#include <linux/reset/socfpga.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
@@ -18,7 +19,6 @@
#include "reset-simple.h"
#define SOCFPGA_NR_BANKS 8
-void __init socfpga_reset_init(void);
static int a10_reset_init(struct device_node *np)
{
diff --git a/drivers/reset/reset-sunxi.c b/drivers/reset/reset-sunxi.c
index db9a1a75523f..b06d724d8f21 100644
--- a/drivers/reset/reset-sunxi.c
+++ b/drivers/reset/reset-sunxi.c
@@ -18,6 +18,7 @@
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
+#include <linux/reset/sunxi.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
diff --git a/drivers/reset/reset-zynqmp.c b/drivers/reset/reset-zynqmp.c
new file mode 100644
index 000000000000..2ef1f13aa47b
--- /dev/null
+++ b/drivers/reset/reset-zynqmp.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+
+#define ZYNQMP_NR_RESETS (ZYNQMP_PM_RESET_END - ZYNQMP_PM_RESET_START)
+#define ZYNQMP_RESET_ID ZYNQMP_PM_RESET_START
+
+struct zynqmp_reset_data {
+ struct reset_controller_dev rcdev;
+ const struct zynqmp_eemi_ops *eemi_ops;
+};
+
+static inline struct zynqmp_reset_data *
+to_zynqmp_reset_data(struct reset_controller_dev *rcdev)
+{
+ return container_of(rcdev, struct zynqmp_reset_data, rcdev);
+}
+
+static int zynqmp_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct zynqmp_reset_data *priv = to_zynqmp_reset_data(rcdev);
+
+ return priv->eemi_ops->reset_assert(ZYNQMP_RESET_ID + id,
+ PM_RESET_ACTION_ASSERT);
+}
+
+static int zynqmp_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct zynqmp_reset_data *priv = to_zynqmp_reset_data(rcdev);
+
+ return priv->eemi_ops->reset_assert(ZYNQMP_RESET_ID + id,
+ PM_RESET_ACTION_RELEASE);
+}
+
+static int zynqmp_reset_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct zynqmp_reset_data *priv = to_zynqmp_reset_data(rcdev);
+ int val, err;
+
+ err = priv->eemi_ops->reset_get_status(ZYNQMP_RESET_ID + id, &val);
+ if (err)
+ return err;
+
+ return val;
+}
+
+static int zynqmp_reset_reset(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct zynqmp_reset_data *priv = to_zynqmp_reset_data(rcdev);
+
+ return priv->eemi_ops->reset_assert(ZYNQMP_RESET_ID + id,
+ PM_RESET_ACTION_PULSE);
+}
+
+static struct reset_control_ops zynqmp_reset_ops = {
+ .reset = zynqmp_reset_reset,
+ .assert = zynqmp_reset_assert,
+ .deassert = zynqmp_reset_deassert,
+ .status = zynqmp_reset_status,
+};
+
+static int zynqmp_reset_probe(struct platform_device *pdev)
+{
+ struct zynqmp_reset_data *priv;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, priv);
+
+ priv->eemi_ops = zynqmp_pm_get_eemi_ops();
+ if (!priv->eemi_ops)
+ return -ENXIO;
+
+ priv->rcdev.ops = &zynqmp_reset_ops;
+ priv->rcdev.owner = THIS_MODULE;
+ priv->rcdev.of_node = pdev->dev.of_node;
+ priv->rcdev.nr_resets = ZYNQMP_NR_RESETS;
+
+ return devm_reset_controller_register(&pdev->dev, &priv->rcdev);
+}
+
+static const struct of_device_id zynqmp_reset_dt_ids[] = {
+ { .compatible = "xlnx,zynqmp-reset", },
+ { /* sentinel */ },
+};
+
+static struct platform_driver zynqmp_reset_driver = {
+ .probe = zynqmp_reset_probe,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = zynqmp_reset_dt_ids,
+ },
+};
+
+static int __init zynqmp_reset_init(void)
+{
+ return platform_driver_register(&zynqmp_reset_driver);
+}
+
+arch_initcall(zynqmp_reset_init);
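
Because the EEMI interface also implements PM_RESET_ACTION_PULSE, consumers may use reset_control_reset() in addition to assert/deassert. A minimal sketch with assumed device-tree wiring:

#include <linux/reset.h>

/* Sketch: pulse a peripheral reset via the firmware-backed controller
 * and report whether the line is still asserted afterwards.
 */
static int example_pulse_reset(struct device *dev)
{
	struct reset_control *rstc;
	int ret;

	rstc = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(rstc))
		return PTR_ERR(rstc);

	ret = reset_control_reset(rstc);	/* PM_RESET_ACTION_PULSE */
	if (ret)
		return ret;

	return reset_control_status(rstc);	/* value from reset_get_status() */
}
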
diff --git a/drivers/soc/amlogic/meson-canvas.c b/drivers/soc/amlogic/meson-canvas.c
index fce33ca76bb6..be95a37c3fec 100644
--- a/drivers/soc/amlogic/meson-canvas.c
+++ b/drivers/soc/amlogic/meson-canvas.c
@@ -51,16 +51,30 @@ struct meson_canvas *meson_canvas_get(struct device *dev)
{
struct device_node *canvas_node;
struct platform_device *canvas_pdev;
+ struct meson_canvas *canvas;
canvas_node = of_parse_phandle(dev->of_node, "amlogic,canvas", 0);
if (!canvas_node)
return ERR_PTR(-ENODEV);
canvas_pdev = of_find_device_by_node(canvas_node);
- if (!canvas_pdev)
+ if (!canvas_pdev) {
+ of_node_put(canvas_node);
return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ of_node_put(canvas_node);
+
+ /*
+	 * If priv is NULL, it's probably because the canvas hasn't been
+	 * properly initialized yet. Bail out with -EINVAL because, in its
+	 * current state, this driver's probe cannot return -EPROBE_DEFER.
+ */
+ canvas = dev_get_drvdata(&canvas_pdev->dev);
+ if (!canvas)
+ return ERR_PTR(-EINVAL);
- return dev_get_drvdata(&canvas_pdev->dev);
+ return canvas;
}
EXPORT_SYMBOL_GPL(meson_canvas_get);
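
With this change a consumer can tell "provider not probed yet" (-EPROBE_DEFER) apart from "provider probed but not initialized" (-EINVAL). A probe-time sketch, assuming the consumer node carries the amlogic,canvas phandle that meson_canvas_get() looks up:

#include <linux/soc/amlogic/meson-canvas.h>

/* Sketch: acquire the canvas provider from a display/video driver probe. */
static int example_get_canvas(struct device *dev, struct meson_canvas **canvas)
{
	*canvas = meson_canvas_get(dev);
	if (IS_ERR(*canvas)) {
		if (PTR_ERR(*canvas) == -EPROBE_DEFER)
			dev_dbg(dev, "canvas provider not ready yet\n");
		return PTR_ERR(*canvas);
	}

	return 0;
}
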
diff --git a/drivers/soc/amlogic/meson-clk-measure.c b/drivers/soc/amlogic/meson-clk-measure.c
index daea191a66fa..19d4cbc93a17 100644
--- a/drivers/soc/amlogic/meson-clk-measure.c
+++ b/drivers/soc/amlogic/meson-clk-measure.c
@@ -165,6 +165,194 @@ static struct meson_msr_id clk_msr_gx[CLK_MSR_MAX] = {
CLK_MSR_ID(82, "ge2d"),
};
+static struct meson_msr_id clk_msr_axg[CLK_MSR_MAX] = {
+ CLK_MSR_ID(0, "ring_osc_out_ee_0"),
+ CLK_MSR_ID(1, "ring_osc_out_ee_1"),
+ CLK_MSR_ID(2, "ring_osc_out_ee_2"),
+ CLK_MSR_ID(3, "a53_ring_osc"),
+ CLK_MSR_ID(4, "gp0_pll"),
+ CLK_MSR_ID(5, "gp1_pll"),
+ CLK_MSR_ID(7, "clk81"),
+ CLK_MSR_ID(9, "encl"),
+ CLK_MSR_ID(17, "sys_pll_div16"),
+ CLK_MSR_ID(18, "sys_cpu_div16"),
+ CLK_MSR_ID(20, "rtc_osc_out"),
+ CLK_MSR_ID(23, "mmc_clk"),
+ CLK_MSR_ID(28, "sar_adc"),
+ CLK_MSR_ID(31, "mpll_test_out"),
+ CLK_MSR_ID(40, "mod_eth_tx_clk"),
+ CLK_MSR_ID(41, "mod_eth_rx_clk_rmii"),
+ CLK_MSR_ID(42, "mp0_out"),
+ CLK_MSR_ID(43, "fclk_div5"),
+ CLK_MSR_ID(44, "pwm_b"),
+ CLK_MSR_ID(45, "pwm_a"),
+ CLK_MSR_ID(46, "vpu"),
+ CLK_MSR_ID(47, "ddr_dpll_pt"),
+ CLK_MSR_ID(48, "mp1_out"),
+ CLK_MSR_ID(49, "mp2_out"),
+ CLK_MSR_ID(50, "mp3_out"),
+	CLK_MSR_ID(51, "sd_emmc_c"),
+ CLK_MSR_ID(52, "sd_emmc_b"),
+ CLK_MSR_ID(61, "gpio_msr"),
+ CLK_MSR_ID(66, "audio_slv_lrclk_c"),
+ CLK_MSR_ID(67, "audio_slv_lrclk_b"),
+ CLK_MSR_ID(68, "audio_slv_lrclk_a"),
+ CLK_MSR_ID(69, "audio_slv_sclk_c"),
+ CLK_MSR_ID(70, "audio_slv_sclk_b"),
+ CLK_MSR_ID(71, "audio_slv_sclk_a"),
+ CLK_MSR_ID(72, "pwm_d"),
+ CLK_MSR_ID(73, "pwm_c"),
+ CLK_MSR_ID(74, "wifi_beacon"),
+ CLK_MSR_ID(75, "tdmin_lb_lrcl"),
+ CLK_MSR_ID(76, "tdmin_lb_sclk"),
+ CLK_MSR_ID(77, "rng_ring_osc_0"),
+ CLK_MSR_ID(78, "rng_ring_osc_1"),
+ CLK_MSR_ID(79, "rng_ring_osc_2"),
+ CLK_MSR_ID(80, "rng_ring_osc_3"),
+ CLK_MSR_ID(81, "vapb"),
+ CLK_MSR_ID(82, "ge2d"),
+ CLK_MSR_ID(84, "audio_resample"),
+ CLK_MSR_ID(85, "audio_pdm_sys"),
+ CLK_MSR_ID(86, "audio_spdifout"),
+ CLK_MSR_ID(87, "audio_spdifin"),
+ CLK_MSR_ID(88, "audio_lrclk_f"),
+ CLK_MSR_ID(89, "audio_lrclk_e"),
+ CLK_MSR_ID(90, "audio_lrclk_d"),
+ CLK_MSR_ID(91, "audio_lrclk_c"),
+ CLK_MSR_ID(92, "audio_lrclk_b"),
+ CLK_MSR_ID(93, "audio_lrclk_a"),
+ CLK_MSR_ID(94, "audio_sclk_f"),
+ CLK_MSR_ID(95, "audio_sclk_e"),
+ CLK_MSR_ID(96, "audio_sclk_d"),
+ CLK_MSR_ID(97, "audio_sclk_c"),
+ CLK_MSR_ID(98, "audio_sclk_b"),
+ CLK_MSR_ID(99, "audio_sclk_a"),
+ CLK_MSR_ID(100, "audio_mclk_f"),
+ CLK_MSR_ID(101, "audio_mclk_e"),
+ CLK_MSR_ID(102, "audio_mclk_d"),
+ CLK_MSR_ID(103, "audio_mclk_c"),
+ CLK_MSR_ID(104, "audio_mclk_b"),
+ CLK_MSR_ID(105, "audio_mclk_a"),
+ CLK_MSR_ID(106, "pcie_refclk_n"),
+ CLK_MSR_ID(107, "pcie_refclk_p"),
+ CLK_MSR_ID(108, "audio_locker_out"),
+ CLK_MSR_ID(109, "audio_locker_in"),
+};
+
+static struct meson_msr_id clk_msr_g12a[CLK_MSR_MAX] = {
+ CLK_MSR_ID(0, "ring_osc_out_ee_0"),
+ CLK_MSR_ID(1, "ring_osc_out_ee_1"),
+ CLK_MSR_ID(2, "ring_osc_out_ee_2"),
+ CLK_MSR_ID(3, "sys_cpu_ring_osc"),
+ CLK_MSR_ID(4, "gp0_pll"),
+ CLK_MSR_ID(6, "enci"),
+ CLK_MSR_ID(7, "clk81"),
+ CLK_MSR_ID(8, "encp"),
+ CLK_MSR_ID(9, "encl"),
+ CLK_MSR_ID(10, "vdac"),
+ CLK_MSR_ID(11, "eth_tx"),
+ CLK_MSR_ID(12, "hifi_pll"),
+ CLK_MSR_ID(13, "mod_tcon"),
+ CLK_MSR_ID(14, "fec_0"),
+ CLK_MSR_ID(15, "fec_1"),
+ CLK_MSR_ID(16, "fec_2"),
+ CLK_MSR_ID(17, "sys_pll_div16"),
+ CLK_MSR_ID(18, "sys_cpu_div16"),
+ CLK_MSR_ID(19, "lcd_an_ph2"),
+ CLK_MSR_ID(20, "rtc_osc_out"),
+ CLK_MSR_ID(21, "lcd_an_ph3"),
+ CLK_MSR_ID(22, "eth_phy_ref"),
+ CLK_MSR_ID(23, "mpll_50m"),
+ CLK_MSR_ID(24, "eth_125m"),
+ CLK_MSR_ID(25, "eth_rmii"),
+ CLK_MSR_ID(26, "sc_int"),
+ CLK_MSR_ID(27, "in_mac"),
+ CLK_MSR_ID(28, "sar_adc"),
+ CLK_MSR_ID(29, "pcie_inp"),
+ CLK_MSR_ID(30, "pcie_inn"),
+ CLK_MSR_ID(31, "mpll_test_out"),
+ CLK_MSR_ID(32, "vdec"),
+ CLK_MSR_ID(33, "sys_cpu_ring_osc_1"),
+ CLK_MSR_ID(34, "eth_mpll_50m"),
+ CLK_MSR_ID(35, "mali"),
+ CLK_MSR_ID(36, "hdmi_tx_pixel"),
+ CLK_MSR_ID(37, "cdac"),
+ CLK_MSR_ID(38, "vdin_meas"),
+ CLK_MSR_ID(39, "bt656"),
+ CLK_MSR_ID(41, "eth_rx_or_rmii"),
+ CLK_MSR_ID(42, "mp0_out"),
+ CLK_MSR_ID(43, "fclk_div5"),
+ CLK_MSR_ID(44, "pwm_b"),
+ CLK_MSR_ID(45, "pwm_a"),
+ CLK_MSR_ID(46, "vpu"),
+ CLK_MSR_ID(47, "ddr_dpll_pt"),
+ CLK_MSR_ID(48, "mp1_out"),
+ CLK_MSR_ID(49, "mp2_out"),
+ CLK_MSR_ID(50, "mp3_out"),
+ CLK_MSR_ID(51, "sd_emmc_c"),
+ CLK_MSR_ID(52, "sd_emmc_b"),
+ CLK_MSR_ID(53, "sd_emmc_a"),
+ CLK_MSR_ID(54, "vpu_clkc"),
+ CLK_MSR_ID(55, "vid_pll_div_out"),
+ CLK_MSR_ID(56, "wave420l_a"),
+ CLK_MSR_ID(57, "wave420l_c"),
+ CLK_MSR_ID(58, "wave420l_b"),
+ CLK_MSR_ID(59, "hcodec"),
+ CLK_MSR_ID(61, "gpio_msr"),
+ CLK_MSR_ID(62, "hevcb"),
+ CLK_MSR_ID(63, "dsi_meas"),
+ CLK_MSR_ID(64, "spicc_1"),
+ CLK_MSR_ID(65, "spicc_0"),
+ CLK_MSR_ID(66, "vid_lock"),
+ CLK_MSR_ID(67, "dsi_phy"),
+ CLK_MSR_ID(68, "hdcp22_esm"),
+ CLK_MSR_ID(69, "hdcp22_skp"),
+ CLK_MSR_ID(70, "pwm_f"),
+ CLK_MSR_ID(71, "pwm_e"),
+ CLK_MSR_ID(72, "pwm_d"),
+ CLK_MSR_ID(73, "pwm_c"),
+ CLK_MSR_ID(75, "hevcf"),
+ CLK_MSR_ID(77, "rng_ring_osc_0"),
+ CLK_MSR_ID(78, "rng_ring_osc_1"),
+ CLK_MSR_ID(79, "rng_ring_osc_2"),
+ CLK_MSR_ID(80, "rng_ring_osc_3"),
+ CLK_MSR_ID(81, "vapb"),
+ CLK_MSR_ID(82, "ge2d"),
+ CLK_MSR_ID(83, "co_rx"),
+ CLK_MSR_ID(84, "co_tx"),
+ CLK_MSR_ID(89, "hdmi_todig"),
+ CLK_MSR_ID(90, "hdmitx_sys"),
+ CLK_MSR_ID(94, "eth_phy_rx"),
+ CLK_MSR_ID(95, "eth_phy_pll"),
+ CLK_MSR_ID(96, "vpu_b"),
+ CLK_MSR_ID(97, "cpu_b_tmp"),
+ CLK_MSR_ID(98, "ts"),
+ CLK_MSR_ID(99, "ring_osc_out_ee_3"),
+ CLK_MSR_ID(100, "ring_osc_out_ee_4"),
+ CLK_MSR_ID(101, "ring_osc_out_ee_5"),
+ CLK_MSR_ID(102, "ring_osc_out_ee_6"),
+ CLK_MSR_ID(103, "ring_osc_out_ee_7"),
+ CLK_MSR_ID(104, "ring_osc_out_ee_8"),
+ CLK_MSR_ID(105, "ring_osc_out_ee_9"),
+ CLK_MSR_ID(106, "ephy_test"),
+ CLK_MSR_ID(107, "au_dac_g128x"),
+ CLK_MSR_ID(108, "audio_locker_out"),
+ CLK_MSR_ID(109, "audio_locker_in"),
+ CLK_MSR_ID(110, "audio_tdmout_c_sclk"),
+ CLK_MSR_ID(111, "audio_tdmout_b_sclk"),
+ CLK_MSR_ID(112, "audio_tdmout_a_sclk"),
+ CLK_MSR_ID(113, "audio_tdmin_lb_sclk"),
+ CLK_MSR_ID(114, "audio_tdmin_c_sclk"),
+ CLK_MSR_ID(115, "audio_tdmin_b_sclk"),
+ CLK_MSR_ID(116, "audio_tdmin_a_sclk"),
+ CLK_MSR_ID(117, "audio_resample"),
+ CLK_MSR_ID(118, "audio_pdm_sys"),
+ CLK_MSR_ID(119, "audio_spdifout_b"),
+ CLK_MSR_ID(120, "audio_spdifout"),
+ CLK_MSR_ID(121, "audio_spdifin"),
+ CLK_MSR_ID(122, "audio_pdm_dclk"),
+};
+
static int meson_measure_id(struct meson_msr_id *clk_msr_id,
unsigned int duration)
{
@@ -337,6 +525,14 @@ static const struct of_device_id meson_msr_match_table[] = {
.compatible = "amlogic,meson8b-clk-measure",
.data = (void *)clk_msr_m8,
},
+ {
+ .compatible = "amlogic,meson-axg-clk-measure",
+ .data = (void *)clk_msr_axg,
+ },
+ {
+ .compatible = "amlogic,meson-g12a-clk-measure",
+ .data = (void *)clk_msr_g12a,
+ },
{ /* sentinel */ }
};
diff --git a/drivers/soc/bcm/Kconfig b/drivers/soc/bcm/Kconfig
index 055a845ed979..03fa91fbe2da 100644
--- a/drivers/soc/bcm/Kconfig
+++ b/drivers/soc/bcm/Kconfig
@@ -1,5 +1,17 @@
menu "Broadcom SoC drivers"
+config BCM2835_POWER
+ bool "BCM2835 power domain driver"
+ depends on ARCH_BCM2835 || (COMPILE_TEST && OF)
+ default y if ARCH_BCM2835
+ select PM_GENERIC_DOMAINS if PM
+ select RESET_CONTROLLER
+ help
+	  This enables support for the BCM2835 power domains and reset
+	  controller. Any usage of a power domain by the Raspberry Pi
+	  firmware means that Linux must access that same power domain
+	  through the RASPBERRYPI_POWER driver.
+
config RASPBERRYPI_POWER
bool "Raspberry Pi power domain driver"
depends on ARCH_BCM2835 || (COMPILE_TEST && OF)
diff --git a/drivers/soc/bcm/Makefile b/drivers/soc/bcm/Makefile
index dc4fced72d21..c81df4b2403c 100644
--- a/drivers/soc/bcm/Makefile
+++ b/drivers/soc/bcm/Makefile
@@ -1,2 +1,3 @@
+obj-$(CONFIG_BCM2835_POWER) += bcm2835-power.o
obj-$(CONFIG_RASPBERRYPI_POWER) += raspberrypi-power.o
obj-$(CONFIG_SOC_BRCMSTB) += brcmstb/
diff --git a/drivers/soc/bcm/bcm2835-power.c b/drivers/soc/bcm/bcm2835-power.c
new file mode 100644
index 000000000000..9351349cf0a9
--- /dev/null
+++ b/drivers/soc/bcm/bcm2835-power.c
@@ -0,0 +1,661 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Power domain driver for Broadcom BCM2835
+ *
+ * Copyright (C) 2018 Broadcom
+ */
+
+#include <dt-bindings/soc/bcm2835-pm.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/mfd/bcm2835-pm.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/reset-controller.h>
+#include <linux/types.h>
+
+#define PM_GNRIC 0x00
+#define PM_AUDIO 0x04
+#define PM_STATUS 0x18
+#define PM_RSTC 0x1c
+#define PM_RSTS 0x20
+#define PM_WDOG 0x24
+#define PM_PADS0 0x28
+#define PM_PADS2 0x2c
+#define PM_PADS3 0x30
+#define PM_PADS4 0x34
+#define PM_PADS5 0x38
+#define PM_PADS6 0x3c
+#define PM_CAM0 0x44
+#define PM_CAM0_LDOHPEN BIT(2)
+#define PM_CAM0_LDOLPEN BIT(1)
+#define PM_CAM0_CTRLEN BIT(0)
+
+#define PM_CAM1 0x48
+#define PM_CAM1_LDOHPEN BIT(2)
+#define PM_CAM1_LDOLPEN BIT(1)
+#define PM_CAM1_CTRLEN BIT(0)
+
+#define PM_CCP2TX 0x4c
+#define PM_CCP2TX_LDOEN BIT(1)
+#define PM_CCP2TX_CTRLEN BIT(0)
+
+#define PM_DSI0 0x50
+#define PM_DSI0_LDOHPEN BIT(2)
+#define PM_DSI0_LDOLPEN BIT(1)
+#define PM_DSI0_CTRLEN BIT(0)
+
+#define PM_DSI1 0x54
+#define PM_DSI1_LDOHPEN BIT(2)
+#define PM_DSI1_LDOLPEN BIT(1)
+#define PM_DSI1_CTRLEN BIT(0)
+
+#define PM_HDMI 0x58
+#define PM_HDMI_RSTDR BIT(19)
+#define PM_HDMI_LDOPD BIT(1)
+#define PM_HDMI_CTRLEN BIT(0)
+
+#define PM_USB 0x5c
+/* The power gates must be enabled with this bit before enabling the LDO in the
+ * USB block.
+ */
+#define PM_USB_CTRLEN BIT(0)
+
+#define PM_PXLDO 0x60
+#define PM_PXBG 0x64
+#define PM_DFT 0x68
+#define PM_SMPS 0x6c
+#define PM_XOSC 0x70
+#define PM_SPAREW 0x74
+#define PM_SPARER 0x78
+#define PM_AVS_RSTDR 0x7c
+#define PM_AVS_STAT 0x80
+#define PM_AVS_EVENT 0x84
+#define PM_AVS_INTEN 0x88
+#define PM_DUMMY 0xfc
+
+#define PM_IMAGE 0x108
+#define PM_GRAFX 0x10c
+#define PM_PROC 0x110
+#define PM_ENAB BIT(12)
+#define PM_ISPRSTN BIT(8)
+#define PM_H264RSTN BIT(7)
+#define PM_PERIRSTN BIT(6)
+#define PM_V3DRSTN BIT(6)
+#define PM_ISFUNC BIT(5)
+#define PM_MRDONE BIT(4)
+#define PM_MEMREP BIT(3)
+#define PM_ISPOW BIT(2)
+#define PM_POWOK BIT(1)
+#define PM_POWUP BIT(0)
+#define PM_INRUSH_SHIFT 13
+#define PM_INRUSH_3_5_MA 0
+#define PM_INRUSH_5_MA 1
+#define PM_INRUSH_10_MA 2
+#define PM_INRUSH_20_MA 3
+#define PM_INRUSH_MASK (3 << PM_INRUSH_SHIFT)
+
+#define PM_PASSWORD 0x5a000000
+
+#define PM_WDOG_TIME_SET 0x000fffff
+#define PM_RSTC_WRCFG_CLR 0xffffffcf
+#define PM_RSTS_HADWRH_SET 0x00000040
+#define PM_RSTC_WRCFG_SET 0x00000030
+#define PM_RSTC_WRCFG_FULL_RESET 0x00000020
+#define PM_RSTC_RESET 0x00000102
+
+#define PM_READ(reg) readl(power->base + (reg))
+#define PM_WRITE(reg, val) writel(PM_PASSWORD | (val), power->base + (reg))
+
+#define ASB_BRDG_VERSION 0x00
+#define ASB_CPR_CTRL 0x04
+
+#define ASB_V3D_S_CTRL 0x08
+#define ASB_V3D_M_CTRL 0x0c
+#define ASB_ISP_S_CTRL 0x10
+#define ASB_ISP_M_CTRL 0x14
+#define ASB_H264_S_CTRL 0x18
+#define ASB_H264_M_CTRL 0x1c
+
+#define ASB_REQ_STOP BIT(0)
+#define ASB_ACK BIT(1)
+#define ASB_EMPTY BIT(2)
+#define ASB_FULL BIT(3)
+
+#define ASB_AXI_BRDG_ID 0x20
+
+#define ASB_READ(reg) readl(power->asb + (reg))
+#define ASB_WRITE(reg, val) writel(PM_PASSWORD | (val), power->asb + (reg))
+
+struct bcm2835_power_domain {
+ struct generic_pm_domain base;
+ struct bcm2835_power *power;
+ u32 domain;
+ struct clk *clk;
+};
+
+struct bcm2835_power {
+ struct device *dev;
+ /* PM registers. */
+ void __iomem *base;
+ /* AXI Async bridge registers. */
+ void __iomem *asb;
+
+ struct genpd_onecell_data pd_xlate;
+ struct bcm2835_power_domain domains[BCM2835_POWER_DOMAIN_COUNT];
+ struct reset_controller_dev reset;
+};
+
+static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg)
+{
+ u64 start = ktime_get_ns();
+
+ /* Enable the module's async AXI bridges. */
+ ASB_WRITE(reg, ASB_READ(reg) & ~ASB_REQ_STOP);
+ while (ASB_READ(reg) & ASB_ACK) {
+ cpu_relax();
+ if (ktime_get_ns() - start >= 1000)
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int bcm2835_asb_disable(struct bcm2835_power *power, u32 reg)
+{
+ u64 start = ktime_get_ns();
+
+	/* Disable the module's async AXI bridges. */
+ ASB_WRITE(reg, ASB_READ(reg) | ASB_REQ_STOP);
+ while (!(ASB_READ(reg) & ASB_ACK)) {
+ cpu_relax();
+ if (ktime_get_ns() - start >= 1000)
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int bcm2835_power_power_off(struct bcm2835_power_domain *pd, u32 pm_reg)
+{
+ struct bcm2835_power *power = pd->power;
+
+ /* Enable functional isolation */
+ PM_WRITE(pm_reg, PM_READ(pm_reg) & ~PM_ISFUNC);
+
+ /* Enable electrical isolation */
+ PM_WRITE(pm_reg, PM_READ(pm_reg) & ~PM_ISPOW);
+
+ /* Open the power switches. */
+ PM_WRITE(pm_reg, PM_READ(pm_reg) & ~PM_POWUP);
+
+ return 0;
+}
+
+static int bcm2835_power_power_on(struct bcm2835_power_domain *pd, u32 pm_reg)
+{
+ struct bcm2835_power *power = pd->power;
+ struct device *dev = power->dev;
+ u64 start;
+ int ret;
+ int inrush;
+ bool powok;
+
+ /* If it was already powered on by the fw, leave it that way. */
+ if (PM_READ(pm_reg) & PM_POWUP)
+ return 0;
+
+ /* Enable power. Allowing too much current at once may result
+ * in POWOK never getting set, so start low and ramp it up as
+ * necessary to succeed.
+ */
+ powok = false;
+ for (inrush = PM_INRUSH_3_5_MA; inrush <= PM_INRUSH_20_MA; inrush++) {
+ PM_WRITE(pm_reg,
+ (PM_READ(pm_reg) & ~PM_INRUSH_MASK) |
+ (inrush << PM_INRUSH_SHIFT) |
+ PM_POWUP);
+
+ start = ktime_get_ns();
+ while (!(powok = !!(PM_READ(pm_reg) & PM_POWOK))) {
+ cpu_relax();
+ if (ktime_get_ns() - start >= 3000)
+ break;
+ }
+ }
+ if (!powok) {
+ dev_err(dev, "Timeout waiting for %s power OK\n",
+ pd->base.name);
+ ret = -ETIMEDOUT;
+ goto err_disable_powup;
+ }
+
+ /* Disable electrical isolation */
+ PM_WRITE(pm_reg, PM_READ(pm_reg) | PM_ISPOW);
+
+ /* Repair memory */
+ PM_WRITE(pm_reg, PM_READ(pm_reg) | PM_MEMREP);
+ start = ktime_get_ns();
+ while (!(PM_READ(pm_reg) & PM_MRDONE)) {
+ cpu_relax();
+ if (ktime_get_ns() - start >= 1000) {
+ dev_err(dev, "Timeout waiting for %s memory repair\n",
+ pd->base.name);
+ ret = -ETIMEDOUT;
+ goto err_disable_ispow;
+ }
+ }
+
+ /* Disable functional isolation */
+ PM_WRITE(pm_reg, PM_READ(pm_reg) | PM_ISFUNC);
+
+ return 0;
+
+err_disable_ispow:
+ PM_WRITE(pm_reg, PM_READ(pm_reg) & ~PM_ISPOW);
+err_disable_powup:
+ PM_WRITE(pm_reg, PM_READ(pm_reg) & ~(PM_POWUP | PM_INRUSH_MASK));
+ return ret;
+}
+
+static int bcm2835_asb_power_on(struct bcm2835_power_domain *pd,
+ u32 pm_reg,
+ u32 asb_m_reg,
+ u32 asb_s_reg,
+ u32 reset_flags)
+{
+ struct bcm2835_power *power = pd->power;
+ int ret;
+
+ ret = clk_prepare_enable(pd->clk);
+ if (ret) {
+ dev_err(power->dev, "Failed to enable clock for %s\n",
+ pd->base.name);
+ return ret;
+ }
+
+	/* Wait 32 clocks for the reset to propagate; 1 us will be enough. */
+ udelay(1);
+
+ clk_disable_unprepare(pd->clk);
+
+ /* Deassert the resets. */
+ PM_WRITE(pm_reg, PM_READ(pm_reg) | reset_flags);
+
+ ret = clk_prepare_enable(pd->clk);
+ if (ret) {
+ dev_err(power->dev, "Failed to enable clock for %s\n",
+ pd->base.name);
+ goto err_enable_resets;
+ }
+
+ ret = bcm2835_asb_enable(power, asb_m_reg);
+ if (ret) {
+ dev_err(power->dev, "Failed to enable ASB master for %s\n",
+ pd->base.name);
+ goto err_disable_clk;
+ }
+ ret = bcm2835_asb_enable(power, asb_s_reg);
+ if (ret) {
+ dev_err(power->dev, "Failed to enable ASB slave for %s\n",
+ pd->base.name);
+ goto err_disable_asb_master;
+ }
+
+ return 0;
+
+err_disable_asb_master:
+ bcm2835_asb_disable(power, asb_m_reg);
+err_disable_clk:
+ clk_disable_unprepare(pd->clk);
+err_enable_resets:
+ PM_WRITE(pm_reg, PM_READ(pm_reg) & ~reset_flags);
+ return ret;
+}
+
+static int bcm2835_asb_power_off(struct bcm2835_power_domain *pd,
+ u32 pm_reg,
+ u32 asb_m_reg,
+ u32 asb_s_reg,
+ u32 reset_flags)
+{
+ struct bcm2835_power *power = pd->power;
+ int ret;
+
+ ret = bcm2835_asb_disable(power, asb_s_reg);
+ if (ret) {
+ dev_warn(power->dev, "Failed to disable ASB slave for %s\n",
+ pd->base.name);
+ return ret;
+ }
+ ret = bcm2835_asb_disable(power, asb_m_reg);
+ if (ret) {
+ dev_warn(power->dev, "Failed to disable ASB master for %s\n",
+ pd->base.name);
+ bcm2835_asb_enable(power, asb_s_reg);
+ return ret;
+ }
+
+ clk_disable_unprepare(pd->clk);
+
+ /* Assert the resets. */
+ PM_WRITE(pm_reg, PM_READ(pm_reg) & ~reset_flags);
+
+ return 0;
+}
+
+static int bcm2835_power_pd_power_on(struct generic_pm_domain *domain)
+{
+ struct bcm2835_power_domain *pd =
+ container_of(domain, struct bcm2835_power_domain, base);
+ struct bcm2835_power *power = pd->power;
+
+ switch (pd->domain) {
+ case BCM2835_POWER_DOMAIN_GRAFX:
+ return bcm2835_power_power_on(pd, PM_GRAFX);
+
+ case BCM2835_POWER_DOMAIN_GRAFX_V3D:
+ return bcm2835_asb_power_on(pd, PM_GRAFX,
+ ASB_V3D_M_CTRL, ASB_V3D_S_CTRL,
+ PM_V3DRSTN);
+
+ case BCM2835_POWER_DOMAIN_IMAGE:
+ return bcm2835_power_power_on(pd, PM_IMAGE);
+
+ case BCM2835_POWER_DOMAIN_IMAGE_PERI:
+ return bcm2835_asb_power_on(pd, PM_IMAGE,
+ 0, 0,
+ PM_PERIRSTN);
+
+ case BCM2835_POWER_DOMAIN_IMAGE_ISP:
+ return bcm2835_asb_power_on(pd, PM_IMAGE,
+ ASB_ISP_M_CTRL, ASB_ISP_S_CTRL,
+ PM_ISPRSTN);
+
+ case BCM2835_POWER_DOMAIN_IMAGE_H264:
+ return bcm2835_asb_power_on(pd, PM_IMAGE,
+ ASB_H264_M_CTRL, ASB_H264_S_CTRL,
+ PM_H264RSTN);
+
+ case BCM2835_POWER_DOMAIN_USB:
+ PM_WRITE(PM_USB, PM_USB_CTRLEN);
+ return 0;
+
+ case BCM2835_POWER_DOMAIN_DSI0:
+ PM_WRITE(PM_DSI0, PM_DSI0_CTRLEN);
+ PM_WRITE(PM_DSI0, PM_DSI0_CTRLEN | PM_DSI0_LDOHPEN);
+ return 0;
+
+ case BCM2835_POWER_DOMAIN_DSI1:
+ PM_WRITE(PM_DSI1, PM_DSI1_CTRLEN);
+ PM_WRITE(PM_DSI1, PM_DSI1_CTRLEN | PM_DSI1_LDOHPEN);
+ return 0;
+
+ case BCM2835_POWER_DOMAIN_CCP2TX:
+ PM_WRITE(PM_CCP2TX, PM_CCP2TX_CTRLEN);
+ PM_WRITE(PM_CCP2TX, PM_CCP2TX_CTRLEN | PM_CCP2TX_LDOEN);
+ return 0;
+
+ case BCM2835_POWER_DOMAIN_HDMI:
+ PM_WRITE(PM_HDMI, PM_READ(PM_HDMI) | PM_HDMI_RSTDR);
+ PM_WRITE(PM_HDMI, PM_READ(PM_HDMI) | PM_HDMI_CTRLEN);
+ PM_WRITE(PM_HDMI, PM_READ(PM_HDMI) & ~PM_HDMI_LDOPD);
+ usleep_range(100, 200);
+ PM_WRITE(PM_HDMI, PM_READ(PM_HDMI) & ~PM_HDMI_RSTDR);
+ return 0;
+
+ default:
+ dev_err(power->dev, "Invalid domain %d\n", pd->domain);
+ return -EINVAL;
+ }
+}
+
+static int bcm2835_power_pd_power_off(struct generic_pm_domain *domain)
+{
+ struct bcm2835_power_domain *pd =
+ container_of(domain, struct bcm2835_power_domain, base);
+ struct bcm2835_power *power = pd->power;
+
+ switch (pd->domain) {
+ case BCM2835_POWER_DOMAIN_GRAFX:
+ return bcm2835_power_power_off(pd, PM_GRAFX);
+
+ case BCM2835_POWER_DOMAIN_GRAFX_V3D:
+ return bcm2835_asb_power_off(pd, PM_GRAFX,
+ ASB_V3D_M_CTRL, ASB_V3D_S_CTRL,
+ PM_V3DRSTN);
+
+ case BCM2835_POWER_DOMAIN_IMAGE:
+ return bcm2835_power_power_off(pd, PM_IMAGE);
+
+ case BCM2835_POWER_DOMAIN_IMAGE_PERI:
+ return bcm2835_asb_power_off(pd, PM_IMAGE,
+ 0, 0,
+ PM_PERIRSTN);
+
+ case BCM2835_POWER_DOMAIN_IMAGE_ISP:
+ return bcm2835_asb_power_off(pd, PM_IMAGE,
+ ASB_ISP_M_CTRL, ASB_ISP_S_CTRL,
+ PM_ISPRSTN);
+
+ case BCM2835_POWER_DOMAIN_IMAGE_H264:
+ return bcm2835_asb_power_off(pd, PM_IMAGE,
+ ASB_H264_M_CTRL, ASB_H264_S_CTRL,
+ PM_H264RSTN);
+
+ case BCM2835_POWER_DOMAIN_USB:
+ PM_WRITE(PM_USB, 0);
+ return 0;
+
+ case BCM2835_POWER_DOMAIN_DSI0:
+ PM_WRITE(PM_DSI0, PM_DSI0_CTRLEN);
+ PM_WRITE(PM_DSI0, 0);
+ return 0;
+
+ case BCM2835_POWER_DOMAIN_DSI1:
+ PM_WRITE(PM_DSI1, PM_DSI1_CTRLEN);
+ PM_WRITE(PM_DSI1, 0);
+ return 0;
+
+ case BCM2835_POWER_DOMAIN_CCP2TX:
+ PM_WRITE(PM_CCP2TX, PM_CCP2TX_CTRLEN);
+ PM_WRITE(PM_CCP2TX, 0);
+ return 0;
+
+ case BCM2835_POWER_DOMAIN_HDMI:
+ PM_WRITE(PM_HDMI, PM_READ(PM_HDMI) | PM_HDMI_LDOPD);
+ PM_WRITE(PM_HDMI, PM_READ(PM_HDMI) & ~PM_HDMI_CTRLEN);
+ return 0;
+
+ default:
+ dev_err(power->dev, "Invalid domain %d\n", pd->domain);
+ return -EINVAL;
+ }
+}
+
+static void
+bcm2835_init_power_domain(struct bcm2835_power *power,
+ int pd_xlate_index, const char *name)
+{
+ struct device *dev = power->dev;
+ struct bcm2835_power_domain *dom = &power->domains[pd_xlate_index];
+
+ dom->clk = devm_clk_get(dev->parent, name);
+
+ dom->base.name = name;
+ dom->base.power_on = bcm2835_power_pd_power_on;
+ dom->base.power_off = bcm2835_power_pd_power_off;
+
+ dom->domain = pd_xlate_index;
+ dom->power = power;
+
+ /* XXX: on/off at boot? */
+ pm_genpd_init(&dom->base, NULL, true);
+
+ power->pd_xlate.domains[pd_xlate_index] = &dom->base;
+}
+
+/**
+ * bcm2835_reset_reset - Resets a block that has a reset line in the
+ * PM block.
+ *
+ * The consumer of the reset controller must have the power domain up;
+ * there is no reset ability with the power domain down. To reset the
+ * sub-block, we just disable its access to memory through the ASB,
+ * reset, and re-enable.
+ */
+static int bcm2835_reset_reset(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct bcm2835_power *power = container_of(rcdev, struct bcm2835_power,
+ reset);
+ struct bcm2835_power_domain *pd;
+ int ret;
+
+ switch (id) {
+ case BCM2835_RESET_V3D:
+ pd = &power->domains[BCM2835_POWER_DOMAIN_GRAFX_V3D];
+ break;
+ case BCM2835_RESET_H264:
+ pd = &power->domains[BCM2835_POWER_DOMAIN_IMAGE_H264];
+ break;
+ case BCM2835_RESET_ISP:
+ pd = &power->domains[BCM2835_POWER_DOMAIN_IMAGE_ISP];
+ break;
+ default:
+ dev_err(power->dev, "Bad reset id %ld\n", id);
+ return -EINVAL;
+ }
+
+ ret = bcm2835_power_pd_power_off(&pd->base);
+ if (ret)
+ return ret;
+
+ return bcm2835_power_pd_power_on(&pd->base);
+}
+
+static int bcm2835_reset_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct bcm2835_power *power = container_of(rcdev, struct bcm2835_power,
+ reset);
+
+ switch (id) {
+ case BCM2835_RESET_V3D:
+		return !(PM_READ(PM_GRAFX) & PM_V3DRSTN);
+	case BCM2835_RESET_H264:
+		return !(PM_READ(PM_IMAGE) & PM_H264RSTN);
+	case BCM2835_RESET_ISP:
+		return !(PM_READ(PM_IMAGE) & PM_ISPRSTN);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct reset_control_ops bcm2835_reset_ops = {
+ .reset = bcm2835_reset_reset,
+ .status = bcm2835_reset_status,
+};
+
+static const char *const power_domain_names[] = {
+ [BCM2835_POWER_DOMAIN_GRAFX] = "grafx",
+ [BCM2835_POWER_DOMAIN_GRAFX_V3D] = "v3d",
+
+ [BCM2835_POWER_DOMAIN_IMAGE] = "image",
+ [BCM2835_POWER_DOMAIN_IMAGE_PERI] = "peri_image",
+ [BCM2835_POWER_DOMAIN_IMAGE_H264] = "h264",
+ [BCM2835_POWER_DOMAIN_IMAGE_ISP] = "isp",
+
+ [BCM2835_POWER_DOMAIN_USB] = "usb",
+ [BCM2835_POWER_DOMAIN_DSI0] = "dsi0",
+ [BCM2835_POWER_DOMAIN_DSI1] = "dsi1",
+ [BCM2835_POWER_DOMAIN_CAM0] = "cam0",
+ [BCM2835_POWER_DOMAIN_CAM1] = "cam1",
+ [BCM2835_POWER_DOMAIN_CCP2TX] = "ccp2tx",
+ [BCM2835_POWER_DOMAIN_HDMI] = "hdmi",
+};
+
+static int bcm2835_power_probe(struct platform_device *pdev)
+{
+ struct bcm2835_pm *pm = dev_get_drvdata(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
+ struct bcm2835_power *power;
+ static const struct {
+ int parent, child;
+ } domain_deps[] = {
+ { BCM2835_POWER_DOMAIN_GRAFX, BCM2835_POWER_DOMAIN_GRAFX_V3D },
+ { BCM2835_POWER_DOMAIN_IMAGE, BCM2835_POWER_DOMAIN_IMAGE_PERI },
+ { BCM2835_POWER_DOMAIN_IMAGE, BCM2835_POWER_DOMAIN_IMAGE_H264 },
+ { BCM2835_POWER_DOMAIN_IMAGE, BCM2835_POWER_DOMAIN_IMAGE_ISP },
+ { BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_USB },
+ { BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM0 },
+ { BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM1 },
+ };
+ int ret, i;
+ u32 id;
+
+ power = devm_kzalloc(dev, sizeof(*power), GFP_KERNEL);
+ if (!power)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, power);
+
+ power->dev = dev;
+ power->base = pm->base;
+ power->asb = pm->asb;
+
+ id = ASB_READ(ASB_AXI_BRDG_ID);
+ if (id != 0x62726467 /* "BRDG" */) {
+ dev_err(dev, "ASB register ID returned 0x%08x\n", id);
+ return -ENODEV;
+ }
+
+ power->pd_xlate.domains = devm_kcalloc(dev,
+ ARRAY_SIZE(power_domain_names),
+ sizeof(*power->pd_xlate.domains),
+ GFP_KERNEL);
+ if (!power->pd_xlate.domains)
+ return -ENOMEM;
+
+ power->pd_xlate.num_domains = ARRAY_SIZE(power_domain_names);
+
+ for (i = 0; i < ARRAY_SIZE(power_domain_names); i++)
+ bcm2835_init_power_domain(power, i, power_domain_names[i]);
+
+ for (i = 0; i < ARRAY_SIZE(domain_deps); i++) {
+ pm_genpd_add_subdomain(&power->domains[domain_deps[i].parent].base,
+ &power->domains[domain_deps[i].child].base);
+ }
+
+ power->reset.owner = THIS_MODULE;
+ power->reset.nr_resets = BCM2835_RESET_COUNT;
+ power->reset.ops = &bcm2835_reset_ops;
+ power->reset.of_node = dev->parent->of_node;
+
+ ret = devm_reset_controller_register(dev, &power->reset);
+ if (ret)
+ return ret;
+
+ of_genpd_add_provider_onecell(dev->parent->of_node, &power->pd_xlate);
+
+	dev_info(dev, "Broadcom BCM2835 power domains driver\n");
+ return 0;
+}
+
+static int bcm2835_power_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct platform_driver bcm2835_power_driver = {
+ .probe = bcm2835_power_probe,
+ .remove = bcm2835_power_remove,
+ .driver = {
+ .name = "bcm2835-power",
+ },
+};
+module_platform_driver(bcm2835_power_driver);
+
+MODULE_AUTHOR("Eric Anholt <eric@anholt.net>");
+MODULE_DESCRIPTION("Driver for Broadcom BCM2835 PM power domains and reset");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig
index 8f80e8bbf29e..61f8e1433d0a 100644
--- a/drivers/soc/fsl/Kconfig
+++ b/drivers/soc/fsl/Kconfig
@@ -22,6 +22,7 @@ config FSL_GUTS
config FSL_MC_DPIO
tristate "QorIQ DPAA2 DPIO driver"
depends on FSL_MC_BUS
+ select SOC_BUS
help
Driver for the DPAA2 DPIO object. A DPIO provides queue and
buffer management facilities for software to interact with
diff --git a/drivers/soc/fsl/dpio/dpio-cmd.h b/drivers/soc/fsl/dpio/dpio-cmd.h
index 5814d2f395a4..e13fd3ac1939 100644
--- a/drivers/soc/fsl/dpio/dpio-cmd.h
+++ b/drivers/soc/fsl/dpio/dpio-cmd.h
@@ -26,6 +26,7 @@
#define DPIO_CMDID_DISABLE DPIO_CMD(0x003)
#define DPIO_CMDID_GET_ATTR DPIO_CMD(0x004)
#define DPIO_CMDID_RESET DPIO_CMD(0x005)
+#define DPIO_CMDID_SET_STASHING_DEST DPIO_CMD(0x120)
struct dpio_cmd_open {
__le32 dpio_id;
@@ -47,4 +48,8 @@ struct dpio_rsp_get_attr {
__le32 qbman_version;
};
+struct dpio_stashing_dest {
+ u8 sdest;
+};
+
#endif /* _FSL_DPIO_CMD_H */
diff --git a/drivers/soc/fsl/dpio/dpio-driver.c b/drivers/soc/fsl/dpio/dpio-driver.c
index 2d4af32a0dec..c0cdc8946031 100644
--- a/drivers/soc/fsl/dpio/dpio-driver.c
+++ b/drivers/soc/fsl/dpio/dpio-driver.c
@@ -14,6 +14,7 @@
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/sys_soc.h>
#include <linux/fsl/mc.h>
#include <soc/fsl/dpaa2-io.h>
@@ -32,6 +33,46 @@ struct dpio_priv {
static cpumask_var_t cpus_unused_mask;
+static const struct soc_device_attribute ls1088a_soc[] = {
+ {.family = "QorIQ LS1088A"},
+ { /* sentinel */ }
+};
+
+static const struct soc_device_attribute ls2080a_soc[] = {
+ {.family = "QorIQ LS2080A"},
+ { /* sentinel */ }
+};
+
+static const struct soc_device_attribute ls2088a_soc[] = {
+ {.family = "QorIQ LS2088A"},
+ { /* sentinel */ }
+};
+
+static const struct soc_device_attribute lx2160a_soc[] = {
+ {.family = "QorIQ LX2160A"},
+ { /* sentinel */ }
+};
+
+static int dpaa2_dpio_get_cluster_sdest(struct fsl_mc_device *dpio_dev, int cpu)
+{
+ int cluster_base, cluster_size;
+
+ if (soc_device_match(ls1088a_soc)) {
+ cluster_base = 2;
+ cluster_size = 4;
+ } else if (soc_device_match(ls2080a_soc) ||
+ soc_device_match(ls2088a_soc) ||
+ soc_device_match(lx2160a_soc)) {
+ cluster_base = 0;
+ cluster_size = 2;
+ } else {
+ dev_err(&dpio_dev->dev, "unknown SoC version\n");
+ return -1;
+ }
+
+ return cluster_base + cpu / cluster_size;
+}
+
static irqreturn_t dpio_irq_handler(int irq_num, void *arg)
{
struct device *dev = (struct device *)arg;
@@ -89,6 +130,7 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
int err = -ENOMEM;
struct device *dev = &dpio_dev->dev;
int possible_next_cpu;
+ int sdest;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -145,6 +187,16 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
desc.cpu = possible_next_cpu;
cpumask_clear_cpu(possible_next_cpu, cpus_unused_mask);
+ sdest = dpaa2_dpio_get_cluster_sdest(dpio_dev, desc.cpu);
+ if (sdest >= 0) {
+ err = dpio_set_stashing_destination(dpio_dev->mc_io, 0,
+ dpio_dev->mc_handle,
+ sdest);
+ if (err)
+ dev_err(dev, "dpio_set_stashing_destination failed for cpu%d\n",
+ desc.cpu);
+ }
+
/*
* Set the CENA regs to be the cache inhibited area of the portal to
* avoid coherency issues if a user migrates to another core.
@@ -220,12 +272,12 @@ static int dpaa2_dpio_remove(struct fsl_mc_device *dpio_dev)
dev = &dpio_dev->dev;
priv = dev_get_drvdata(dev);
+ cpu = dpaa2_io_get_cpu(priv->io);
dpaa2_io_down(priv->io);
dpio_teardown_irqs(dpio_dev);
- cpu = dpaa2_io_get_cpu(priv->io);
cpumask_set_cpu(cpu, cpus_unused_mask);
err = dpio_open(dpio_dev->mc_io, 0, dpio_dev->obj_desc.id,
diff --git a/drivers/soc/fsl/dpio/dpio-service.c b/drivers/soc/fsl/dpio/dpio-service.c
index bc801934602a..b9539ef2c3cd 100644
--- a/drivers/soc/fsl/dpio/dpio-service.c
+++ b/drivers/soc/fsl/dpio/dpio-service.c
@@ -473,7 +473,7 @@ EXPORT_SYMBOL_GPL(dpaa2_io_service_enqueue_qd);
* Return 0 for success, and negative error code for failure.
*/
int dpaa2_io_service_release(struct dpaa2_io *d,
- u32 bpid,
+ u16 bpid,
const u64 *buffers,
unsigned int num_buffers)
{
@@ -502,7 +502,7 @@ EXPORT_SYMBOL_GPL(dpaa2_io_service_release);
* Eg. if the buffer pool is empty, this will return zero.
*/
int dpaa2_io_service_acquire(struct dpaa2_io *d,
- u32 bpid,
+ u16 bpid,
u64 *buffers,
unsigned int num_buffers)
{
@@ -630,6 +630,7 @@ struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last)
if (!(dpaa2_dq_flags(ret) & DPAA2_DQ_STAT_VALIDFRAME))
ret = NULL;
} else {
+ prefetch(&s->vaddr[s->idx]);
*is_last = 0;
}
diff --git a/drivers/soc/fsl/dpio/dpio.c b/drivers/soc/fsl/dpio/dpio.c
index 521bc6946317..af74c597a675 100644
--- a/drivers/soc/fsl/dpio/dpio.c
+++ b/drivers/soc/fsl/dpio/dpio.c
@@ -166,6 +166,22 @@ int dpio_get_attributes(struct fsl_mc_io *mc_io,
return 0;
}
+int dpio_set_stashing_destination(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 sdest)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpio_stashing_dest *dpio_cmd;
+
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_STASHING_DEST,
+ cmd_flags, token);
+ dpio_cmd = (struct dpio_stashing_dest *)cmd.params;
+ dpio_cmd->sdest = sdest;
+
+ return mc_send_command(mc_io, &cmd);
+}
+
/**
* dpio_get_api_version - Get Data Path I/O API version
* @mc_io: Pointer to MC portal's DPIO object
diff --git a/drivers/soc/fsl/dpio/dpio.h b/drivers/soc/fsl/dpio/dpio.h
index b2ac4ba4fb8e..da06f7258098 100644
--- a/drivers/soc/fsl/dpio/dpio.h
+++ b/drivers/soc/fsl/dpio/dpio.h
@@ -75,6 +75,11 @@ int dpio_get_attributes(struct fsl_mc_io *mc_io,
u16 token,
struct dpio_attr *attr);
+int dpio_set_stashing_destination(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 dest);
+
int dpio_get_api_version(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 *major_ver,
diff --git a/drivers/soc/fsl/dpio/qbman-portal.c b/drivers/soc/fsl/dpio/qbman-portal.c
index 0bddb85c0ae5..d02013556a1b 100644
--- a/drivers/soc/fsl/dpio/qbman-portal.c
+++ b/drivers/soc/fsl/dpio/qbman-portal.c
@@ -169,9 +169,9 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
3, /* RPM: Valid bit mode, RCR in array mode */
2, /* DCM: Discrete consumption ack mode */
3, /* EPM: Valid bit mode, EQCR in array mode */
- 0, /* mem stashing drop enable == FALSE */
+ 1, /* mem stashing drop enable == TRUE */
1, /* mem stashing priority == TRUE */
- 0, /* mem stashing enable == FALSE */
+ 1, /* mem stashing enable == TRUE */
1, /* dequeue stashing priority == TRUE */
0, /* dequeue stashing enable == FALSE */
0); /* EQCR_CI stashing priority == FALSE */
@@ -180,6 +180,7 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG);
if (!reg) {
pr_err("qbman: the portal is not enabled!\n");
+ kfree(p);
return NULL;
}
diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c
index 302e0c8d69d9..63f6df86f9e5 100644
--- a/drivers/soc/fsl/guts.c
+++ b/drivers/soc/fsl/guts.c
@@ -32,6 +32,7 @@ struct fsl_soc_die_attr {
static struct guts *guts;
static struct soc_device_attribute soc_dev_attr;
static struct soc_device *soc_dev;
+static struct device_node *root;
/* SoC die attribute definition for QorIQ platform */
@@ -114,7 +115,7 @@ static const struct fsl_soc_die_attr *fsl_soc_die_match(
return NULL;
}
-u32 fsl_guts_get_svr(void)
+static u32 fsl_guts_get_svr(void)
{
u32 svr = 0;
@@ -128,11 +129,10 @@ u32 fsl_guts_get_svr(void)
return svr;
}
-EXPORT_SYMBOL(fsl_guts_get_svr);
static int fsl_guts_probe(struct platform_device *pdev)
{
- struct device_node *root, *np = pdev->dev.of_node;
+ struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
struct resource *res;
const struct fsl_soc_die_attr *soc_die;
@@ -155,9 +155,8 @@ static int fsl_guts_probe(struct platform_device *pdev)
root = of_find_node_by_path("/");
if (of_property_read_string(root, "model", &machine))
of_property_read_string_index(root, "compatible", 0, &machine);
- of_node_put(root);
if (machine)
- soc_dev_attr.machine = devm_kstrdup(dev, machine, GFP_KERNEL);
+ soc_dev_attr.machine = machine;
svr = fsl_guts_get_svr();
soc_die = fsl_soc_die_match(svr, fsl_soc_die);
@@ -192,6 +191,7 @@ static int fsl_guts_probe(struct platform_device *pdev)
static int fsl_guts_remove(struct platform_device *dev)
{
soc_device_unregister(soc_dev);
+ of_node_put(root);
return 0;
}
diff --git a/drivers/soc/imx/Kconfig b/drivers/soc/imx/Kconfig
index 2112d18dbb7b..d80f899d22f9 100644
--- a/drivers/soc/imx/Kconfig
+++ b/drivers/soc/imx/Kconfig
@@ -2,7 +2,7 @@ menu "i.MX SoC drivers"
config IMX_GPCV2_PM_DOMAINS
bool "i.MX GPCv2 PM domains"
- depends on SOC_IMX7D || SOC_IMX8MQ || (COMPILE_TEST && OF)
+ depends on ARCH_MXC || (COMPILE_TEST && OF)
depends on PM
select PM_GENERIC_DOMAINS
default y if SOC_IMX7D
diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c
index 8b4f48a2ca57..176f473127b6 100644
--- a/drivers/soc/imx/gpcv2.c
+++ b/drivers/soc/imx/gpcv2.c
@@ -8,6 +8,7 @@
* Copyright 2015-2017 Pengutronix, Lucas Stach <kernel@pengutronix.de>
*/
+#include <linux/clk.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
@@ -65,6 +66,12 @@
#define GPC_M4_PU_PDN_FLG 0x1bc
+#define GPC_PU_PWRHSK 0x1fc
+
+#define IMX8M_GPU_HSK_PWRDNREQN BIT(6)
+#define IMX8M_VPU_HSK_PWRDNREQN BIT(5)
+#define IMX8M_DISP_HSK_PWRDNREQN BIT(4)
+
/*
* The PGC offset values in Reference Manual
* (Rev. 1, 01/2018 and the older ones) GPC chapter's
@@ -92,16 +99,21 @@
#define GPC_PGC_CTRL_PCR BIT(0)
+#define GPC_CLK_MAX 6
+
struct imx_pgc_domain {
struct generic_pm_domain genpd;
struct regmap *regmap;
struct regulator *regulator;
+ struct clk *clk[GPC_CLK_MAX];
+ int num_clks;
unsigned int pgc;
const struct {
u32 pxx;
u32 map;
+ u32 hsk;
} bits;
const int voltage;
@@ -125,7 +137,7 @@ static int imx_gpc_pu_pgc_sw_pxx_req(struct generic_pm_domain *genpd,
const bool enable_power_control = !on;
const bool has_regulator = !IS_ERR(domain->regulator);
unsigned long deadline;
- int ret = 0;
+ int i, ret = 0;
regmap_update_bits(domain->regmap, GPC_PGC_CPU_MAPPING,
domain->bits.map, domain->bits.map);
@@ -138,10 +150,18 @@ static int imx_gpc_pu_pgc_sw_pxx_req(struct generic_pm_domain *genpd,
}
}
+ /* Enable reset clocks for all devices in the domain */
+ for (i = 0; i < domain->num_clks; i++)
+ clk_prepare_enable(domain->clk[i]);
+
if (enable_power_control)
regmap_update_bits(domain->regmap, GPC_PGC_CTRL(domain->pgc),
GPC_PGC_CTRL_PCR, GPC_PGC_CTRL_PCR);
+ if (domain->bits.hsk)
+ regmap_update_bits(domain->regmap, GPC_PU_PWRHSK,
+ domain->bits.hsk, on ? domain->bits.hsk : 0);
+
regmap_update_bits(domain->regmap, offset,
domain->bits.pxx, domain->bits.pxx);
@@ -179,6 +199,10 @@ static int imx_gpc_pu_pgc_sw_pxx_req(struct generic_pm_domain *genpd,
regmap_update_bits(domain->regmap, GPC_PGC_CTRL(domain->pgc),
GPC_PGC_CTRL_PCR, 0);
+ /* Disable reset clocks for all devices in the domain */
+ for (i = 0; i < domain->num_clks; i++)
+ clk_disable_unprepare(domain->clk[i]);
+
if (has_regulator && !on) {
int err;
@@ -328,6 +352,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = {
.bits = {
.pxx = IMX8M_GPU_SW_Pxx_REQ,
.map = IMX8M_GPU_A53_DOMAIN,
+ .hsk = IMX8M_GPU_HSK_PWRDNREQN,
},
.pgc = IMX8M_PGC_GPU,
},
@@ -339,6 +364,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = {
.bits = {
.pxx = IMX8M_VPU_SW_Pxx_REQ,
.map = IMX8M_VPU_A53_DOMAIN,
+ .hsk = IMX8M_VPU_HSK_PWRDNREQN,
},
.pgc = IMX8M_PGC_VPU,
},
@@ -350,6 +376,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = {
.bits = {
.pxx = IMX8M_DISP_SW_Pxx_REQ,
.map = IMX8M_DISP_A53_DOMAIN,
+ .hsk = IMX8M_DISP_HSK_PWRDNREQN,
},
.pgc = IMX8M_PGC_DISP,
},
@@ -390,7 +417,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = {
static const struct regmap_range imx8m_yes_ranges[] = {
regmap_reg_range(GPC_LPCR_A_CORE_BSC,
- GPC_M4_PU_PDN_FLG),
+ GPC_PU_PWRHSK),
regmap_reg_range(GPC_PGC_CTRL(IMX8M_PGC_MIPI),
GPC_PGC_SR(IMX8M_PGC_MIPI)),
regmap_reg_range(GPC_PGC_CTRL(IMX8M_PGC_PCIE1),
@@ -426,6 +453,41 @@ static const struct imx_pgc_domain_data imx8m_pgc_domain_data = {
.reg_access_table = &imx8m_access_table,
};
+static int imx_pgc_get_clocks(struct imx_pgc_domain *domain)
+{
+ int i, ret;
+
+ for (i = 0; ; i++) {
+ struct clk *clk = of_clk_get(domain->dev->of_node, i);
+ if (IS_ERR(clk))
+ break;
+ if (i >= GPC_CLK_MAX) {
+ dev_err(domain->dev, "more than %d clocks\n",
+ GPC_CLK_MAX);
+ ret = -EINVAL;
+ goto clk_err;
+ }
+ domain->clk[i] = clk;
+ }
+ domain->num_clks = i;
+
+ return 0;
+
+clk_err:
+ while (i--)
+ clk_put(domain->clk[i]);
+
+ return ret;
+}
+
+static void imx_pgc_put_clocks(struct imx_pgc_domain *domain)
+{
+ int i;
+
+ for (i = domain->num_clks - 1; i >= 0; i--)
+ clk_put(domain->clk[i]);
+}
+
static int imx_pgc_domain_probe(struct platform_device *pdev)
{
struct imx_pgc_domain *domain = pdev->dev.platform_data;
@@ -445,9 +507,17 @@ static int imx_pgc_domain_probe(struct platform_device *pdev)
domain->voltage, domain->voltage);
}
+ ret = imx_pgc_get_clocks(domain);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(domain->dev, "Failed to get domain's clocks\n");
+ return ret;
+ }
+
ret = pm_genpd_init(&domain->genpd, NULL, true);
if (ret) {
dev_err(domain->dev, "Failed to init power domain\n");
+ imx_pgc_put_clocks(domain);
return ret;
}
@@ -456,6 +526,7 @@ static int imx_pgc_domain_probe(struct platform_device *pdev)
if (ret) {
dev_err(domain->dev, "Failed to add genpd provider\n");
pm_genpd_remove(&domain->genpd);
+ imx_pgc_put_clocks(domain);
}
return ret;
@@ -467,6 +538,7 @@ static int imx_pgc_domain_remove(struct platform_device *pdev)
of_genpd_del_provider(domain->dev->of_node);
pm_genpd_remove(&domain->genpd);
+ imx_pgc_put_clocks(domain);
return 0;
}
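
The imx_pgc_get_clocks() helper added above discovers a domain's clocks by calling of_clk_get() with increasing indices until it fails. A minimal sketch of an alternative approach, assuming the standard "clocks"/"#clock-cells" binding; the helper name is made up, while of_count_phandle_with_args() is an existing OF API:

#include <linux/of.h>

/* Count the entries of the "clocks" phandle list up front */
static int example_count_domain_clocks(struct device_node *np)
{
	int count;

	count = of_count_phandle_with_args(np, "clocks", "#clock-cells");
	if (count < 0)
		return 0;	/* no or malformed "clocks" property */

	return count;
}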
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index fcbf8a2e4080..1ee298f6bf17 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -98,6 +98,24 @@ config QCOM_RPMH
of hardware components aggregate requests for these resources and
help apply the aggregated state on the resource.
+config QCOM_RPMHPD
+ bool "Qualcomm RPMh Power domain driver"
+ depends on QCOM_RPMH && QCOM_COMMAND_DB
+ help
+ QCOM RPMh Power domain driver to support power-domains with
+ performance states. The driver communicates a performance state
+ value to RPMh which then translates it into corresponding voltage
+ for the voltage rail.
+
+config QCOM_RPMPD
+ bool "Qualcomm RPM Power domain driver"
+ depends on QCOM_SMD_RPM=y
+ help
+ QCOM RPM Power domain driver to support power-domains with
+ performance states. The driver communicates a performance state
+ value to RPM which then translates it into corresponding voltage
+ for the voltage rail.
+
config QCOM_SMEM
tristate "Qualcomm Shared Memory Manager (SMEM)"
depends on ARCH_QCOM || COMPILE_TEST
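
The two help texts above describe power domains that expose performance states: a consumer hands a state value to the driver, which votes it to RPM/RPMh, and the firmware translates it into a rail voltage. A minimal, hypothetical consumer-side sketch (the function name and level value are assumptions; the runtime-PM and genpd calls are existing kernel APIs):

#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

static int example_request_perf_level(struct device *dev, unsigned int level)
{
	int ret;

	/* Power up the domain the consumer device is attached to */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/*
	 * Ask the provider (rpmpd/rpmhpd) for at least this performance
	 * level; the provider turns it into a corner vote for RPM/RPMh.
	 */
	ret = dev_pm_genpd_set_performance_state(dev, level);

	pm_runtime_put(dev);
	return ret;
}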
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index f25b54cd6cf8..ffe519b0cb66 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -21,3 +21,5 @@ obj-$(CONFIG_QCOM_WCNSS_CTRL) += wcnss_ctrl.o
obj-$(CONFIG_QCOM_APR) += apr.o
obj-$(CONFIG_QCOM_LLCC) += llcc-slice.o
obj-$(CONFIG_QCOM_SDM845_LLCC) += llcc-sdm845.o
+obj-$(CONFIG_QCOM_RPMHPD) += rpmhpd.o
+obj-$(CONFIG_QCOM_RPMPD) += rpmpd.o
diff --git a/drivers/soc/qcom/llcc-sdm845.c b/drivers/soc/qcom/llcc-sdm845.c
index 2e1e4f0a5db8..86600d97c36d 100644
--- a/drivers/soc/qcom/llcc-sdm845.c
+++ b/drivers/soc/qcom/llcc-sdm845.c
@@ -71,6 +71,11 @@ static struct llcc_slice_config sdm845_data[] = {
SCT_ENTRY(LLCC_AUDHW, 22, 1024, 1, 1, 0xffc, 0x2, 0, 0, 1, 1, 0),
};
+static int sdm845_qcom_llcc_remove(struct platform_device *pdev)
+{
+ return qcom_llcc_remove(pdev);
+}
+
static int sdm845_qcom_llcc_probe(struct platform_device *pdev)
{
return qcom_llcc_probe(pdev, sdm845_data, ARRAY_SIZE(sdm845_data));
@@ -87,6 +92,7 @@ static struct platform_driver sdm845_qcom_llcc_driver = {
.of_match_table = sdm845_qcom_llcc_of_match,
},
.probe = sdm845_qcom_llcc_probe,
+ .remove = sdm845_qcom_llcc_remove,
};
module_platform_driver(sdm845_qcom_llcc_driver);
diff --git a/drivers/soc/qcom/llcc-slice.c b/drivers/soc/qcom/llcc-slice.c
index 80667f7be52c..9090ea12eaf3 100644
--- a/drivers/soc/qcom/llcc-slice.c
+++ b/drivers/soc/qcom/llcc-slice.c
@@ -46,7 +46,7 @@
#define BANK_OFFSET_STRIDE 0x80000
-static struct llcc_drv_data *drv_data;
+static struct llcc_drv_data *drv_data = (void *) -EPROBE_DEFER;
static const struct regmap_config llcc_regmap_config = {
.reg_bits = 32,
@@ -68,6 +68,9 @@ struct llcc_slice_desc *llcc_slice_getd(u32 uid)
struct llcc_slice_desc *desc;
u32 sz, count;
+ if (IS_ERR(drv_data))
+ return ERR_CAST(drv_data);
+
cfg = drv_data->cfg;
sz = drv_data->cfg_size;
@@ -108,6 +111,9 @@ static int llcc_update_act_ctrl(u32 sid,
u32 slice_status;
int ret;
+ if (IS_ERR(drv_data))
+ return PTR_ERR(drv_data);
+
act_ctrl_reg = LLCC_TRP_ACT_CTRLn(sid);
status_reg = LLCC_TRP_STATUSn(sid);
@@ -143,6 +149,9 @@ int llcc_slice_activate(struct llcc_slice_desc *desc)
int ret;
u32 act_ctrl_val;
+ if (IS_ERR(drv_data))
+ return PTR_ERR(drv_data);
+
if (IS_ERR_OR_NULL(desc))
return -EINVAL;
@@ -180,6 +189,9 @@ int llcc_slice_deactivate(struct llcc_slice_desc *desc)
u32 act_ctrl_val;
int ret;
+ if (IS_ERR(drv_data))
+ return PTR_ERR(drv_data);
+
if (IS_ERR_OR_NULL(desc))
return -EINVAL;
@@ -289,46 +301,62 @@ static int qcom_llcc_cfg_program(struct platform_device *pdev)
return ret;
}
+int qcom_llcc_remove(struct platform_device *pdev)
+{
+ /* Set the global pointer to an error code to avoid referencing it */
+ drv_data = ERR_PTR(-ENODEV);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_llcc_remove);
+
+static struct regmap *qcom_llcc_init_mmio(struct platform_device *pdev,
+ const char *name)
+{
+ struct resource *res;
+ void __iomem *base;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+ if (!res)
+ return ERR_PTR(-ENODEV);
+
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return ERR_CAST(base);
+
+ return devm_regmap_init_mmio(&pdev->dev, base, &llcc_regmap_config);
+}
+
int qcom_llcc_probe(struct platform_device *pdev,
const struct llcc_slice_config *llcc_cfg, u32 sz)
{
u32 num_banks;
struct device *dev = &pdev->dev;
- struct resource *llcc_banks_res, *llcc_bcast_res;
- void __iomem *llcc_banks_base, *llcc_bcast_base;
int ret, i;
struct platform_device *llcc_edac;
drv_data = devm_kzalloc(dev, sizeof(*drv_data), GFP_KERNEL);
- if (!drv_data)
- return -ENOMEM;
-
- llcc_banks_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "llcc_base");
- llcc_banks_base = devm_ioremap_resource(&pdev->dev, llcc_banks_res);
- if (IS_ERR(llcc_banks_base))
- return PTR_ERR(llcc_banks_base);
-
- drv_data->regmap = devm_regmap_init_mmio(dev, llcc_banks_base,
- &llcc_regmap_config);
- if (IS_ERR(drv_data->regmap))
- return PTR_ERR(drv_data->regmap);
-
- llcc_bcast_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "llcc_broadcast_base");
- llcc_bcast_base = devm_ioremap_resource(&pdev->dev, llcc_bcast_res);
- if (IS_ERR(llcc_bcast_base))
- return PTR_ERR(llcc_bcast_base);
-
- drv_data->bcast_regmap = devm_regmap_init_mmio(dev, llcc_bcast_base,
- &llcc_regmap_config);
- if (IS_ERR(drv_data->bcast_regmap))
- return PTR_ERR(drv_data->bcast_regmap);
+ if (!drv_data) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ drv_data->regmap = qcom_llcc_init_mmio(pdev, "llcc_base");
+ if (IS_ERR(drv_data->regmap)) {
+ ret = PTR_ERR(drv_data->regmap);
+ goto err;
+ }
+
+ drv_data->bcast_regmap =
+ qcom_llcc_init_mmio(pdev, "llcc_broadcast_base");
+ if (IS_ERR(drv_data->bcast_regmap)) {
+ ret = PTR_ERR(drv_data->bcast_regmap);
+ goto err;
+ }
ret = regmap_read(drv_data->regmap, LLCC_COMMON_STATUS0,
&num_banks);
if (ret)
- return ret;
+ goto err;
num_banks &= LLCC_LB_CNT_MASK;
num_banks >>= LLCC_LB_CNT_SHIFT;
@@ -340,8 +368,10 @@ int qcom_llcc_probe(struct platform_device *pdev,
drv_data->offsets = devm_kcalloc(dev, num_banks, sizeof(u32),
GFP_KERNEL);
- if (!drv_data->offsets)
- return -ENOMEM;
+ if (!drv_data->offsets) {
+ ret = -ENOMEM;
+ goto err;
+ }
for (i = 0; i < num_banks; i++)
drv_data->offsets[i] = i * BANK_OFFSET_STRIDE;
@@ -349,8 +379,10 @@ int qcom_llcc_probe(struct platform_device *pdev,
drv_data->bitmap = devm_kcalloc(dev,
BITS_TO_LONGS(drv_data->max_slices), sizeof(unsigned long),
GFP_KERNEL);
- if (!drv_data->bitmap)
- return -ENOMEM;
+ if (!drv_data->bitmap) {
+ ret = -ENOMEM;
+ goto err;
+ }
drv_data->cfg = llcc_cfg;
drv_data->cfg_size = sz;
@@ -359,7 +391,7 @@ int qcom_llcc_probe(struct platform_device *pdev,
ret = qcom_llcc_cfg_program(pdev);
if (ret)
- return ret;
+ goto err;
drv_data->ecc_irq = platform_get_irq(pdev, 0);
if (drv_data->ecc_irq >= 0) {
@@ -370,6 +402,9 @@ int qcom_llcc_probe(struct platform_device *pdev,
dev_err(dev, "Failed to register llcc edac driver\n");
}
+ return 0;
+err:
+ drv_data = ERR_PTR(-ENODEV);
return ret;
}
EXPORT_SYMBOL_GPL(qcom_llcc_probe);
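
The llcc-slice changes above replace the NULL-initialized global with an ERR_PTR() sentinel so the exported helpers can refuse calls before probe or after removal. A minimal sketch of the pattern in isolation, with made-up names:

#include <linux/err.h>
#include <linux/errno.h>

struct foo_drv;

/* Starts out "defer me"; probe installs the real pointer, failure or
 * removal poisons it with -ENODEV. */
static struct foo_drv *foo_handle = (void *)-EPROBE_DEFER;

static int foo_api_call(void)
{
	if (IS_ERR(foo_handle))
		return PTR_ERR(foo_handle);	/* -EPROBE_DEFER or -ENODEV */

	/* ... foo_handle is safe to dereference here ... */
	return 0;
}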
diff --git a/drivers/soc/qcom/qcom_gsbi.c b/drivers/soc/qcom/qcom_gsbi.c
index 09c669e70d63..038abc377fdb 100644
--- a/drivers/soc/qcom/qcom_gsbi.c
+++ b/drivers/soc/qcom/qcom_gsbi.c
@@ -138,7 +138,7 @@ static int gsbi_probe(struct platform_device *pdev)
struct resource *res;
void __iomem *base;
struct gsbi_info *gsbi;
- int i;
+ int i, ret;
u32 mask, gsbi_num;
const struct crci_config *config = NULL;
@@ -221,7 +221,10 @@ static int gsbi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, gsbi);
- return of_platform_populate(node, NULL, NULL, &pdev->dev);
+ ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
+ if (ret)
+ clk_disable_unprepare(gsbi->hclk);
+ return ret;
}
static int gsbi_remove(struct platform_device *pdev)
diff --git a/drivers/soc/qcom/rmtfs_mem.c b/drivers/soc/qcom/rmtfs_mem.c
index 97bb5989aa21..7200d762a951 100644
--- a/drivers/soc/qcom/rmtfs_mem.c
+++ b/drivers/soc/qcom/rmtfs_mem.c
@@ -45,9 +45,9 @@ static ssize_t qcom_rmtfs_mem_show(struct device *dev,
struct device_attribute *attr,
char *buf);
-static DEVICE_ATTR(phys_addr, 0400, qcom_rmtfs_mem_show, NULL);
-static DEVICE_ATTR(size, 0400, qcom_rmtfs_mem_show, NULL);
-static DEVICE_ATTR(client_id, 0400, qcom_rmtfs_mem_show, NULL);
+static DEVICE_ATTR(phys_addr, 0444, qcom_rmtfs_mem_show, NULL);
+static DEVICE_ATTR(size, 0444, qcom_rmtfs_mem_show, NULL);
+static DEVICE_ATTR(client_id, 0444, qcom_rmtfs_mem_show, NULL);
static ssize_t qcom_rmtfs_mem_show(struct device *dev,
struct device_attribute *attr,
@@ -132,6 +132,11 @@ static int qcom_rmtfs_mem_release(struct inode *inode, struct file *filp)
return 0;
}
+static struct class rmtfs_class = {
+ .owner = THIS_MODULE,
+ .name = "rmtfs",
+};
+
static const struct file_operations qcom_rmtfs_mem_fops = {
.owner = THIS_MODULE,
.open = qcom_rmtfs_mem_open,
@@ -199,6 +204,7 @@ static int qcom_rmtfs_mem_probe(struct platform_device *pdev)
dev_set_name(&rmtfs_mem->dev, "qcom_rmtfs_mem%d", client_id);
rmtfs_mem->dev.id = client_id;
+ rmtfs_mem->dev.class = &rmtfs_class;
rmtfs_mem->dev.devt = MKDEV(MAJOR(qcom_rmtfs_mem_major), client_id);
ret = cdev_device_add(&rmtfs_mem->cdev, &rmtfs_mem->dev);
@@ -277,32 +283,42 @@ static struct platform_driver qcom_rmtfs_mem_driver = {
},
};
-static int qcom_rmtfs_mem_init(void)
+static int __init qcom_rmtfs_mem_init(void)
{
int ret;
+ ret = class_register(&rmtfs_class);
+ if (ret)
+ return ret;
+
ret = alloc_chrdev_region(&qcom_rmtfs_mem_major, 0,
QCOM_RMTFS_MEM_DEV_MAX, "qcom_rmtfs_mem");
if (ret < 0) {
pr_err("qcom_rmtfs_mem: failed to allocate char dev region\n");
- return ret;
+ goto unregister_class;
}
ret = platform_driver_register(&qcom_rmtfs_mem_driver);
if (ret < 0) {
pr_err("qcom_rmtfs_mem: failed to register rmtfs_mem driver\n");
- unregister_chrdev_region(qcom_rmtfs_mem_major,
- QCOM_RMTFS_MEM_DEV_MAX);
+ goto unregister_chrdev;
}
+ return 0;
+
+unregister_chrdev:
+ unregister_chrdev_region(qcom_rmtfs_mem_major, QCOM_RMTFS_MEM_DEV_MAX);
+unregister_class:
+ class_unregister(&rmtfs_class);
return ret;
}
module_init(qcom_rmtfs_mem_init);
-static void qcom_rmtfs_mem_exit(void)
+static void __exit qcom_rmtfs_mem_exit(void)
{
platform_driver_unregister(&qcom_rmtfs_mem_driver);
unregister_chrdev_region(qcom_rmtfs_mem_major, QCOM_RMTFS_MEM_DEV_MAX);
+ class_unregister(&rmtfs_class);
}
module_exit(qcom_rmtfs_mem_exit);
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
index c7beb6841289..035091fd44b8 100644
--- a/drivers/soc/qcom/rpmh.c
+++ b/drivers/soc/qcom/rpmh.c
@@ -80,6 +80,7 @@ void rpmh_tx_done(const struct tcs_request *msg, int r)
struct rpmh_request *rpm_msg = container_of(msg, struct rpmh_request,
msg);
struct completion *compl = rpm_msg->completion;
+ bool free = rpm_msg->needs_free;
rpm_msg->err = r;
@@ -94,7 +95,7 @@ void rpmh_tx_done(const struct tcs_request *msg, int r)
complete(compl);
exit:
- if (rpm_msg->needs_free)
+ if (free)
kfree(rpm_msg);
}
@@ -192,9 +193,8 @@ static int __rpmh_write(const struct device *dev, enum rpmh_state state,
WARN_ON(irqs_disabled());
ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg);
} else {
- ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr),
- &rpm_msg->msg);
/* Clean up our call by spoofing tx_done */
+ ret = 0;
rpmh_tx_done(&rpm_msg->msg, ret);
}
@@ -348,11 +348,12 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
{
struct batch_cache_req *req;
struct rpmh_request *rpm_msgs;
- DECLARE_COMPLETION_ONSTACK(compl);
+ struct completion *compls;
struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
unsigned long time_left;
int count = 0;
- int ret, i, j;
+ int ret, i;
+ void *ptr;
if (!cmd || !n)
return -EINVAL;
@@ -362,10 +363,15 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
if (!count)
return -EINVAL;
- req = kzalloc(sizeof(*req) + count * sizeof(req->rpm_msgs[0]),
+ ptr = kzalloc(sizeof(*req) +
+ count * (sizeof(req->rpm_msgs[0]) + sizeof(*compls)),
GFP_ATOMIC);
- if (!req)
+ if (!ptr)
return -ENOMEM;
+
+ req = ptr;
+ compls = ptr + sizeof(*req) + count * sizeof(*rpm_msgs);
+
req->count = count;
rpm_msgs = req->rpm_msgs;
@@ -380,25 +386,26 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
}
for (i = 0; i < count; i++) {
- rpm_msgs[i].completion = &compl;
+ struct completion *compl = &compls[i];
+
+ init_completion(compl);
+ rpm_msgs[i].completion = compl;
ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msgs[i].msg);
if (ret) {
pr_err("Error(%d) sending RPMH message addr=%#x\n",
ret, rpm_msgs[i].msg.cmds[0].addr);
- for (j = i; j < count; j++)
- rpmh_tx_done(&rpm_msgs[j].msg, ret);
break;
}
}
time_left = RPMH_TIMEOUT_MS;
- for (i = 0; i < count; i++) {
- time_left = wait_for_completion_timeout(&compl, time_left);
+ while (i--) {
+ time_left = wait_for_completion_timeout(&compls[i], time_left);
if (!time_left) {
/*
* Better hope they never finish because they'll signal
- * the completion on our stack and that's bad once
- * we've returned from the function.
+ * the completion that we're going to free once
+ * we've returned from this function.
*/
WARN_ON(1);
ret = -ETIMEDOUT;
@@ -407,7 +414,7 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
}
exit:
- kfree(req);
+ kfree(ptr);
return ret;
}
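
The rpmh_write_batch() rework above folds the request header, the message array and one completion per message into a single kzalloc(), so the completions outlive the on-stack scope they previously had. A simplified sketch of the same co-allocation idea (one flexible array instead of two trailing regions; all names are made up):

#include <linux/completion.h>
#include <linux/slab.h>

struct example_batch {
	unsigned int count;
	struct completion compls[];	/* one completion per message */
};

static struct example_batch *example_alloc_batch(unsigned int count)
{
	struct example_batch *batch;
	unsigned int i;

	/* One allocation covers the header and all completions */
	batch = kzalloc(sizeof(*batch) + count * sizeof(batch->compls[0]),
			GFP_ATOMIC);
	if (!batch)
		return NULL;

	batch->count = count;
	for (i = 0; i < count; i++)
		init_completion(&batch->compls[i]);

	return batch;	/* released with a single kfree(batch) */
}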
diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c
new file mode 100644
index 000000000000..5741ec3fa814
--- /dev/null
+++ b/drivers/soc/qcom/rpmhpd.c
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.*/
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/pm_domain.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <soc/qcom/cmd-db.h>
+#include <soc/qcom/rpmh.h>
+#include <dt-bindings/power/qcom-rpmpd.h>
+
+#define domain_to_rpmhpd(domain) container_of(domain, struct rpmhpd, pd)
+
+#define RPMH_ARC_MAX_LEVELS 16
+
+/**
+ * struct rpmhpd - top level RPMh power domain resource data structure
+ * @dev: rpmh power domain controller device
+ * @pd: generic_pm_domain corresponding to the power domain
+ * @peer: A peer power domain in case Active only Voting is
+ * supported
+ * @active_only: True if it represents an Active only peer
+ * @level: An array of level (vlvl) to corner (hlvl) mappings
+ * derived from cmd-db
+ * @level_count: Number of levels supported by the power domain. max
+ * being 16 (0 - 15)
+ * @enabled: true if the power domain is enabled
+ * @res_name: Resource name used for cmd-db lookup
+ * @addr: Resource address as looked up using resource name from
+ * cmd-db
+ */
+struct rpmhpd {
+ struct device *dev;
+ struct generic_pm_domain pd;
+ struct generic_pm_domain *parent;
+ struct rpmhpd *peer;
+ const bool active_only;
+ unsigned int corner;
+ unsigned int active_corner;
+ u32 level[RPMH_ARC_MAX_LEVELS];
+ size_t level_count;
+ bool enabled;
+ const char *res_name;
+ u32 addr;
+};
+
+struct rpmhpd_desc {
+ struct rpmhpd **rpmhpds;
+ size_t num_pds;
+};
+
+static DEFINE_MUTEX(rpmhpd_lock);
+
+/* SDM845 RPMH powerdomains */
+
+static struct rpmhpd sdm845_ebi = {
+ .pd = { .name = "ebi", },
+ .res_name = "ebi.lvl",
+};
+
+static struct rpmhpd sdm845_lmx = {
+ .pd = { .name = "lmx", },
+ .res_name = "lmx.lvl",
+};
+
+static struct rpmhpd sdm845_lcx = {
+ .pd = { .name = "lcx", },
+ .res_name = "lcx.lvl",
+};
+
+static struct rpmhpd sdm845_gfx = {
+ .pd = { .name = "gfx", },
+ .res_name = "gfx.lvl",
+};
+
+static struct rpmhpd sdm845_mss = {
+ .pd = { .name = "mss", },
+ .res_name = "mss.lvl",
+};
+
+static struct rpmhpd sdm845_mx_ao;
+static struct rpmhpd sdm845_mx = {
+ .pd = { .name = "mx", },
+ .peer = &sdm845_mx_ao,
+ .res_name = "mx.lvl",
+};
+
+static struct rpmhpd sdm845_mx_ao = {
+ .pd = { .name = "mx_ao", },
+ .peer = &sdm845_mx,
+ .res_name = "mx.lvl",
+};
+
+static struct rpmhpd sdm845_cx_ao;
+static struct rpmhpd sdm845_cx = {
+ .pd = { .name = "cx", },
+ .peer = &sdm845_cx_ao,
+ .parent = &sdm845_mx.pd,
+ .res_name = "cx.lvl",
+};
+
+static struct rpmhpd sdm845_cx_ao = {
+ .pd = { .name = "cx_ao", },
+ .peer = &sdm845_cx,
+ .parent = &sdm845_mx_ao.pd,
+ .res_name = "cx.lvl",
+};
+
+static struct rpmhpd *sdm845_rpmhpds[] = {
+ [SDM845_EBI] = &sdm845_ebi,
+ [SDM845_MX] = &sdm845_mx,
+ [SDM845_MX_AO] = &sdm845_mx_ao,
+ [SDM845_CX] = &sdm845_cx,
+ [SDM845_CX_AO] = &sdm845_cx_ao,
+ [SDM845_LMX] = &sdm845_lmx,
+ [SDM845_LCX] = &sdm845_lcx,
+ [SDM845_GFX] = &sdm845_gfx,
+ [SDM845_MSS] = &sdm845_mss,
+};
+
+static const struct rpmhpd_desc sdm845_desc = {
+ .rpmhpds = sdm845_rpmhpds,
+ .num_pds = ARRAY_SIZE(sdm845_rpmhpds),
+};
+
+static const struct of_device_id rpmhpd_match_table[] = {
+ { .compatible = "qcom,sdm845-rpmhpd", .data = &sdm845_desc },
+ { }
+};
+
+static int rpmhpd_send_corner(struct rpmhpd *pd, int state,
+ unsigned int corner, bool sync)
+{
+ struct tcs_cmd cmd = {
+ .addr = pd->addr,
+ .data = corner,
+ };
+
+ /*
+ * Wait for an ack only when we are increasing the
+ * perf state of the power domain
+ */
+ if (sync)
+ return rpmh_write(pd->dev, state, &cmd, 1);
+ else
+ return rpmh_write_async(pd->dev, state, &cmd, 1);
+}
+
+static void to_active_sleep(struct rpmhpd *pd, unsigned int corner,
+ unsigned int *active, unsigned int *sleep)
+{
+ *active = corner;
+
+ if (pd->active_only)
+ *sleep = 0;
+ else
+ *sleep = *active;
+}
+
+/*
+ * This function is used to aggregate the votes across the active only
+ * resources and their peers. The aggregated votes are sent to RPMh as
+ * ACTIVE_ONLY votes (which take effect immediately), as WAKE_ONLY votes
+ * (applied by RPMh on system wakeup) and as SLEEP votes (applied by RPMh
+ * on system sleep).
+ * We send ACTIVE_ONLY votes for resources without any peers. For others,
+ * which have an active only peer, all 3 votes are sent.
+ */
+static int rpmhpd_aggregate_corner(struct rpmhpd *pd, unsigned int corner)
+{
+ int ret;
+ struct rpmhpd *peer = pd->peer;
+ unsigned int active_corner, sleep_corner;
+ unsigned int this_active_corner = 0, this_sleep_corner = 0;
+ unsigned int peer_active_corner = 0, peer_sleep_corner = 0;
+
+ to_active_sleep(pd, corner, &this_active_corner, &this_sleep_corner);
+
+ if (peer && peer->enabled)
+ to_active_sleep(peer, peer->corner, &peer_active_corner,
+ &peer_sleep_corner);
+
+ active_corner = max(this_active_corner, peer_active_corner);
+
+ ret = rpmhpd_send_corner(pd, RPMH_ACTIVE_ONLY_STATE, active_corner,
+ active_corner > pd->active_corner);
+ if (ret)
+ return ret;
+
+ pd->active_corner = active_corner;
+
+ if (peer) {
+ peer->active_corner = active_corner;
+
+ ret = rpmhpd_send_corner(pd, RPMH_WAKE_ONLY_STATE,
+ active_corner, false);
+ if (ret)
+ return ret;
+
+ sleep_corner = max(this_sleep_corner, peer_sleep_corner);
+
+ return rpmhpd_send_corner(pd, RPMH_SLEEP_STATE, sleep_corner,
+ false);
+ }
+
+ return ret;
+}
+
+static int rpmhpd_power_on(struct generic_pm_domain *domain)
+{
+ struct rpmhpd *pd = domain_to_rpmhpd(domain);
+ int ret = 0;
+
+ mutex_lock(&rpmhpd_lock);
+
+ if (pd->corner)
+ ret = rpmhpd_aggregate_corner(pd, pd->corner);
+
+ if (!ret)
+ pd->enabled = true;
+
+ mutex_unlock(&rpmhpd_lock);
+
+ return ret;
+}
+
+static int rpmhpd_power_off(struct generic_pm_domain *domain)
+{
+ struct rpmhpd *pd = domain_to_rpmhpd(domain);
+ int ret = 0;
+
+ mutex_lock(&rpmhpd_lock);
+
+ ret = rpmhpd_aggregate_corner(pd, pd->level[0]);
+
+ if (!ret)
+ pd->enabled = false;
+
+ mutex_unlock(&rpmhpd_lock);
+
+ return ret;
+}
+
+static int rpmhpd_set_performance_state(struct generic_pm_domain *domain,
+ unsigned int level)
+{
+ struct rpmhpd *pd = domain_to_rpmhpd(domain);
+ int ret = 0, i;
+
+ mutex_lock(&rpmhpd_lock);
+
+ for (i = 0; i < pd->level_count; i++)
+ if (level <= pd->level[i])
+ break;
+
+ /*
+ * If the level requested is more than that supported by the
+ * max corner, just set it to max anyway.
+ */
+ if (i == pd->level_count)
+ i--;
+
+ if (pd->enabled) {
+ ret = rpmhpd_aggregate_corner(pd, i);
+ if (ret)
+ goto out;
+ }
+
+ pd->corner = i;
+out:
+ mutex_unlock(&rpmhpd_lock);
+
+ return ret;
+}
+
+static unsigned int rpmhpd_get_performance_state(struct generic_pm_domain *genpd,
+ struct dev_pm_opp *opp)
+{
+ return dev_pm_opp_get_level(opp);
+}
+
+static int rpmhpd_update_level_mapping(struct rpmhpd *rpmhpd)
+{
+ int i;
+ const u16 *buf;
+
+ buf = cmd_db_read_aux_data(rpmhpd->res_name, &rpmhpd->level_count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ /* 2 bytes used for each command DB aux data entry */
+ rpmhpd->level_count >>= 1;
+
+ if (rpmhpd->level_count > RPMH_ARC_MAX_LEVELS)
+ return -EINVAL;
+
+ for (i = 0; i < rpmhpd->level_count; i++) {
+ rpmhpd->level[i] = buf[i];
+
+ /*
+ * The AUX data may be zero padded. These 0 valued entries at
+ * the end of the map must be ignored.
+ */
+ if (i > 0 && rpmhpd->level[i] == 0) {
+ rpmhpd->level_count = i;
+ break;
+ }
+ pr_debug("%s: ARC hlvl=%2d --> vlvl=%4u\n", rpmhpd->res_name, i,
+ rpmhpd->level[i]);
+ }
+
+ return 0;
+}
+
+static int rpmhpd_probe(struct platform_device *pdev)
+{
+ int i, ret;
+ size_t num_pds;
+ struct device *dev = &pdev->dev;
+ struct genpd_onecell_data *data;
+ struct rpmhpd **rpmhpds;
+ const struct rpmhpd_desc *desc;
+
+ desc = of_device_get_match_data(dev);
+ if (!desc)
+ return -EINVAL;
+
+ rpmhpds = desc->rpmhpds;
+ num_pds = desc->num_pds;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->domains = devm_kcalloc(dev, num_pds, sizeof(*data->domains),
+ GFP_KERNEL);
+ if (!data->domains)
+ return -ENOMEM;
+
+ data->num_domains = num_pds;
+
+ for (i = 0; i < num_pds; i++) {
+ if (!rpmhpds[i]) {
+ dev_warn(dev, "rpmhpds[%d] is empty\n", i);
+ continue;
+ }
+
+ rpmhpds[i]->dev = dev;
+ rpmhpds[i]->addr = cmd_db_read_addr(rpmhpds[i]->res_name);
+ if (!rpmhpds[i]->addr) {
+ dev_err(dev, "Could not find RPMh address for resource %s\n",
+ rpmhpds[i]->res_name);
+ return -ENODEV;
+ }
+
+ ret = cmd_db_read_slave_id(rpmhpds[i]->res_name);
+ if (ret != CMD_DB_HW_ARC) {
+ dev_err(dev, "RPMh slave ID mismatch\n");
+ return -EINVAL;
+ }
+
+ ret = rpmhpd_update_level_mapping(rpmhpds[i]);
+ if (ret)
+ return ret;
+
+ rpmhpds[i]->pd.power_off = rpmhpd_power_off;
+ rpmhpds[i]->pd.power_on = rpmhpd_power_on;
+ rpmhpds[i]->pd.set_performance_state = rpmhpd_set_performance_state;
+ rpmhpds[i]->pd.opp_to_performance_state = rpmhpd_get_performance_state;
+ pm_genpd_init(&rpmhpds[i]->pd, NULL, true);
+
+ data->domains[i] = &rpmhpds[i]->pd;
+ }
+
+ /* Add subdomains */
+ for (i = 0; i < num_pds; i++) {
+ if (!rpmhpds[i])
+ continue;
+ if (rpmhpds[i]->parent)
+ pm_genpd_add_subdomain(rpmhpds[i]->parent,
+ &rpmhpds[i]->pd);
+ }
+
+ return of_genpd_add_provider_onecell(pdev->dev.of_node, data);
+}
+
+static struct platform_driver rpmhpd_driver = {
+ .driver = {
+ .name = "qcom-rpmhpd",
+ .of_match_table = rpmhpd_match_table,
+ .suppress_bind_attrs = true,
+ },
+ .probe = rpmhpd_probe,
+};
+
+static int __init rpmhpd_init(void)
+{
+ return platform_driver_register(&rpmhpd_driver);
+}
+core_initcall(rpmhpd_init);
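
rpmhpd_set_performance_state() above maps a requested level (vlvl) to a hardware corner index (hlvl) by scanning the level[] table built from cmd-db aux data. A standalone sketch of that lookup, assuming a non-empty table (function name is made up):

#include <linux/types.h>

static unsigned int example_vlvl_to_hlvl(const u32 *level, size_t count,
					 unsigned int requested)
{
	size_t i;

	/* Pick the first corner whose vlvl satisfies the request */
	for (i = 0; i < count; i++)
		if (requested <= level[i])
			return i;

	return count - 1;	/* request exceeds the max corner: clamp */
}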
diff --git a/drivers/soc/qcom/rpmpd.c b/drivers/soc/qcom/rpmpd.c
new file mode 100644
index 000000000000..005326050c23
--- /dev/null
+++ b/drivers/soc/qcom/rpmpd.c
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/pm_domain.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/soc/qcom/smd-rpm.h>
+
+#include <dt-bindings/power/qcom-rpmpd.h>
+
+#define domain_to_rpmpd(domain) container_of(domain, struct rpmpd, pd)
+
+/* Resource types */
+#define RPMPD_SMPA 0x61706d73
+#define RPMPD_LDOA 0x616f646c
+
+/* Operation Keys */
+#define KEY_CORNER 0x6e726f63 /* corn */
+#define KEY_ENABLE 0x6e657773 /* swen */
+#define KEY_FLOOR_CORNER 0x636676 /* vfc */
+
+#define MAX_RPMPD_STATE 6
+
+#define DEFINE_RPMPD_CORNER_SMPA(_platform, _name, _active, r_id) \
+ static struct rpmpd _platform##_##_active; \
+ static struct rpmpd _platform##_##_name = { \
+ .pd = { .name = #_name, }, \
+ .peer = &_platform##_##_active, \
+ .res_type = RPMPD_SMPA, \
+ .res_id = r_id, \
+ .key = KEY_CORNER, \
+ }; \
+ static struct rpmpd _platform##_##_active = { \
+ .pd = { .name = #_active, }, \
+ .peer = &_platform##_##_name, \
+ .active_only = true, \
+ .res_type = RPMPD_SMPA, \
+ .res_id = r_id, \
+ .key = KEY_CORNER, \
+ }
+
+#define DEFINE_RPMPD_CORNER_LDOA(_platform, _name, r_id) \
+ static struct rpmpd _platform##_##_name = { \
+ .pd = { .name = #_name, }, \
+ .res_type = RPMPD_LDOA, \
+ .res_id = r_id, \
+ .key = KEY_CORNER, \
+ }
+
+#define DEFINE_RPMPD_VFC(_platform, _name, r_id, r_type) \
+ static struct rpmpd _platform##_##_name = { \
+ .pd = { .name = #_name, }, \
+ .res_type = r_type, \
+ .res_id = r_id, \
+ .key = KEY_FLOOR_CORNER, \
+ }
+
+#define DEFINE_RPMPD_VFC_SMPA(_platform, _name, r_id) \
+ DEFINE_RPMPD_VFC(_platform, _name, r_id, RPMPD_SMPA)
+
+#define DEFINE_RPMPD_VFC_LDOA(_platform, _name, r_id) \
+ DEFINE_RPMPD_VFC(_platform, _name, r_id, RPMPD_LDOA)
+
+struct rpmpd_req {
+ __le32 key;
+ __le32 nbytes;
+ __le32 value;
+};
+
+struct rpmpd {
+ struct generic_pm_domain pd;
+ struct rpmpd *peer;
+ const bool active_only;
+ unsigned int corner;
+ bool enabled;
+ const char *res_name;
+ const int res_type;
+ const int res_id;
+ struct qcom_smd_rpm *rpm;
+ __le32 key;
+};
+
+struct rpmpd_desc {
+ struct rpmpd **rpmpds;
+ size_t num_pds;
+};
+
+static DEFINE_MUTEX(rpmpd_lock);
+
+/* msm8996 RPM Power domains */
+DEFINE_RPMPD_CORNER_SMPA(msm8996, vddcx, vddcx_ao, 1);
+DEFINE_RPMPD_CORNER_SMPA(msm8996, vddmx, vddmx_ao, 2);
+DEFINE_RPMPD_CORNER_LDOA(msm8996, vddsscx, 26);
+
+DEFINE_RPMPD_VFC_SMPA(msm8996, vddcx_vfc, 1);
+DEFINE_RPMPD_VFC_LDOA(msm8996, vddsscx_vfc, 26);
+
+static struct rpmpd *msm8996_rpmpds[] = {
+ [MSM8996_VDDCX] = &msm8996_vddcx,
+ [MSM8996_VDDCX_AO] = &msm8996_vddcx_ao,
+ [MSM8996_VDDCX_VFC] = &msm8996_vddcx_vfc,
+ [MSM8996_VDDMX] = &msm8996_vddmx,
+ [MSM8996_VDDMX_AO] = &msm8996_vddmx_ao,
+ [MSM8996_VDDSSCX] = &msm8996_vddsscx,
+ [MSM8996_VDDSSCX_VFC] = &msm8996_vddsscx_vfc,
+};
+
+static const struct rpmpd_desc msm8996_desc = {
+ .rpmpds = msm8996_rpmpds,
+ .num_pds = ARRAY_SIZE(msm8996_rpmpds),
+};
+
+static const struct of_device_id rpmpd_match_table[] = {
+ { .compatible = "qcom,msm8996-rpmpd", .data = &msm8996_desc },
+ { }
+};
+
+static int rpmpd_send_enable(struct rpmpd *pd, bool enable)
+{
+ struct rpmpd_req req = {
+ .key = KEY_ENABLE,
+ .nbytes = cpu_to_le32(sizeof(u32)),
+ .value = cpu_to_le32(enable),
+ };
+
+ return qcom_rpm_smd_write(pd->rpm, QCOM_SMD_RPM_ACTIVE_STATE,
+ pd->res_type, pd->res_id, &req, sizeof(req));
+}
+
+static int rpmpd_send_corner(struct rpmpd *pd, int state, unsigned int corner)
+{
+ struct rpmpd_req req = {
+ .key = pd->key,
+ .nbytes = cpu_to_le32(sizeof(u32)),
+ .value = cpu_to_le32(corner),
+ };
+
+ return qcom_rpm_smd_write(pd->rpm, state, pd->res_type, pd->res_id,
+ &req, sizeof(req));
+};
+
+static void to_active_sleep(struct rpmpd *pd, unsigned int corner,
+ unsigned int *active, unsigned int *sleep)
+{
+ *active = corner;
+
+ if (pd->active_only)
+ *sleep = 0;
+ else
+ *sleep = *active;
+}
+
+static int rpmpd_aggregate_corner(struct rpmpd *pd)
+{
+ int ret;
+ struct rpmpd *peer = pd->peer;
+ unsigned int active_corner, sleep_corner;
+ unsigned int this_active_corner = 0, this_sleep_corner = 0;
+ unsigned int peer_active_corner = 0, peer_sleep_corner = 0;
+
+ to_active_sleep(pd, pd->corner, &this_active_corner, &this_sleep_corner);
+
+ if (peer && peer->enabled)
+ to_active_sleep(peer, peer->corner, &peer_active_corner,
+ &peer_sleep_corner);
+
+ active_corner = max(this_active_corner, peer_active_corner);
+
+ ret = rpmpd_send_corner(pd, QCOM_SMD_RPM_ACTIVE_STATE, active_corner);
+ if (ret)
+ return ret;
+
+ sleep_corner = max(this_sleep_corner, peer_sleep_corner);
+
+ return rpmpd_send_corner(pd, QCOM_SMD_RPM_SLEEP_STATE, sleep_corner);
+}
+
+static int rpmpd_power_on(struct generic_pm_domain *domain)
+{
+ int ret;
+ struct rpmpd *pd = domain_to_rpmpd(domain);
+
+ mutex_lock(&rpmpd_lock);
+
+ ret = rpmpd_send_enable(pd, true);
+ if (ret)
+ goto out;
+
+ pd->enabled = true;
+
+ if (pd->corner)
+ ret = rpmpd_aggregate_corner(pd);
+
+out:
+ mutex_unlock(&rpmpd_lock);
+
+ return ret;
+}
+
+static int rpmpd_power_off(struct generic_pm_domain *domain)
+{
+ int ret;
+ struct rpmpd *pd = domain_to_rpmpd(domain);
+
+ mutex_lock(&rpmpd_lock);
+
+ ret = rpmpd_send_enable(pd, false);
+ if (!ret)
+ pd->enabled = false;
+
+ mutex_unlock(&rpmpd_lock);
+
+ return ret;
+}
+
+static int rpmpd_set_performance(struct generic_pm_domain *domain,
+ unsigned int state)
+{
+ int ret = 0;
+ struct rpmpd *pd = domain_to_rpmpd(domain);
+
+ if (state > MAX_RPMPD_STATE)
+ goto out;
+
+ mutex_lock(&rpmpd_lock);
+
+ pd->corner = state;
+
+ if (!pd->enabled && pd->key != KEY_FLOOR_CORNER)
+ goto out;
+
+ ret = rpmpd_aggregate_corner(pd);
+
+out:
+ mutex_unlock(&rpmpd_lock);
+
+ return ret;
+}
+
+static unsigned int rpmpd_get_performance(struct generic_pm_domain *genpd,
+ struct dev_pm_opp *opp)
+{
+ return dev_pm_opp_get_level(opp);
+}
+
+static int rpmpd_probe(struct platform_device *pdev)
+{
+ int i;
+ size_t num;
+ struct genpd_onecell_data *data;
+ struct qcom_smd_rpm *rpm;
+ struct rpmpd **rpmpds;
+ const struct rpmpd_desc *desc;
+
+ rpm = dev_get_drvdata(pdev->dev.parent);
+ if (!rpm) {
+ dev_err(&pdev->dev, "Unable to retrieve handle to RPM\n");
+ return -ENODEV;
+ }
+
+ desc = of_device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
+
+ rpmpds = desc->rpmpds;
+ num = desc->num_pds;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->domains = devm_kcalloc(&pdev->dev, num, sizeof(*data->domains),
+ GFP_KERNEL);
+ data->num_domains = num;
+
+ for (i = 0; i < num; i++) {
+ if (!rpmpds[i]) {
+ dev_warn(&pdev->dev, "rpmpds[] with empty entry at index=%d\n",
+ i);
+ continue;
+ }
+
+ rpmpds[i]->rpm = rpm;
+ rpmpds[i]->pd.power_off = rpmpd_power_off;
+ rpmpds[i]->pd.power_on = rpmpd_power_on;
+ rpmpds[i]->pd.set_performance_state = rpmpd_set_performance;
+ rpmpds[i]->pd.opp_to_performance_state = rpmpd_get_performance;
+ pm_genpd_init(&rpmpds[i]->pd, NULL, true);
+
+ data->domains[i] = &rpmpds[i]->pd;
+ }
+
+ return of_genpd_add_provider_onecell(pdev->dev.of_node, data);
+}
+
+static struct platform_driver rpmpd_driver = {
+ .driver = {
+ .name = "qcom-rpmpd",
+ .of_match_table = rpmpd_match_table,
+ .suppress_bind_attrs = true,
+ },
+ .probe = rpmpd_probe,
+};
+
+static int __init rpmpd_init(void)
+{
+ return platform_driver_register(&rpmpd_driver);
+}
+core_initcall(rpmpd_init);
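
The resource-type and key constants in rpmpd.c above are four-character ASCII tags packed as little-endian 32-bit values: 0x61706d73 is "smpa", 0x616f646c is "ldoa", 0x6e726f63 is "corn". A hypothetical helper that shows the packing:

#include <linux/types.h>

#define EXAMPLE_FOURCC_LE(a, b, c, d) \
	((u32)(a) | ((u32)(b) << 8) | ((u32)(c) << 16) | ((u32)(d) << 24))

/* EXAMPLE_FOURCC_LE('s', 'm', 'p', 'a') == 0x61706d73 == RPMPD_SMPA */
/* EXAMPLE_FOURCC_LE('c', 'o', 'r', 'n') == 0x6e726f63 == KEY_CORNER */
/* EXAMPLE_FOURCC_LE('s', 'w', 'e', 'n') == 0x6e657773 == KEY_ENABLE */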
diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c
index b8e63724a49d..9956bb2c63f2 100644
--- a/drivers/soc/qcom/smd-rpm.c
+++ b/drivers/soc/qcom/smd-rpm.c
@@ -227,6 +227,7 @@ static const struct of_device_id qcom_smd_rpm_of_match[] = {
{ .compatible = "qcom,rpm-msm8974" },
{ .compatible = "qcom,rpm-msm8996" },
{ .compatible = "qcom,rpm-msm8998" },
+ { .compatible = "qcom,rpm-sdm660" },
{ .compatible = "qcom,rpm-qcs404" },
{}
};
diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
index a33ee8ef8b6b..51625703399e 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
@@ -137,13 +137,17 @@ static int tegra_fuse_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
fuse->phys = res->start;
fuse->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(fuse->base))
- return PTR_ERR(fuse->base);
+ if (IS_ERR(fuse->base)) {
+ err = PTR_ERR(fuse->base);
+ fuse->base = base;
+ return err;
+ }
fuse->clk = devm_clk_get(&pdev->dev, "fuse");
if (IS_ERR(fuse->clk)) {
dev_err(&pdev->dev, "failed to get FUSE clock: %ld",
PTR_ERR(fuse->clk));
+ fuse->base = base;
return PTR_ERR(fuse->clk);
}
@@ -152,8 +156,10 @@ static int tegra_fuse_probe(struct platform_device *pdev)
if (fuse->soc->probe) {
err = fuse->soc->probe(fuse);
- if (err < 0)
+ if (err < 0) {
+ fuse->base = base;
return err;
+ }
}
if (tegra_fuse_create_sysfs(&pdev->dev, fuse->soc->info->size,
diff --git a/drivers/soc/tegra/fuse/speedo-tegra210.c b/drivers/soc/tegra/fuse/speedo-tegra210.c
index 5373f4c16b54..8ed35d9851f8 100644
--- a/drivers/soc/tegra/fuse/speedo-tegra210.c
+++ b/drivers/soc/tegra/fuse/speedo-tegra210.c
@@ -131,7 +131,7 @@ void __init tegra210_init_speedo_data(struct tegra_sku_info *sku_info)
soc_speedo[0] = tegra_fuse_read_early(FUSE_SOC_SPEEDO_0);
soc_speedo[1] = tegra_fuse_read_early(FUSE_SOC_SPEEDO_1);
- soc_speedo[2] = tegra_fuse_read_early(FUSE_CPU_SPEEDO_2);
+ soc_speedo[2] = tegra_fuse_read_early(FUSE_SOC_SPEEDO_2);
cpu_iddq = tegra_fuse_read_early(FUSE_CPU_IDDQ) * 4;
soc_iddq = tegra_fuse_read_early(FUSE_SOC_IDDQ) * 4;
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 7ea3280279ff..0df258518693 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -20,7 +20,7 @@
#define pr_fmt(fmt) "tegra-pmc: " fmt
-#include <linux/kernel.h>
+#include <linux/arm-smccc.h>
#include <linux/clk.h>
#include <linux/clk/tegra.h>
#include <linux/debugfs.h>
@@ -30,16 +30,17 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/iopoll.h>
-#include <linux/irq.h>
#include <linux/irqdomain.h>
-#include <linux/of.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/of_clk.h>
+#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
-#include <linux/pinctrl/pinctrl.h>
-#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinctrl.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/reboot.h>
@@ -145,6 +146,11 @@
#define WAKE_AOWAKE_CTRL 0x4f4
#define WAKE_AOWAKE_CTRL_INTR_POLARITY BIT(0)
+/* for secure PMC */
+#define TEGRA_SMC_PMC 0xc2fffe00
+#define TEGRA_SMC_PMC_READ 0xaa
+#define TEGRA_SMC_PMC_WRITE 0xbb
+
struct tegra_powergate {
struct generic_pm_domain genpd;
struct tegra_pmc *pmc;
@@ -216,6 +222,7 @@ struct tegra_pmc_soc {
bool has_gpu_clamps;
bool needs_mbist_war;
bool has_impl_33v_pwr;
+ bool maybe_tz_only;
const struct tegra_io_pad_soc *io_pads;
unsigned int num_io_pads;
@@ -273,8 +280,12 @@ static const char * const tegra30_reset_sources[] = {
* struct tegra_pmc - NVIDIA Tegra PMC
* @dev: pointer to PMC device structure
* @base: pointer to I/O remapped register region
+ * @wake: pointer to I/O remapped region for WAKE registers
+ * @aotag: pointer to I/O remapped region for AOTAG registers
+ * @scratch: pointer to I/O remapped region for scratch registers
* @clk: pointer to pclk clock
* @soc: pointer to SoC data structure
+ * @tz_only: flag specifying if the PMC can only be accessed via TrustZone
* @debugfs: pointer to debugfs entry
* @rate: currently configured rate of pclk
* @suspend_mode: lowest suspend mode available
@@ -291,6 +302,9 @@ static const char * const tegra30_reset_sources[] = {
* @lp0_vec_size: size of the LP0 warm boot code
* @powergates_available: Bitmap of available power gates
* @powergates_lock: mutex for power gate register access
+ * @pctl_dev: pin controller exposed by the PMC
+ * @domain: IRQ domain provided by the PMC
+ * @irq: chip implementation for the IRQ domain
*/
struct tegra_pmc {
struct device *dev;
@@ -302,6 +316,7 @@ struct tegra_pmc {
struct dentry *debugfs;
const struct tegra_pmc_soc *soc;
+ bool tz_only;
unsigned long rate;
@@ -338,30 +353,85 @@ to_powergate(struct generic_pm_domain *domain)
return container_of(domain, struct tegra_powergate, genpd);
}
-static u32 tegra_pmc_readl(unsigned long offset)
+static u32 tegra_pmc_readl(struct tegra_pmc *pmc, unsigned long offset)
{
+ struct arm_smccc_res res;
+
+ if (pmc->tz_only) {
+ arm_smccc_smc(TEGRA_SMC_PMC, TEGRA_SMC_PMC_READ, offset, 0, 0,
+ 0, 0, 0, &res);
+ if (res.a0) {
+ if (pmc->dev)
+ dev_warn(pmc->dev, "%s(): SMC failed: %lu\n",
+ __func__, res.a0);
+ else
+ pr_warn("%s(): SMC failed: %lu\n", __func__,
+ res.a0);
+ }
+
+ return res.a1;
+ }
+
return readl(pmc->base + offset);
}
-static void tegra_pmc_writel(u32 value, unsigned long offset)
+static void tegra_pmc_writel(struct tegra_pmc *pmc, u32 value,
+ unsigned long offset)
{
- writel(value, pmc->base + offset);
+ struct arm_smccc_res res;
+
+ if (pmc->tz_only) {
+ arm_smccc_smc(TEGRA_SMC_PMC, TEGRA_SMC_PMC_WRITE, offset,
+ value, 0, 0, 0, 0, &res);
+ if (res.a0) {
+ if (pmc->dev)
+ dev_warn(pmc->dev, "%s(): SMC failed: %lu\n",
+ __func__, res.a0);
+ else
+ pr_warn("%s(): SMC failed: %lu\n", __func__,
+ res.a0);
+ }
+ } else {
+ writel(value, pmc->base + offset);
+ }
}
+static u32 tegra_pmc_scratch_readl(struct tegra_pmc *pmc, unsigned long offset)
+{
+ if (pmc->tz_only)
+ return tegra_pmc_readl(pmc, offset);
+
+ return readl(pmc->scratch + offset);
+}
+
+static void tegra_pmc_scratch_writel(struct tegra_pmc *pmc, u32 value,
+ unsigned long offset)
+{
+ if (pmc->tz_only)
+ tegra_pmc_writel(pmc, value, offset);
+ else
+ writel(value, pmc->scratch + offset);
+}
+
+/*
+ * TODO Figure out a way to call this with the struct tegra_pmc * passed in.
+ * This currently doesn't work because readx_poll_timeout() can only operate
+ * on functions that take a single argument.
+ */
static inline bool tegra_powergate_state(int id)
{
if (id == TEGRA_POWERGATE_3D && pmc->soc->has_gpu_clamps)
- return (tegra_pmc_readl(GPU_RG_CNTRL) & 0x1) == 0;
+ return (tegra_pmc_readl(pmc, GPU_RG_CNTRL) & 0x1) == 0;
else
- return (tegra_pmc_readl(PWRGATE_STATUS) & BIT(id)) != 0;
+ return (tegra_pmc_readl(pmc, PWRGATE_STATUS) & BIT(id)) != 0;
}
-static inline bool tegra_powergate_is_valid(int id)
+static inline bool tegra_powergate_is_valid(struct tegra_pmc *pmc, int id)
{
return (pmc->soc && pmc->soc->powergates[id]);
}
-static inline bool tegra_powergate_is_available(int id)
+static inline bool tegra_powergate_is_available(struct tegra_pmc *pmc, int id)
{
return test_bit(id, pmc->powergates_available);
}
@@ -374,7 +444,7 @@ static int tegra_powergate_lookup(struct tegra_pmc *pmc, const char *name)
return -EINVAL;
for (i = 0; i < pmc->soc->num_powergates; i++) {
- if (!tegra_powergate_is_valid(i))
+ if (!tegra_powergate_is_valid(pmc, i))
continue;
if (!strcmp(name, pmc->soc->powergates[i]))
@@ -386,10 +456,12 @@ static int tegra_powergate_lookup(struct tegra_pmc *pmc, const char *name)
/**
* tegra_powergate_set() - set the state of a partition
+ * @pmc: power management controller
* @id: partition ID
* @new_state: new state of the partition
*/
-static int tegra_powergate_set(unsigned int id, bool new_state)
+static int tegra_powergate_set(struct tegra_pmc *pmc, unsigned int id,
+ bool new_state)
{
bool status;
int err;
@@ -404,7 +476,7 @@ static int tegra_powergate_set(unsigned int id, bool new_state)
return 0;
}
- tegra_pmc_writel(PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
+ tegra_pmc_writel(pmc, PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
err = readx_poll_timeout(tegra_powergate_state, id, status,
status == new_state, 10, 100000);
@@ -414,7 +486,8 @@ static int tegra_powergate_set(unsigned int id, bool new_state)
return err;
}
-static int __tegra_powergate_remove_clamping(unsigned int id)
+static int __tegra_powergate_remove_clamping(struct tegra_pmc *pmc,
+ unsigned int id)
{
u32 mask;
@@ -426,7 +499,7 @@ static int __tegra_powergate_remove_clamping(unsigned int id)
*/
if (id == TEGRA_POWERGATE_3D) {
if (pmc->soc->has_gpu_clamps) {
- tegra_pmc_writel(0, GPU_RG_CNTRL);
+ tegra_pmc_writel(pmc, 0, GPU_RG_CNTRL);
goto out;
}
}
@@ -442,7 +515,7 @@ static int __tegra_powergate_remove_clamping(unsigned int id)
else
mask = (1 << id);
- tegra_pmc_writel(mask, REMOVE_CLAMPING);
+ tegra_pmc_writel(pmc, mask, REMOVE_CLAMPING);
out:
mutex_unlock(&pmc->powergates_lock);
@@ -494,7 +567,7 @@ static int tegra_powergate_power_up(struct tegra_powergate *pg,
usleep_range(10, 20);
- err = tegra_powergate_set(pg->id, true);
+ err = tegra_powergate_set(pg->pmc, pg->id, true);
if (err < 0)
return err;
@@ -506,7 +579,7 @@ static int tegra_powergate_power_up(struct tegra_powergate *pg,
usleep_range(10, 20);
- err = __tegra_powergate_remove_clamping(pg->id);
+ err = __tegra_powergate_remove_clamping(pg->pmc, pg->id);
if (err)
goto disable_clks;
@@ -533,7 +606,7 @@ disable_clks:
usleep_range(10, 20);
powergate_off:
- tegra_powergate_set(pg->id, false);
+ tegra_powergate_set(pg->pmc, pg->id, false);
return err;
}
@@ -558,7 +631,7 @@ static int tegra_powergate_power_down(struct tegra_powergate *pg)
usleep_range(10, 20);
- err = tegra_powergate_set(pg->id, false);
+ err = tegra_powergate_set(pg->pmc, pg->id, false);
if (err)
goto assert_resets;
@@ -579,12 +652,13 @@ disable_clks:
static int tegra_genpd_power_on(struct generic_pm_domain *domain)
{
struct tegra_powergate *pg = to_powergate(domain);
+ struct device *dev = pg->pmc->dev;
int err;
err = tegra_powergate_power_up(pg, true);
if (err)
- pr_err("failed to turn on PM domain %s: %d\n", pg->genpd.name,
- err);
+ dev_err(dev, "failed to turn on PM domain %s: %d\n",
+ pg->genpd.name, err);
return err;
}
@@ -592,12 +666,13 @@ static int tegra_genpd_power_on(struct generic_pm_domain *domain)
static int tegra_genpd_power_off(struct generic_pm_domain *domain)
{
struct tegra_powergate *pg = to_powergate(domain);
+ struct device *dev = pg->pmc->dev;
int err;
err = tegra_powergate_power_down(pg);
if (err)
- pr_err("failed to turn off PM domain %s: %d\n",
- pg->genpd.name, err);
+ dev_err(dev, "failed to turn off PM domain %s: %d\n",
+ pg->genpd.name, err);
return err;
}
@@ -608,10 +683,10 @@ static int tegra_genpd_power_off(struct generic_pm_domain *domain)
*/
int tegra_powergate_power_on(unsigned int id)
{
- if (!tegra_powergate_is_available(id))
+ if (!tegra_powergate_is_available(pmc, id))
return -EINVAL;
- return tegra_powergate_set(id, true);
+ return tegra_powergate_set(pmc, id, true);
}
/**
@@ -620,20 +695,21 @@ int tegra_powergate_power_on(unsigned int id)
*/
int tegra_powergate_power_off(unsigned int id)
{
- if (!tegra_powergate_is_available(id))
+ if (!tegra_powergate_is_available(pmc, id))
return -EINVAL;
- return tegra_powergate_set(id, false);
+ return tegra_powergate_set(pmc, id, false);
}
EXPORT_SYMBOL(tegra_powergate_power_off);
/**
* tegra_powergate_is_powered() - check if partition is powered
+ * @pmc: power management controller
* @id: partition ID
*/
-int tegra_powergate_is_powered(unsigned int id)
+static int tegra_powergate_is_powered(struct tegra_pmc *pmc, unsigned int id)
{
- if (!tegra_powergate_is_valid(id))
+ if (!tegra_powergate_is_valid(pmc, id))
return -EINVAL;
return tegra_powergate_state(id);
@@ -645,10 +721,10 @@ int tegra_powergate_is_powered(unsigned int id)
*/
int tegra_powergate_remove_clamping(unsigned int id)
{
- if (!tegra_powergate_is_available(id))
+ if (!tegra_powergate_is_available(pmc, id))
return -EINVAL;
- return __tegra_powergate_remove_clamping(id);
+ return __tegra_powergate_remove_clamping(pmc, id);
}
EXPORT_SYMBOL(tegra_powergate_remove_clamping);
@@ -666,7 +742,7 @@ int tegra_powergate_sequence_power_up(unsigned int id, struct clk *clk,
struct tegra_powergate *pg;
int err;
- if (!tegra_powergate_is_available(id))
+ if (!tegra_powergate_is_available(pmc, id))
return -EINVAL;
pg = kzalloc(sizeof(*pg), GFP_KERNEL);
@@ -681,7 +757,8 @@ int tegra_powergate_sequence_power_up(unsigned int id, struct clk *clk,
err = tegra_powergate_power_up(pg, false);
if (err)
- pr_err("failed to turn on partition %d: %d\n", id, err);
+ dev_err(pmc->dev, "failed to turn on partition %d: %d\n", id,
+ err);
kfree(pg);
@@ -691,12 +768,14 @@ EXPORT_SYMBOL(tegra_powergate_sequence_power_up);
/**
* tegra_get_cpu_powergate_id() - convert from CPU ID to partition ID
+ * @pmc: power management controller
* @cpuid: CPU partition ID
*
* Returns the partition ID corresponding to the CPU partition ID or a
* negative error code on failure.
*/
-static int tegra_get_cpu_powergate_id(unsigned int cpuid)
+static int tegra_get_cpu_powergate_id(struct tegra_pmc *pmc,
+ unsigned int cpuid)
{
if (pmc->soc && cpuid < pmc->soc->num_cpu_powergates)
return pmc->soc->cpu_powergates[cpuid];
@@ -712,11 +791,11 @@ bool tegra_pmc_cpu_is_powered(unsigned int cpuid)
{
int id;
- id = tegra_get_cpu_powergate_id(cpuid);
+ id = tegra_get_cpu_powergate_id(pmc, cpuid);
if (id < 0)
return false;
- return tegra_powergate_is_powered(id);
+ return tegra_powergate_is_powered(pmc, id);
}
/**
@@ -727,11 +806,11 @@ int tegra_pmc_cpu_power_on(unsigned int cpuid)
{
int id;
- id = tegra_get_cpu_powergate_id(cpuid);
+ id = tegra_get_cpu_powergate_id(pmc, cpuid);
if (id < 0)
return id;
- return tegra_powergate_set(id, true);
+ return tegra_powergate_set(pmc, id, true);
}
/**
@@ -742,7 +821,7 @@ int tegra_pmc_cpu_remove_clamping(unsigned int cpuid)
{
int id;
- id = tegra_get_cpu_powergate_id(cpuid);
+ id = tegra_get_cpu_powergate_id(pmc, cpuid);
if (id < 0)
return id;
@@ -755,7 +834,7 @@ static int tegra_pmc_restart_notify(struct notifier_block *this,
const char *cmd = data;
u32 value;
- value = readl(pmc->scratch + pmc->soc->regs->scratch0);
+ value = tegra_pmc_scratch_readl(pmc, pmc->soc->regs->scratch0);
value &= ~PMC_SCRATCH0_MODE_MASK;
if (cmd) {
@@ -769,12 +848,12 @@ static int tegra_pmc_restart_notify(struct notifier_block *this,
value |= PMC_SCRATCH0_MODE_RCM;
}
- writel(value, pmc->scratch + pmc->soc->regs->scratch0);
+ tegra_pmc_scratch_writel(pmc, value, pmc->soc->regs->scratch0);
/* reset everything but PMC_SCRATCH0 and PMC_RST_STATUS */
- value = tegra_pmc_readl(PMC_CNTRL);
+ value = tegra_pmc_readl(pmc, PMC_CNTRL);
value |= PMC_CNTRL_MAIN_RST;
- tegra_pmc_writel(value, PMC_CNTRL);
+ tegra_pmc_writel(pmc, value, PMC_CNTRL);
return NOTIFY_DONE;
}
@@ -793,7 +872,7 @@ static int powergate_show(struct seq_file *s, void *data)
seq_printf(s, "------------------\n");
for (i = 0; i < pmc->soc->num_powergates; i++) {
- status = tegra_powergate_is_powered(i);
+ status = tegra_powergate_is_powered(pmc, i);
if (status < 0)
continue;
@@ -855,12 +934,13 @@ err:
static int tegra_powergate_of_get_resets(struct tegra_powergate *pg,
struct device_node *np, bool off)
{
+ struct device *dev = pg->pmc->dev;
int err;
pg->reset = of_reset_control_array_get_exclusive(np);
if (IS_ERR(pg->reset)) {
err = PTR_ERR(pg->reset);
- pr_err("failed to get device resets: %d\n", err);
+ dev_err(dev, "failed to get device resets: %d\n", err);
return err;
}
@@ -877,6 +957,7 @@ static int tegra_powergate_of_get_resets(struct tegra_powergate *pg,
static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
{
+ struct device *dev = pmc->dev;
struct tegra_powergate *pg;
int id, err;
bool off;
@@ -887,7 +968,7 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
id = tegra_powergate_lookup(pmc, np->name);
if (id < 0) {
- pr_err("powergate lookup failed for %pOFn: %d\n", np, id);
+ dev_err(dev, "powergate lookup failed for %pOFn: %d\n", np, id);
goto free_mem;
}
@@ -903,17 +984,17 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
pg->genpd.power_on = tegra_genpd_power_on;
pg->pmc = pmc;
- off = !tegra_powergate_is_powered(pg->id);
+ off = !tegra_powergate_is_powered(pmc, pg->id);
err = tegra_powergate_of_get_clks(pg, np);
if (err < 0) {
- pr_err("failed to get clocks for %pOFn: %d\n", np, err);
+ dev_err(dev, "failed to get clocks for %pOFn: %d\n", np, err);
goto set_available;
}
err = tegra_powergate_of_get_resets(pg, np, off);
if (err < 0) {
- pr_err("failed to get resets for %pOFn: %d\n", np, err);
+ dev_err(dev, "failed to get resets for %pOFn: %d\n", np, err);
goto remove_clks;
}
@@ -926,19 +1007,19 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
err = pm_genpd_init(&pg->genpd, NULL, off);
if (err < 0) {
- pr_err("failed to initialise PM domain %pOFn: %d\n", np,
+ dev_err(dev, "failed to initialise PM domain %pOFn: %d\n", np,
err);
goto remove_resets;
}
err = of_genpd_add_provider_simple(np, &pg->genpd);
if (err < 0) {
- pr_err("failed to add PM domain provider for %pOFn: %d\n",
- np, err);
+ dev_err(dev, "failed to add PM domain provider for %pOFn: %d\n",
+ np, err);
goto remove_genpd;
}
- pr_debug("added PM domain %s\n", pg->genpd.name);
+ dev_dbg(dev, "added PM domain %s\n", pg->genpd.name);
return;
@@ -994,7 +1075,8 @@ tegra_io_pad_find(struct tegra_pmc *pmc, enum tegra_io_pad id)
return NULL;
}
-static int tegra_io_pad_get_dpd_register_bit(enum tegra_io_pad id,
+static int tegra_io_pad_get_dpd_register_bit(struct tegra_pmc *pmc,
+ enum tegra_io_pad id,
unsigned long *request,
unsigned long *status,
u32 *mask)
@@ -1003,7 +1085,7 @@ static int tegra_io_pad_get_dpd_register_bit(enum tegra_io_pad id,
pad = tegra_io_pad_find(pmc, id);
if (!pad) {
- pr_err("invalid I/O pad ID %u\n", id);
+ dev_err(pmc->dev, "invalid I/O pad ID %u\n", id);
return -ENOENT;
}
@@ -1023,43 +1105,44 @@ static int tegra_io_pad_get_dpd_register_bit(enum tegra_io_pad id,
return 0;
}
-static int tegra_io_pad_prepare(enum tegra_io_pad id, unsigned long *request,
- unsigned long *status, u32 *mask)
+static int tegra_io_pad_prepare(struct tegra_pmc *pmc, enum tegra_io_pad id,
+ unsigned long *request, unsigned long *status,
+ u32 *mask)
{
unsigned long rate, value;
int err;
- err = tegra_io_pad_get_dpd_register_bit(id, request, status, mask);
+ err = tegra_io_pad_get_dpd_register_bit(pmc, id, request, status, mask);
if (err)
return err;
if (pmc->clk) {
rate = clk_get_rate(pmc->clk);
if (!rate) {
- pr_err("failed to get clock rate\n");
+ dev_err(pmc->dev, "failed to get clock rate\n");
return -ENODEV;
}
- tegra_pmc_writel(DPD_SAMPLE_ENABLE, DPD_SAMPLE);
+ tegra_pmc_writel(pmc, DPD_SAMPLE_ENABLE, DPD_SAMPLE);
/* must be at least 200 ns, in APB (PCLK) clock cycles */
value = DIV_ROUND_UP(1000000000, rate);
value = DIV_ROUND_UP(200, value);
- tegra_pmc_writel(value, SEL_DPD_TIM);
+ tegra_pmc_writel(pmc, value, SEL_DPD_TIM);
}
return 0;
}
-static int tegra_io_pad_poll(unsigned long offset, u32 mask,
- u32 val, unsigned long timeout)
+static int tegra_io_pad_poll(struct tegra_pmc *pmc, unsigned long offset,
+ u32 mask, u32 val, unsigned long timeout)
{
u32 value;
timeout = jiffies + msecs_to_jiffies(timeout);
while (time_after(timeout, jiffies)) {
- value = tegra_pmc_readl(offset);
+ value = tegra_pmc_readl(pmc, offset);
if ((value & mask) == val)
return 0;
@@ -1069,10 +1152,10 @@ static int tegra_io_pad_poll(unsigned long offset, u32 mask,
return -ETIMEDOUT;
}
-static void tegra_io_pad_unprepare(void)
+static void tegra_io_pad_unprepare(struct tegra_pmc *pmc)
{
if (pmc->clk)
- tegra_pmc_writel(DPD_SAMPLE_DISABLE, DPD_SAMPLE);
+ tegra_pmc_writel(pmc, DPD_SAMPLE_DISABLE, DPD_SAMPLE);
}
/**
@@ -1089,21 +1172,21 @@ int tegra_io_pad_power_enable(enum tegra_io_pad id)
mutex_lock(&pmc->powergates_lock);
- err = tegra_io_pad_prepare(id, &request, &status, &mask);
+ err = tegra_io_pad_prepare(pmc, id, &request, &status, &mask);
if (err < 0) {
- pr_err("failed to prepare I/O pad: %d\n", err);
+ dev_err(pmc->dev, "failed to prepare I/O pad: %d\n", err);
goto unlock;
}
- tegra_pmc_writel(IO_DPD_REQ_CODE_OFF | mask, request);
+ tegra_pmc_writel(pmc, IO_DPD_REQ_CODE_OFF | mask, request);
- err = tegra_io_pad_poll(status, mask, 0, 250);
+ err = tegra_io_pad_poll(pmc, status, mask, 0, 250);
if (err < 0) {
- pr_err("failed to enable I/O pad: %d\n", err);
+ dev_err(pmc->dev, "failed to enable I/O pad: %d\n", err);
goto unlock;
}
- tegra_io_pad_unprepare();
+ tegra_io_pad_unprepare(pmc);
unlock:
mutex_unlock(&pmc->powergates_lock);
@@ -1125,21 +1208,21 @@ int tegra_io_pad_power_disable(enum tegra_io_pad id)
mutex_lock(&pmc->powergates_lock);
- err = tegra_io_pad_prepare(id, &request, &status, &mask);
+ err = tegra_io_pad_prepare(pmc, id, &request, &status, &mask);
if (err < 0) {
- pr_err("failed to prepare I/O pad: %d\n", err);
+ dev_err(pmc->dev, "failed to prepare I/O pad: %d\n", err);
goto unlock;
}
- tegra_pmc_writel(IO_DPD_REQ_CODE_ON | mask, request);
+ tegra_pmc_writel(pmc, IO_DPD_REQ_CODE_ON | mask, request);
- err = tegra_io_pad_poll(status, mask, mask, 250);
+ err = tegra_io_pad_poll(pmc, status, mask, mask, 250);
if (err < 0) {
- pr_err("failed to disable I/O pad: %d\n", err);
+ dev_err(pmc->dev, "failed to disable I/O pad: %d\n", err);
goto unlock;
}
- tegra_io_pad_unprepare();
+ tegra_io_pad_unprepare(pmc);
unlock:
mutex_unlock(&pmc->powergates_lock);
@@ -1147,22 +1230,24 @@ unlock:
}
EXPORT_SYMBOL(tegra_io_pad_power_disable);
-static int tegra_io_pad_is_powered(enum tegra_io_pad id)
+static int tegra_io_pad_is_powered(struct tegra_pmc *pmc, enum tegra_io_pad id)
{
unsigned long request, status;
u32 mask, value;
int err;
- err = tegra_io_pad_get_dpd_register_bit(id, &request, &status, &mask);
+ err = tegra_io_pad_get_dpd_register_bit(pmc, id, &request, &status,
+ &mask);
if (err)
return err;
- value = tegra_pmc_readl(status);
+ value = tegra_pmc_readl(pmc, status);
return !(value & mask);
}
-static int tegra_io_pad_set_voltage(enum tegra_io_pad id, int voltage)
+static int tegra_io_pad_set_voltage(struct tegra_pmc *pmc, enum tegra_io_pad id,
+ int voltage)
{
const struct tegra_io_pad_soc *pad;
u32 value;
@@ -1177,29 +1262,29 @@ static int tegra_io_pad_set_voltage(enum tegra_io_pad id, int voltage)
mutex_lock(&pmc->powergates_lock);
if (pmc->soc->has_impl_33v_pwr) {
- value = tegra_pmc_readl(PMC_IMPL_E_33V_PWR);
+ value = tegra_pmc_readl(pmc, PMC_IMPL_E_33V_PWR);
if (voltage == TEGRA_IO_PAD_VOLTAGE_1V8)
value &= ~BIT(pad->voltage);
else
value |= BIT(pad->voltage);
- tegra_pmc_writel(value, PMC_IMPL_E_33V_PWR);
+ tegra_pmc_writel(pmc, value, PMC_IMPL_E_33V_PWR);
} else {
/* write-enable PMC_PWR_DET_VALUE[pad->voltage] */
- value = tegra_pmc_readl(PMC_PWR_DET);
+ value = tegra_pmc_readl(pmc, PMC_PWR_DET);
value |= BIT(pad->voltage);
- tegra_pmc_writel(value, PMC_PWR_DET);
+ tegra_pmc_writel(pmc, value, PMC_PWR_DET);
/* update I/O voltage */
- value = tegra_pmc_readl(PMC_PWR_DET_VALUE);
+ value = tegra_pmc_readl(pmc, PMC_PWR_DET_VALUE);
if (voltage == TEGRA_IO_PAD_VOLTAGE_1V8)
value &= ~BIT(pad->voltage);
else
value |= BIT(pad->voltage);
- tegra_pmc_writel(value, PMC_PWR_DET_VALUE);
+ tegra_pmc_writel(pmc, value, PMC_PWR_DET_VALUE);
}
mutex_unlock(&pmc->powergates_lock);
@@ -1209,7 +1294,7 @@ static int tegra_io_pad_set_voltage(enum tegra_io_pad id, int voltage)
return 0;
}
-static int tegra_io_pad_get_voltage(enum tegra_io_pad id)
+static int tegra_io_pad_get_voltage(struct tegra_pmc *pmc, enum tegra_io_pad id)
{
const struct tegra_io_pad_soc *pad;
u32 value;
@@ -1222,9 +1307,9 @@ static int tegra_io_pad_get_voltage(enum tegra_io_pad id)
return -ENOTSUPP;
if (pmc->soc->has_impl_33v_pwr)
- value = tegra_pmc_readl(PMC_IMPL_E_33V_PWR);
+ value = tegra_pmc_readl(pmc, PMC_IMPL_E_33V_PWR);
else
- value = tegra_pmc_readl(PMC_PWR_DET_VALUE);
+ value = tegra_pmc_readl(pmc, PMC_PWR_DET_VALUE);
if ((value & BIT(pad->voltage)) == 0)
return TEGRA_IO_PAD_VOLTAGE_1V8;
@@ -1296,21 +1381,21 @@ void tegra_pmc_enter_suspend_mode(enum tegra_suspend_mode mode)
ticks = pmc->cpu_good_time * rate + USEC_PER_SEC - 1;
do_div(ticks, USEC_PER_SEC);
- tegra_pmc_writel(ticks, PMC_CPUPWRGOOD_TIMER);
+ tegra_pmc_writel(pmc, ticks, PMC_CPUPWRGOOD_TIMER);
ticks = pmc->cpu_off_time * rate + USEC_PER_SEC - 1;
do_div(ticks, USEC_PER_SEC);
- tegra_pmc_writel(ticks, PMC_CPUPWROFF_TIMER);
+ tegra_pmc_writel(pmc, ticks, PMC_CPUPWROFF_TIMER);
wmb();
pmc->rate = rate;
}
- value = tegra_pmc_readl(PMC_CNTRL);
+ value = tegra_pmc_readl(pmc, PMC_CNTRL);
value &= ~PMC_CNTRL_SIDE_EFFECT_LP0;
value |= PMC_CNTRL_CPU_PWRREQ_OE;
- tegra_pmc_writel(value, PMC_CNTRL);
+ tegra_pmc_writel(pmc, value, PMC_CNTRL);
}
#endif
@@ -1432,13 +1517,13 @@ static void tegra_pmc_init_tsense_reset(struct tegra_pmc *pmc)
if (of_property_read_u32(np, "nvidia,pinmux-id", &pinmux))
pinmux = 0;
- value = tegra_pmc_readl(PMC_SENSOR_CTRL);
+ value = tegra_pmc_readl(pmc, PMC_SENSOR_CTRL);
value |= PMC_SENSOR_CTRL_SCRATCH_WRITE;
- tegra_pmc_writel(value, PMC_SENSOR_CTRL);
+ tegra_pmc_writel(pmc, value, PMC_SENSOR_CTRL);
value = (reg_data << PMC_SCRATCH54_DATA_SHIFT) |
(reg_addr << PMC_SCRATCH54_ADDR_SHIFT);
- tegra_pmc_writel(value, PMC_SCRATCH54);
+ tegra_pmc_writel(pmc, value, PMC_SCRATCH54);
value = PMC_SCRATCH55_RESET_TEGRA;
value |= ctrl_id << PMC_SCRATCH55_CNTRL_ID_SHIFT;
@@ -1456,11 +1541,11 @@ static void tegra_pmc_init_tsense_reset(struct tegra_pmc *pmc)
value |= checksum << PMC_SCRATCH55_CHECKSUM_SHIFT;
- tegra_pmc_writel(value, PMC_SCRATCH55);
+ tegra_pmc_writel(pmc, value, PMC_SCRATCH55);
- value = tegra_pmc_readl(PMC_SENSOR_CTRL);
+ value = tegra_pmc_readl(pmc, PMC_SENSOR_CTRL);
value |= PMC_SENSOR_CTRL_ENABLE_RST;
- tegra_pmc_writel(value, PMC_SENSOR_CTRL);
+ tegra_pmc_writel(pmc, value, PMC_SENSOR_CTRL);
dev_info(pmc->dev, "emergency thermal reset enabled\n");
@@ -1470,12 +1555,16 @@ out:
static int tegra_io_pad_pinctrl_get_groups_count(struct pinctrl_dev *pctl_dev)
{
+ struct tegra_pmc *pmc = pinctrl_dev_get_drvdata(pctl_dev);
+
return pmc->soc->num_io_pads;
}
-static const char *tegra_io_pad_pinctrl_get_group_name(
- struct pinctrl_dev *pctl, unsigned int group)
+static const char *tegra_io_pad_pinctrl_get_group_name(struct pinctrl_dev *pctl,
+ unsigned int group)
{
+ struct tegra_pmc *pmc = pinctrl_dev_get_drvdata(pctl);
+
return pmc->soc->io_pads[group].name;
}
@@ -1484,8 +1573,11 @@ static int tegra_io_pad_pinctrl_get_group_pins(struct pinctrl_dev *pctl_dev,
const unsigned int **pins,
unsigned int *num_pins)
{
+ struct tegra_pmc *pmc = pinctrl_dev_get_drvdata(pctl_dev);
+
*pins = &pmc->soc->io_pads[group].id;
*num_pins = 1;
+
return 0;
}
@@ -1500,27 +1592,33 @@ static const struct pinctrl_ops tegra_io_pad_pinctrl_ops = {
static int tegra_io_pad_pinconf_get(struct pinctrl_dev *pctl_dev,
unsigned int pin, unsigned long *config)
{
- const struct tegra_io_pad_soc *pad = tegra_io_pad_find(pmc, pin);
enum pin_config_param param = pinconf_to_config_param(*config);
+ struct tegra_pmc *pmc = pinctrl_dev_get_drvdata(pctl_dev);
+ const struct tegra_io_pad_soc *pad;
int ret;
u32 arg;
+ pad = tegra_io_pad_find(pmc, pin);
if (!pad)
return -EINVAL;
switch (param) {
case PIN_CONFIG_POWER_SOURCE:
- ret = tegra_io_pad_get_voltage(pad->id);
+ ret = tegra_io_pad_get_voltage(pmc, pad->id);
if (ret < 0)
return ret;
+
arg = ret;
break;
+
case PIN_CONFIG_LOW_POWER_MODE:
- ret = tegra_io_pad_is_powered(pad->id);
+ ret = tegra_io_pad_is_powered(pmc, pad->id);
if (ret < 0)
return ret;
+
arg = !ret;
break;
+
default:
return -EINVAL;
}
@@ -1534,12 +1632,14 @@ static int tegra_io_pad_pinconf_set(struct pinctrl_dev *pctl_dev,
unsigned int pin, unsigned long *configs,
unsigned int num_configs)
{
- const struct tegra_io_pad_soc *pad = tegra_io_pad_find(pmc, pin);
+ struct tegra_pmc *pmc = pinctrl_dev_get_drvdata(pctl_dev);
+ const struct tegra_io_pad_soc *pad;
enum pin_config_param param;
unsigned int i;
int err;
u32 arg;
+ pad = tegra_io_pad_find(pmc, pin);
if (!pad)
return -EINVAL;
@@ -1560,7 +1660,7 @@ static int tegra_io_pad_pinconf_set(struct pinctrl_dev *pctl_dev,
if (arg != TEGRA_IO_PAD_VOLTAGE_1V8 &&
arg != TEGRA_IO_PAD_VOLTAGE_3V3)
return -EINVAL;
- err = tegra_io_pad_set_voltage(pad->id, arg);
+ err = tegra_io_pad_set_voltage(pmc, pad->id, arg);
if (err)
return err;
break;
@@ -1585,7 +1685,7 @@ static struct pinctrl_desc tegra_pmc_pctl_desc = {
static int tegra_pmc_pinctrl_init(struct tegra_pmc *pmc)
{
- int err = 0;
+ int err;
if (!pmc->soc->num_pin_descs)
return 0;
@@ -1598,18 +1698,20 @@ static int tegra_pmc_pinctrl_init(struct tegra_pmc *pmc)
pmc);
if (IS_ERR(pmc->pctl_dev)) {
err = PTR_ERR(pmc->pctl_dev);
- dev_err(pmc->dev, "unable to register pinctrl, %d\n", err);
+ dev_err(pmc->dev, "failed to register pin controller: %d\n",
+ err);
+ return err;
}
- return err;
+ return 0;
}
static ssize_t reset_reason_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
u32 value, rst_src;
- value = tegra_pmc_readl(pmc->soc->regs->rst_status);
+ value = tegra_pmc_readl(pmc, pmc->soc->regs->rst_status);
rst_src = (value & pmc->soc->regs->rst_source_mask) >>
pmc->soc->regs->rst_source_shift;
@@ -1619,11 +1721,11 @@ static ssize_t reset_reason_show(struct device *dev,
static DEVICE_ATTR_RO(reset_reason);
static ssize_t reset_level_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
u32 value, rst_lvl;
- value = tegra_pmc_readl(pmc->soc->regs->rst_status);
+ value = tegra_pmc_readl(pmc, pmc->soc->regs->rst_status);
rst_lvl = (value & pmc->soc->regs->rst_level_mask) >>
pmc->soc->regs->rst_level_shift;
@@ -1641,16 +1743,16 @@ static void tegra_pmc_reset_sysfs_init(struct tegra_pmc *pmc)
err = device_create_file(dev, &dev_attr_reset_reason);
if (err < 0)
dev_warn(dev,
- "failed to create attr \"reset_reason\": %d\n",
- err);
+ "failed to create attr \"reset_reason\": %d\n",
+ err);
}
if (pmc->soc->reset_levels) {
err = device_create_file(dev, &dev_attr_reset_level);
if (err < 0)
dev_warn(dev,
- "failed to create attr \"reset_level\": %d\n",
- err);
+ "failed to create attr \"reset_level\": %d\n",
+ err);
}
}
@@ -1920,6 +2022,8 @@ static int tegra_pmc_probe(struct platform_device *pdev)
pmc->base = base;
mutex_unlock(&pmc->powergates_lock);
+ platform_set_drvdata(pdev, pmc);
+
return 0;
cleanup_restart_handler:
@@ -1932,14 +2036,18 @@ cleanup_debugfs:
#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_ARM)
static int tegra_pmc_suspend(struct device *dev)
{
- tegra_pmc_writel(virt_to_phys(tegra_resume), PMC_SCRATCH41);
+ struct tegra_pmc *pmc = dev_get_drvdata(dev);
+
+ tegra_pmc_writel(pmc, virt_to_phys(tegra_resume), PMC_SCRATCH41);
return 0;
}
static int tegra_pmc_resume(struct device *dev)
{
- tegra_pmc_writel(0x0, PMC_SCRATCH41);
+ struct tegra_pmc *pmc = dev_get_drvdata(dev);
+
+ tegra_pmc_writel(pmc, 0x0, PMC_SCRATCH41);
return 0;
}
@@ -1976,11 +2084,11 @@ static void tegra20_pmc_init(struct tegra_pmc *pmc)
u32 value;
/* Always enable CPU power request */
- value = tegra_pmc_readl(PMC_CNTRL);
+ value = tegra_pmc_readl(pmc, PMC_CNTRL);
value |= PMC_CNTRL_CPU_PWRREQ_OE;
- tegra_pmc_writel(value, PMC_CNTRL);
+ tegra_pmc_writel(pmc, value, PMC_CNTRL);
- value = tegra_pmc_readl(PMC_CNTRL);
+ value = tegra_pmc_readl(pmc, PMC_CNTRL);
if (pmc->sysclkreq_high)
value &= ~PMC_CNTRL_SYSCLK_POLARITY;
@@ -1988,12 +2096,12 @@ static void tegra20_pmc_init(struct tegra_pmc *pmc)
value |= PMC_CNTRL_SYSCLK_POLARITY;
/* configure the output polarity while the request is tristated */
- tegra_pmc_writel(value, PMC_CNTRL);
+ tegra_pmc_writel(pmc, value, PMC_CNTRL);
/* now enable the request */
- value = tegra_pmc_readl(PMC_CNTRL);
+ value = tegra_pmc_readl(pmc, PMC_CNTRL);
value |= PMC_CNTRL_SYSCLK_OE;
- tegra_pmc_writel(value, PMC_CNTRL);
+ tegra_pmc_writel(pmc, value, PMC_CNTRL);
}
static void tegra20_pmc_setup_irq_polarity(struct tegra_pmc *pmc,
@@ -2002,14 +2110,14 @@ static void tegra20_pmc_setup_irq_polarity(struct tegra_pmc *pmc,
{
u32 value;
- value = tegra_pmc_readl(PMC_CNTRL);
+ value = tegra_pmc_readl(pmc, PMC_CNTRL);
if (invert)
value |= PMC_CNTRL_INTR_POLARITY;
else
value &= ~PMC_CNTRL_INTR_POLARITY;
- tegra_pmc_writel(value, PMC_CNTRL);
+ tegra_pmc_writel(pmc, value, PMC_CNTRL);
}
static const struct tegra_pmc_soc tegra20_pmc_soc = {
@@ -2019,6 +2127,9 @@ static const struct tegra_pmc_soc tegra20_pmc_soc = {
.cpu_powergates = NULL,
.has_tsense_reset = false,
.has_gpu_clamps = false,
+ .needs_mbist_war = false,
+ .has_impl_33v_pwr = false,
+ .maybe_tz_only = false,
.num_io_pads = 0,
.io_pads = NULL,
.num_pin_descs = 0,
@@ -2063,7 +2174,9 @@ static const struct tegra_pmc_soc tegra30_pmc_soc = {
.cpu_powergates = tegra30_cpu_powergates,
.has_tsense_reset = true,
.has_gpu_clamps = false,
+ .needs_mbist_war = false,
.has_impl_33v_pwr = false,
+ .maybe_tz_only = false,
.num_io_pads = 0,
.io_pads = NULL,
.num_pin_descs = 0,
@@ -2112,7 +2225,9 @@ static const struct tegra_pmc_soc tegra114_pmc_soc = {
.cpu_powergates = tegra114_cpu_powergates,
.has_tsense_reset = true,
.has_gpu_clamps = false,
+ .needs_mbist_war = false,
.has_impl_33v_pwr = false,
+ .maybe_tz_only = false,
.num_io_pads = 0,
.io_pads = NULL,
.num_pin_descs = 0,
@@ -2221,7 +2336,9 @@ static const struct tegra_pmc_soc tegra124_pmc_soc = {
.cpu_powergates = tegra124_cpu_powergates,
.has_tsense_reset = true,
.has_gpu_clamps = true,
+ .needs_mbist_war = false,
.has_impl_33v_pwr = false,
+ .maybe_tz_only = false,
.num_io_pads = ARRAY_SIZE(tegra124_io_pads),
.io_pads = tegra124_io_pads,
.num_pin_descs = ARRAY_SIZE(tegra124_pin_descs),
@@ -2325,8 +2442,9 @@ static const struct tegra_pmc_soc tegra210_pmc_soc = {
.cpu_powergates = tegra210_cpu_powergates,
.has_tsense_reset = true,
.has_gpu_clamps = true,
- .has_impl_33v_pwr = false,
.needs_mbist_war = true,
+ .has_impl_33v_pwr = false,
+ .maybe_tz_only = true,
.num_io_pads = ARRAY_SIZE(tegra210_io_pads),
.io_pads = tegra210_io_pads,
.num_pin_descs = ARRAY_SIZE(tegra210_pin_descs),
@@ -2413,7 +2531,7 @@ static void tegra186_pmc_setup_irq_polarity(struct tegra_pmc *pmc,
index = of_property_match_string(np, "reg-names", "wake");
if (index < 0) {
- pr_err("failed to find PMC wake registers\n");
+ dev_err(pmc->dev, "failed to find PMC wake registers\n");
return;
}
@@ -2421,7 +2539,7 @@ static void tegra186_pmc_setup_irq_polarity(struct tegra_pmc *pmc,
wake = ioremap_nocache(regs.start, resource_size(&regs));
if (!wake) {
- pr_err("failed to map PMC wake registers\n");
+ dev_err(pmc->dev, "failed to map PMC wake registers\n");
return;
}
@@ -2438,7 +2556,7 @@ static void tegra186_pmc_setup_irq_polarity(struct tegra_pmc *pmc,
}
static const struct tegra_wake_event tegra186_wake_events[] = {
- TEGRA_WAKE_GPIO("power", 29, 1, TEGRA_AON_GPIO(FF, 0)),
+ TEGRA_WAKE_GPIO("power", 29, 1, TEGRA186_AON_GPIO(FF, 0)),
TEGRA_WAKE_IRQ("rtc", 73, 10),
};
@@ -2449,7 +2567,9 @@ static const struct tegra_pmc_soc tegra186_pmc_soc = {
.cpu_powergates = NULL,
.has_tsense_reset = false,
.has_gpu_clamps = false,
+ .needs_mbist_war = false,
.has_impl_33v_pwr = true,
+ .maybe_tz_only = false,
.num_io_pads = ARRAY_SIZE(tegra186_io_pads),
.io_pads = tegra186_io_pads,
.num_pin_descs = ARRAY_SIZE(tegra186_pin_descs),
@@ -2527,6 +2647,9 @@ static const struct tegra_pmc_soc tegra194_pmc_soc = {
.cpu_powergates = NULL,
.has_tsense_reset = false,
.has_gpu_clamps = false,
+ .needs_mbist_war = false,
+ .has_impl_33v_pwr = false,
+ .maybe_tz_only = false,
.num_io_pads = ARRAY_SIZE(tegra194_io_pads),
.io_pads = tegra194_io_pads,
.regs = &tegra186_pmc_regs,
@@ -2561,6 +2684,32 @@ static struct platform_driver tegra_pmc_driver = {
};
builtin_platform_driver(tegra_pmc_driver);
+static bool __init tegra_pmc_detect_tz_only(struct tegra_pmc *pmc)
+{
+ u32 value, saved;
+
+ saved = readl(pmc->base + pmc->soc->regs->scratch0);
+ value = saved ^ 0xffffffff;
+
+ if (value == 0xffffffff)
+ value = 0xdeadbeef;
+
+ /* write pattern and read it back */
+ writel(value, pmc->base + pmc->soc->regs->scratch0);
+ value = readl(pmc->base + pmc->soc->regs->scratch0);
+
+ /* if we read all-zeroes, access is restricted to TZ only */
+ if (value == 0) {
+ pr_info("access to PMC is restricted to TZ\n");
+ return true;
+ }
+
+ /* restore original value */
+ writel(saved, pmc->base + pmc->soc->regs->scratch0);
+
+ return false;
+}
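
The detection helper above probes whether the PMC scratch register is still writable from the non-secure world by writing an inverted pattern and reading it back; on TrustZone-protected parts the read returns zero. A minimal standalone sketch of the same write/read-back technique, offered only as illustration: the helper name is hypothetical and it assumes an already ioremapped register.

/* Illustrative sketch only; regs_writable() and "reg" are placeholders. */
#include <linux/init.h>
#include <linux/io.h>
#include <linux/types.h>

static bool __init regs_writable(void __iomem *reg)
{
	u32 saved = readl(reg);
	u32 pattern = saved ^ 0xffffffff;

	if (pattern == 0xffffffff)
		pattern = 0xdeadbeef;	/* avoid the all-ones test pattern */

	writel(pattern, reg);
	if (readl(reg) != pattern)
		return false;		/* write was ignored: secure world only */

	writel(saved, reg);		/* restore the original contents */
	return true;
}
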
+
/*
* Early initialization to allow access to registers in the very early boot
* process.
@@ -2623,6 +2772,9 @@ static int __init tegra_pmc_early_init(void)
if (np) {
pmc->soc = match->data;
+ if (pmc->soc->maybe_tz_only)
+ pmc->tz_only = tegra_pmc_detect_tz_only(pmc);
+
tegra_powergate_init(pmc, np);
/*
diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c
index e05ab16d9a9e..6285cd8efb21 100644
--- a/drivers/soc/ti/knav_dma.c
+++ b/drivers/soc/ti/knav_dma.c
@@ -598,7 +598,7 @@ static int pktdma_init_chan(struct knav_dma_device *dma,
INIT_LIST_HEAD(&chan->list);
chan->dma = dma;
- chan->direction = DMA_NONE;
+ chan->direction = DMA_TRANS_NONE;
atomic_set(&chan->ref_count, 0);
spin_lock_init(&chan->lock);
diff --git a/drivers/soc/xilinx/Kconfig b/drivers/soc/xilinx/Kconfig
index 687c8f3cd955..01e76b58dd78 100644
--- a/drivers/soc/xilinx/Kconfig
+++ b/drivers/soc/xilinx/Kconfig
@@ -17,4 +17,24 @@ config XILINX_VCU
To compile this driver as a module, choose M here: the
module will be called xlnx_vcu.
+config ZYNQMP_POWER
+ bool "Enable Xilinx Zynq MPSoC Power Management driver"
+ depends on PM && ARCH_ZYNQMP
+ default y
+ help
+	  Say yes to enable power management support for ZynqMP SoC.
+	  This driver uses the firmware driver as an interface for power
+	  management requests to firmware. It registers an ISR to handle
+	  power management callbacks from firmware.
+	  If in doubt, say N.
+
+config ZYNQMP_PM_DOMAINS
+ bool "Enable Zynq MPSoC generic PM domains"
+ default y
+ depends on PM && ARCH_ZYNQMP && ZYNQMP_FIRMWARE
+ select PM_GENERIC_DOMAINS
+ help
+	  Say yes to enable device power management through PM domains.
+ If in doubt, say N.
+
endmenu
diff --git a/drivers/soc/xilinx/Makefile b/drivers/soc/xilinx/Makefile
index dee8fd51e303..f66bfea5de17 100644
--- a/drivers/soc/xilinx/Makefile
+++ b/drivers/soc/xilinx/Makefile
@@ -1,2 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_XILINX_VCU) += xlnx_vcu.o
+obj-$(CONFIG_ZYNQMP_POWER) += zynqmp_power.o
+obj-$(CONFIG_ZYNQMP_PM_DOMAINS) += zynqmp_pm_domains.o
diff --git a/drivers/soc/xilinx/zynqmp_pm_domains.c b/drivers/soc/xilinx/zynqmp_pm_domains.c
new file mode 100644
index 000000000000..354d256e6e00
--- /dev/null
+++ b/drivers/soc/xilinx/zynqmp_pm_domains.c
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ZynqMP Generic PM domain support
+ *
+ * Copyright (C) 2015-2018 Xilinx, Inc.
+ *
+ * Davorin Mista <davorin.mista@aggios.com>
+ * Jolly Shah <jollys@xilinx.com>
+ * Rajan Vaja <rajan.vaja@xilinx.com>
+ */
+
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/slab.h>
+
+#include <linux/firmware/xlnx-zynqmp.h>
+
+#define ZYNQMP_NUM_DOMAINS (100)
+/* Flag stating whether the PM nodes mapped to the PM domain have been requested */
+#define ZYNQMP_PM_DOMAIN_REQUESTED BIT(0)
+
+/**
+ * struct zynqmp_pm_domain - Wrapper around struct generic_pm_domain
+ * @gpd: Generic power domain
+ * @node_id: PM node ID corresponding to device inside PM domain
+ * @flags: ZynqMP PM domain flags
+ */
+struct zynqmp_pm_domain {
+ struct generic_pm_domain gpd;
+ u32 node_id;
+ u8 flags;
+};
+
+/**
+ * zynqmp_gpd_is_active_wakeup_path() - Check if device is in wakeup source
+ * path
+ * @dev: Device to check for wakeup source path
+ * @not_used: Data member (not required)
+ *
+ * This function walks the device's child hierarchy and checks whether any
+ * device in it is set as a wakeup source.
+ *
+ * Return: 1 if the device is in a wakeup source path, else 0
+ */
+static int zynqmp_gpd_is_active_wakeup_path(struct device *dev, void *not_used)
+{
+ int may_wakeup;
+
+ may_wakeup = device_may_wakeup(dev);
+ if (may_wakeup)
+ return may_wakeup;
+
+ return device_for_each_child(dev, NULL,
+ zynqmp_gpd_is_active_wakeup_path);
+}
+
+/**
+ * zynqmp_gpd_power_on() - Power on PM domain
+ * @domain: Generic PM domain
+ *
+ * This function is called before devices inside a PM domain are resumed, to
+ * power on the PM domain.
+ *
+ * Return: 0 on success, error code otherwise
+ */
+static int zynqmp_gpd_power_on(struct generic_pm_domain *domain)
+{
+ int ret;
+ struct zynqmp_pm_domain *pd;
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+
+ if (!eemi_ops || !eemi_ops->set_requirement)
+ return -ENXIO;
+
+ pd = container_of(domain, struct zynqmp_pm_domain, gpd);
+ ret = eemi_ops->set_requirement(pd->node_id,
+ ZYNQMP_PM_CAPABILITY_ACCESS,
+ ZYNQMP_PM_MAX_QOS,
+ ZYNQMP_PM_REQUEST_ACK_BLOCKING);
+ if (ret) {
+ pr_err("%s() %s set requirement for node %d failed: %d\n",
+ __func__, domain->name, pd->node_id, ret);
+ return ret;
+ }
+
+ pr_debug("%s() Powered on %s domain\n", __func__, domain->name);
+ return 0;
+}
+
+/**
+ * zynqmp_gpd_power_off() - Power off PM domain
+ * @domain: Generic PM domain
+ *
+ * This function is called after devices inside a PM domain are suspended, to
+ * power off the PM domain.
+ *
+ * Return: 0 on success, error code otherwise
+ */
+static int zynqmp_gpd_power_off(struct generic_pm_domain *domain)
+{
+ int ret;
+ struct pm_domain_data *pdd, *tmp;
+ struct zynqmp_pm_domain *pd;
+ u32 capabilities = 0;
+ bool may_wakeup;
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+
+ if (!eemi_ops || !eemi_ops->set_requirement)
+ return -ENXIO;
+
+ pd = container_of(domain, struct zynqmp_pm_domain, gpd);
+
+ /* If domain is already released there is nothing to be done */
+ if (!(pd->flags & ZYNQMP_PM_DOMAIN_REQUESTED)) {
+ pr_debug("%s() %s domain is already released\n",
+ __func__, domain->name);
+ return 0;
+ }
+
+ list_for_each_entry_safe(pdd, tmp, &domain->dev_list, list_node) {
+ /* If device is in wakeup path, set capability to WAKEUP */
+ may_wakeup = zynqmp_gpd_is_active_wakeup_path(pdd->dev, NULL);
+ if (may_wakeup) {
+ dev_dbg(pdd->dev, "device is in wakeup path in %s\n",
+ domain->name);
+ capabilities = ZYNQMP_PM_CAPABILITY_WAKEUP;
+ break;
+ }
+ }
+
+ ret = eemi_ops->set_requirement(pd->node_id, capabilities, 0,
+ ZYNQMP_PM_REQUEST_ACK_NO);
+	/*
+	 * If powering down any node inside this domain fails,
+	 * report and return the error.
+	 */
+ if (ret) {
+ pr_err("%s() %s set requirement for node %d failed: %d\n",
+ __func__, domain->name, pd->node_id, ret);
+ return ret;
+ }
+
+ pr_debug("%s() Powered off %s domain\n", __func__, domain->name);
+ return 0;
+}
+
+/**
+ * zynqmp_gpd_attach_dev() - Attach device to the PM domain
+ * @domain: Generic PM domain
+ * @dev: Device to attach
+ *
+ * Return: 0 on success, error code otherwise
+ */
+static int zynqmp_gpd_attach_dev(struct generic_pm_domain *domain,
+ struct device *dev)
+{
+ int ret;
+ struct zynqmp_pm_domain *pd;
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+
+ if (!eemi_ops || !eemi_ops->request_node)
+ return -ENXIO;
+
+ pd = container_of(domain, struct zynqmp_pm_domain, gpd);
+
+ /* If this is not the first device to attach there is nothing to do */
+ if (domain->device_count)
+ return 0;
+
+ ret = eemi_ops->request_node(pd->node_id, 0, 0,
+ ZYNQMP_PM_REQUEST_ACK_BLOCKING);
+ /* If requesting a node fails print and return the error */
+ if (ret) {
+ pr_err("%s() %s request failed for node %d: %d\n",
+ __func__, domain->name, pd->node_id, ret);
+ return ret;
+ }
+
+ pd->flags |= ZYNQMP_PM_DOMAIN_REQUESTED;
+
+ pr_debug("%s() %s attached to %s domain\n", __func__,
+ dev_name(dev), domain->name);
+ return 0;
+}
+
+/**
+ * zynqmp_gpd_detach_dev() - Detach device from the PM domain
+ * @domain: Generic PM domain
+ * @dev: Device to detach
+ */
+static void zynqmp_gpd_detach_dev(struct generic_pm_domain *domain,
+ struct device *dev)
+{
+ int ret;
+ struct zynqmp_pm_domain *pd;
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+
+ if (!eemi_ops || !eemi_ops->release_node)
+ return;
+
+ pd = container_of(domain, struct zynqmp_pm_domain, gpd);
+
+ /* If this is not the last device to detach there is nothing to do */
+ if (domain->device_count)
+ return;
+
+ ret = eemi_ops->release_node(pd->node_id);
+ /* If releasing a node fails print the error and return */
+ if (ret) {
+ pr_err("%s() %s release failed for node %d: %d\n",
+ __func__, domain->name, pd->node_id, ret);
+ return;
+ }
+
+ pd->flags &= ~ZYNQMP_PM_DOMAIN_REQUESTED;
+
+ pr_debug("%s() %s detached from %s domain\n", __func__,
+ dev_name(dev), domain->name);
+}
+
+static struct generic_pm_domain *
+zynqmp_gpd_xlate(struct of_phandle_args *genpdspec, void *data)
+{
+ struct genpd_onecell_data *genpd_data = data;
+ unsigned int i, idx = genpdspec->args[0];
+ struct zynqmp_pm_domain *pd;
+
+ pd = container_of(genpd_data->domains[0], struct zynqmp_pm_domain, gpd);
+
+ if (genpdspec->args_count != 1)
+ return ERR_PTR(-EINVAL);
+
+ /* Check for existing pm domains */
+ for (i = 0; i < ZYNQMP_NUM_DOMAINS; i++) {
+ if (pd[i].node_id == idx)
+ goto done;
+ }
+
+	/*
+	 * No existing power domain was found for the current index, so
+	 * record it in the first unused (node_id == 0) slot of the list.
+	 */
+ for (i = 0; i < ZYNQMP_NUM_DOMAINS; i++) {
+ if (pd[i].node_id == 0) {
+ pd[i].node_id = idx;
+ break;
+ }
+ }
+
+done:
+	if (i == ZYNQMP_NUM_DOMAINS || !genpd_data->domains[i])
+ return ERR_PTR(-ENOENT);
+
+ return genpd_data->domains[i];
+}
+
+static int zynqmp_gpd_probe(struct platform_device *pdev)
+{
+ int i;
+ struct genpd_onecell_data *zynqmp_pd_data;
+ struct generic_pm_domain **domains;
+ struct zynqmp_pm_domain *pd;
+ struct device *dev = &pdev->dev;
+
+ pd = devm_kcalloc(dev, ZYNQMP_NUM_DOMAINS, sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return -ENOMEM;
+
+ zynqmp_pd_data = devm_kzalloc(dev, sizeof(*zynqmp_pd_data), GFP_KERNEL);
+ if (!zynqmp_pd_data)
+ return -ENOMEM;
+
+ zynqmp_pd_data->xlate = zynqmp_gpd_xlate;
+
+ domains = devm_kcalloc(dev, ZYNQMP_NUM_DOMAINS, sizeof(*domains),
+ GFP_KERNEL);
+ if (!domains)
+ return -ENOMEM;
+
+ for (i = 0; i < ZYNQMP_NUM_DOMAINS; i++, pd++) {
+ pd->node_id = 0;
+ pd->gpd.name = kasprintf(GFP_KERNEL, "domain%d", i);
+ pd->gpd.power_off = zynqmp_gpd_power_off;
+ pd->gpd.power_on = zynqmp_gpd_power_on;
+ pd->gpd.attach_dev = zynqmp_gpd_attach_dev;
+ pd->gpd.detach_dev = zynqmp_gpd_detach_dev;
+
+ domains[i] = &pd->gpd;
+
+ /* Mark all PM domains as initially powered off */
+ pm_genpd_init(&pd->gpd, NULL, true);
+ }
+
+ zynqmp_pd_data->domains = domains;
+ zynqmp_pd_data->num_domains = ZYNQMP_NUM_DOMAINS;
+ of_genpd_add_provider_onecell(dev->parent->of_node, zynqmp_pd_data);
+
+ return 0;
+}
+
+static int zynqmp_gpd_remove(struct platform_device *pdev)
+{
+ of_genpd_del_provider(pdev->dev.parent->of_node);
+
+ return 0;
+}
+
+static struct platform_driver zynqmp_power_domain_driver = {
+ .driver = {
+ .name = "zynqmp_power_controller",
+ },
+ .probe = zynqmp_gpd_probe,
+ .remove = zynqmp_gpd_remove,
+};
+module_platform_driver(zynqmp_power_domain_driver);
+
+MODULE_ALIAS("platform:zynqmp_power_controller");
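
Note that zynqmp_power_domain_driver above has no of_match_table and registers its genpd provider on dev->parent->of_node, so the platform device is expected to be created by the ZynqMP firmware parent device. Purely as illustration (the helper name is hypothetical and this is not taken from the series), one way a parent driver could spawn that child looks roughly like this:

#include <linux/err.h>
#include <linux/platform_device.h>

/* Hypothetical helper: spawn the power-domain child under the firmware device. */
static int example_spawn_zynqmp_pd(struct device *fw_dev)
{
	struct platform_device *pdev;

	pdev = platform_device_register_data(fw_dev, "zynqmp_power_controller",
					     PLATFORM_DEVID_NONE, NULL, 0);
	return PTR_ERR_OR_ZERO(pdev);
}
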
diff --git a/drivers/soc/xilinx/zynqmp_power.c b/drivers/soc/xilinx/zynqmp_power.c
new file mode 100644
index 000000000000..771cb59b9d22
--- /dev/null
+++ b/drivers/soc/xilinx/zynqmp_power.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx Zynq MPSoC Power Management
+ *
+ * Copyright (C) 2014-2018 Xilinx, Inc.
+ *
+ * Davorin Mista <davorin.mista@aggios.com>
+ * Jolly Shah <jollys@xilinx.com>
+ * Rajan Vaja <rajan.vaja@xilinx.com>
+ */
+
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/suspend.h>
+
+#include <linux/firmware/xlnx-zynqmp.h>
+
+enum pm_suspend_mode {
+ PM_SUSPEND_MODE_FIRST = 0,
+ PM_SUSPEND_MODE_STD = PM_SUSPEND_MODE_FIRST,
+ PM_SUSPEND_MODE_POWER_OFF,
+};
+
+#define PM_SUSPEND_MODE_FIRST PM_SUSPEND_MODE_STD
+
+static const char *const suspend_modes[] = {
+ [PM_SUSPEND_MODE_STD] = "standard",
+ [PM_SUSPEND_MODE_POWER_OFF] = "power-off",
+};
+
+static enum pm_suspend_mode suspend_mode = PM_SUSPEND_MODE_STD;
+
+enum pm_api_cb_id {
+ PM_INIT_SUSPEND_CB = 30,
+ PM_ACKNOWLEDGE_CB,
+ PM_NOTIFY_CB,
+};
+
+static void zynqmp_pm_get_callback_data(u32 *buf)
+{
+ zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, 0, 0, 0, 0, buf);
+}
+
+static irqreturn_t zynqmp_pm_isr(int irq, void *data)
+{
+ u32 payload[CB_PAYLOAD_SIZE];
+
+ zynqmp_pm_get_callback_data(payload);
+
+ /* First element is callback API ID, others are callback arguments */
+ if (payload[0] == PM_INIT_SUSPEND_CB) {
+ switch (payload[1]) {
+ case SUSPEND_SYSTEM_SHUTDOWN:
+ orderly_poweroff(true);
+ break;
+ case SUSPEND_POWER_REQUEST:
+ pm_suspend(PM_SUSPEND_MEM);
+ break;
+ default:
+			pr_err("%s Unsupported InitSuspendCb reason code %d\n",
+			       __func__, payload[1]);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static ssize_t suspend_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ char *s = buf;
+ int md;
+
+ for (md = PM_SUSPEND_MODE_FIRST; md < ARRAY_SIZE(suspend_modes); md++)
+ if (suspend_modes[md]) {
+ if (md == suspend_mode)
+ s += sprintf(s, "[%s] ", suspend_modes[md]);
+ else
+ s += sprintf(s, "%s ", suspend_modes[md]);
+ }
+
+ /* Convert last space to newline */
+ if (s != buf)
+ *(s - 1) = '\n';
+ return (s - buf);
+}
+
+static ssize_t suspend_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int md, ret = -EINVAL;
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+
+ if (!eemi_ops || !eemi_ops->set_suspend_mode)
+ return ret;
+
+ for (md = PM_SUSPEND_MODE_FIRST; md < ARRAY_SIZE(suspend_modes); md++)
+ if (suspend_modes[md] &&
+ sysfs_streq(suspend_modes[md], buf)) {
+ ret = 0;
+ break;
+ }
+
+ if (!ret && md != suspend_mode) {
+ ret = eemi_ops->set_suspend_mode(md);
+ if (likely(!ret))
+ suspend_mode = md;
+ }
+
+ return ret ? ret : count;
+}
+
+static DEVICE_ATTR_RW(suspend_mode);
+
+static int zynqmp_pm_probe(struct platform_device *pdev)
+{
+ int ret, irq;
+ u32 pm_api_version;
+
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+
+ if (!eemi_ops || !eemi_ops->get_api_version || !eemi_ops->init_finalize)
+ return -ENXIO;
+
+ eemi_ops->init_finalize();
+ eemi_ops->get_api_version(&pm_api_version);
+
+ /* Check PM API version number */
+ if (pm_api_version < ZYNQMP_PM_VERSION)
+ return -ENODEV;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ return -ENXIO;
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, zynqmp_pm_isr,
+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ dev_name(&pdev->dev), &pdev->dev);
+ if (ret) {
+		dev_err(&pdev->dev, "devm_request_threaded_irq '%d' failed with %d\n",
+			irq, ret);
+ return ret;
+ }
+
+ ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to create sysfs interface\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int zynqmp_pm_remove(struct platform_device *pdev)
+{
+ sysfs_remove_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr);
+
+ return 0;
+}
+
+static const struct of_device_id pm_of_match[] = {
+ { .compatible = "xlnx,zynqmp-power", },
+ { /* end of table */ },
+};
+MODULE_DEVICE_TABLE(of, pm_of_match);
+
+static struct platform_driver zynqmp_pm_platform_driver = {
+ .probe = zynqmp_pm_probe,
+ .remove = zynqmp_pm_remove,
+ .driver = {
+ .name = "zynqmp_power",
+ .of_match_table = pm_of_match,
+ },
+};
+module_platform_driver(zynqmp_pm_platform_driver);
diff --git a/drivers/tee/optee/Makefile b/drivers/tee/optee/Makefile
index 48d262ae2f04..56263ae3b1d7 100644
--- a/drivers/tee/optee/Makefile
+++ b/drivers/tee/optee/Makefile
@@ -5,3 +5,4 @@ optee-objs += call.o
optee-objs += rpc.o
optee-objs += supp.o
optee-objs += shm_pool.o
+optee-objs += device.o
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index 947f9b28de9e..0842b6e6af82 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -634,6 +634,10 @@ static struct optee *optee_probe(struct device_node *np)
if (optee->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
pr_info("dynamic shared memory is enabled\n");
+ rc = optee_enumerate_devices();
+ if (rc)
+ goto err;
+
pr_info("initialized driver\n");
return optee;
err:
diff --git a/drivers/tee/optee/device.c b/drivers/tee/optee/device.c
new file mode 100644
index 000000000000..e3a148521ec1
--- /dev/null
+++ b/drivers/tee/optee/device.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Linaro Ltd.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include <linux/uuid.h>
+#include "optee_private.h"
+
+/*
+ * Get device UUIDs
+ *
+ * [out] memref[0] Array of device UUIDs
+ *
+ * Return codes:
+ * TEE_SUCCESS - Invoke command success
+ * TEE_ERROR_BAD_PARAMETERS - Incorrect input param
+ * TEE_ERROR_SHORT_BUFFER - Output buffer size less than required
+ */
+#define PTA_CMD_GET_DEVICES 0x0
+
+static int optee_ctx_match(struct tee_ioctl_version_data *ver, const void *data)
+{
+ if (ver->impl_id == TEE_IMPL_ID_OPTEE)
+ return 1;
+ else
+ return 0;
+}
+
+static int get_devices(struct tee_context *ctx, u32 session,
+ struct tee_shm *device_shm, u32 *shm_size)
+{
+ int ret = 0;
+ struct tee_ioctl_invoke_arg inv_arg;
+ struct tee_param param[4];
+
+ memset(&inv_arg, 0, sizeof(inv_arg));
+ memset(&param, 0, sizeof(param));
+
+ /* Invoke PTA_CMD_GET_DEVICES function */
+ inv_arg.func = PTA_CMD_GET_DEVICES;
+ inv_arg.session = session;
+ inv_arg.num_params = 4;
+
+ /* Fill invoke cmd params */
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
+ param[0].u.memref.shm = device_shm;
+ param[0].u.memref.size = *shm_size;
+ param[0].u.memref.shm_offs = 0;
+
+ ret = tee_client_invoke_func(ctx, &inv_arg, param);
+ if ((ret < 0) || ((inv_arg.ret != TEEC_SUCCESS) &&
+ (inv_arg.ret != TEEC_ERROR_SHORT_BUFFER))) {
+ pr_err("PTA_CMD_GET_DEVICES invoke function err: %x\n",
+ inv_arg.ret);
+ return -EINVAL;
+ }
+
+ *shm_size = param[0].u.memref.size;
+
+ return 0;
+}
+
+static int optee_register_device(const uuid_t *device_uuid, u32 device_id)
+{
+ struct tee_client_device *optee_device = NULL;
+ int rc;
+
+ optee_device = kzalloc(sizeof(*optee_device), GFP_KERNEL);
+ if (!optee_device)
+ return -ENOMEM;
+
+ optee_device->dev.bus = &tee_bus_type;
+ dev_set_name(&optee_device->dev, "optee-clnt%u", device_id);
+ uuid_copy(&optee_device->id.uuid, device_uuid);
+
+ rc = device_register(&optee_device->dev);
+ if (rc) {
+ pr_err("device registration failed, err: %d\n", rc);
+ kfree(optee_device);
+ }
+
+ return rc;
+}
+
+int optee_enumerate_devices(void)
+{
+ const uuid_t pta_uuid =
+ UUID_INIT(0x7011a688, 0xddde, 0x4053,
+ 0xa5, 0xa9, 0x7b, 0x3c, 0x4d, 0xdf, 0x13, 0xb8);
+ struct tee_ioctl_open_session_arg sess_arg;
+ struct tee_shm *device_shm = NULL;
+ const uuid_t *device_uuid = NULL;
+ struct tee_context *ctx = NULL;
+ u32 shm_size = 0, idx, num_devices = 0;
+ int rc;
+
+ memset(&sess_arg, 0, sizeof(sess_arg));
+
+ /* Open context with OP-TEE driver */
+ ctx = tee_client_open_context(NULL, optee_ctx_match, NULL, NULL);
+ if (IS_ERR(ctx))
+ return -ENODEV;
+
+ /* Open session with device enumeration pseudo TA */
+ memcpy(sess_arg.uuid, pta_uuid.b, TEE_IOCTL_UUID_LEN);
+ sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
+ sess_arg.num_params = 0;
+
+ rc = tee_client_open_session(ctx, &sess_arg, NULL);
+ if ((rc < 0) || (sess_arg.ret != TEEC_SUCCESS)) {
+ /* Device enumeration pseudo TA not found */
+ rc = 0;
+ goto out_ctx;
+ }
+
+ rc = get_devices(ctx, sess_arg.session, NULL, &shm_size);
+ if (rc < 0 || !shm_size)
+ goto out_sess;
+
+ device_shm = tee_shm_alloc(ctx, shm_size,
+ TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
+ if (IS_ERR(device_shm)) {
+ pr_err("tee_shm_alloc failed\n");
+ rc = PTR_ERR(device_shm);
+ goto out_sess;
+ }
+
+ rc = get_devices(ctx, sess_arg.session, device_shm, &shm_size);
+ if (rc < 0)
+ goto out_shm;
+
+ device_uuid = tee_shm_get_va(device_shm, 0);
+ if (IS_ERR(device_uuid)) {
+ pr_err("tee_shm_get_va failed\n");
+ rc = PTR_ERR(device_uuid);
+ goto out_shm;
+ }
+
+ num_devices = shm_size / sizeof(uuid_t);
+
+ for (idx = 0; idx < num_devices; idx++) {
+ rc = optee_register_device(&device_uuid[idx], idx);
+ if (rc)
+ goto out_shm;
+ }
+
+out_shm:
+ tee_shm_free(device_shm);
+out_sess:
+ tee_client_close_session(ctx, sess_arg.session);
+out_ctx:
+ tee_client_close_context(ctx);
+
+ return rc;
+}
diff --git a/drivers/tee/optee/optee_msg.h b/drivers/tee/optee/optee_msg.h
index 30504901be80..795bc19ae17a 100644
--- a/drivers/tee/optee/optee_msg.h
+++ b/drivers/tee/optee/optee_msg.h
@@ -1,28 +1,6 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
/*
- * Copyright (c) 2015-2016, Linaro Limited
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2015-2019, Linaro Limited
*/
#ifndef _OPTEE_MSG_H
#define _OPTEE_MSG_H
diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
index 35e79386c556..a5e84afd5013 100644
--- a/drivers/tee/optee/optee_private.h
+++ b/drivers/tee/optee/optee_private.h
@@ -28,6 +28,7 @@
#define TEEC_ERROR_BAD_PARAMETERS 0xFFFF0006
#define TEEC_ERROR_COMMUNICATION 0xFFFF000E
#define TEEC_ERROR_OUT_OF_MEMORY 0xFFFF000C
+#define TEEC_ERROR_SHORT_BUFFER 0xFFFF0010
#define TEEC_ORIGIN_COMMS 0x00000002
@@ -181,6 +182,8 @@ void optee_free_pages_list(void *array, size_t num_entries);
void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
size_t page_offset);
+int optee_enumerate_devices(void);
+
/*
* Small helpers
*/
diff --git a/drivers/tee/optee/optee_smc.h b/drivers/tee/optee/optee_smc.h
index bbf0cf028c16..c72122d9c997 100644
--- a/drivers/tee/optee/optee_smc.h
+++ b/drivers/tee/optee/optee_smc.h
@@ -1,28 +1,6 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
/*
- * Copyright (c) 2015-2016, Linaro Limited
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2015-2019, Linaro Limited
*/
#ifndef OPTEE_SMC_H
#define OPTEE_SMC_H
diff --git a/drivers/tee/optee/supp.c b/drivers/tee/optee/supp.c
index 43626e15703a..92f56b8645e3 100644
--- a/drivers/tee/optee/supp.c
+++ b/drivers/tee/optee/supp.c
@@ -88,10 +88,18 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
{
struct optee *optee = tee_get_drvdata(ctx->teedev);
struct optee_supp *supp = &optee->supp;
- struct optee_supp_req *req = kzalloc(sizeof(*req), GFP_KERNEL);
+ struct optee_supp_req *req;
bool interruptable;
u32 ret;
+ /*
+	 * Return early if no supplicant is available and this is a
+	 * non-blocking request.
+ */
+ if (!supp->ctx && ctx->supp_nowait)
+ return TEEC_ERROR_COMMUNICATION;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return TEEC_ERROR_OUT_OF_MEMORY;
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
index 7b2bb4c50058..17c64fccbb10 100644
--- a/drivers/tee/tee_core.c
+++ b/drivers/tee/tee_core.c
@@ -15,7 +15,6 @@
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/cdev.h>
-#include <linux/device.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/module.h>
@@ -106,6 +105,11 @@ static int tee_open(struct inode *inode, struct file *filp)
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ /*
+	 * By default, requests made in a user-space context wait for
+	 * tee-supplicant if it is not currently present.
+ */
+ ctx->supp_nowait = false;
filp->private_data = ctx;
return 0;
}
@@ -982,6 +986,16 @@ tee_client_open_context(struct tee_context *start,
} while (IS_ERR(ctx) && PTR_ERR(ctx) != -ENOMEM);
put_device(put_dev);
+ /*
+	 * By default, requests made in an in-kernel client's context do not
+	 * wait for tee-supplicant if it is not present. The client can change
+	 * this flag again before calling tee_client_open_session() if it
+	 * requires different behaviour.
+ */
+ if (!IS_ERR(ctx))
+ ctx->supp_nowait = true;
+
return ctx;
}
EXPORT_SYMBOL_GPL(tee_client_open_context);
@@ -1027,6 +1041,48 @@ int tee_client_invoke_func(struct tee_context *ctx,
}
EXPORT_SYMBOL_GPL(tee_client_invoke_func);
+int tee_client_cancel_req(struct tee_context *ctx,
+ struct tee_ioctl_cancel_arg *arg)
+{
+ if (!ctx->teedev->desc->ops->cancel_req)
+ return -EINVAL;
+ return ctx->teedev->desc->ops->cancel_req(ctx, arg->cancel_id,
+ arg->session);
+}
+
+static int tee_client_device_match(struct device *dev,
+ struct device_driver *drv)
+{
+ const struct tee_client_device_id *id_table;
+ struct tee_client_device *tee_device;
+
+ id_table = to_tee_client_driver(drv)->id_table;
+ tee_device = to_tee_client_device(dev);
+
+ while (!uuid_is_null(&id_table->uuid)) {
+ if (uuid_equal(&tee_device->id.uuid, &id_table->uuid))
+ return 1;
+ id_table++;
+ }
+
+ return 0;
+}
+
+static int tee_client_device_uevent(struct device *dev,
+ struct kobj_uevent_env *env)
+{
+ uuid_t *dev_id = &to_tee_client_device(dev)->id.uuid;
+
+ return add_uevent_var(env, "MODALIAS=tee:%pUb", dev_id);
+}
+
+struct bus_type tee_bus_type = {
+ .name = "tee",
+ .match = tee_client_device_match,
+ .uevent = tee_client_device_uevent,
+};
+EXPORT_SYMBOL_GPL(tee_bus_type);
+
static int __init tee_init(void)
{
int rc;
@@ -1040,18 +1096,32 @@ static int __init tee_init(void)
rc = alloc_chrdev_region(&tee_devt, 0, TEE_NUM_DEVICES, "tee");
if (rc) {
pr_err("failed to allocate char dev region\n");
- class_destroy(tee_class);
- tee_class = NULL;
+ goto out_unreg_class;
+ }
+
+ rc = bus_register(&tee_bus_type);
+ if (rc) {
+ pr_err("failed to register tee bus\n");
+ goto out_unreg_chrdev;
}
+ return 0;
+
+out_unreg_chrdev:
+ unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES);
+out_unreg_class:
+ class_destroy(tee_class);
+ tee_class = NULL;
+
return rc;
}
static void __exit tee_exit(void)
{
+ bus_unregister(&tee_bus_type);
+ unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES);
class_destroy(tee_class);
tee_class = NULL;
- unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES);
}
subsys_initcall(tee_init);
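
With tee_bus_type registered above, devices enumerated earlier in this diff (named optee-clnt%u and carrying a TA UUID) can be bound by client drivers that match on that UUID; the uevent handler also emits MODALIAS=tee:<uuid> so such drivers can be autoloaded. A minimal sketch of such a client driver follows, assuming the struct tee_client_driver and struct tee_client_device_id definitions this series adds to include/linux/tee_drv.h; the UUID, driver name, and empty probe/remove bodies are placeholders, not taken from the patch.

#include <linux/device.h>
#include <linux/module.h>
#include <linux/tee_drv.h>
#include <linux/uuid.h>

static int example_tee_probe(struct device *dev)
{
	/* Typically: open a TEE context and a session with the matched TA. */
	return 0;
}

static int example_tee_remove(struct device *dev)
{
	return 0;
}

static const struct tee_client_device_id example_id_table[] = {
	/* Placeholder UUID; a real driver lists its TA's UUID here. */
	{ UUID_INIT(0x00000000, 0x0000, 0x0000,
		    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01) },
	{ }
};

static struct tee_client_driver example_tee_driver = {
	.id_table = example_id_table,
	.driver = {
		.name = "example-tee-client",
		.bus = &tee_bus_type,
		.probe = example_tee_probe,
		.remove = example_tee_remove,
	},
};

static int __init example_tee_init(void)
{
	return driver_register(&example_tee_driver.driver);
}
module_init(example_tee_init);

static void __exit example_tee_exit(void)
{
	driver_unregister(&example_tee_driver.driver);
}
module_exit(example_tee_exit);

MODULE_LICENSE("GPL v2");
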
diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c
index ed05514cc2dc..1834524ae373 100644
--- a/drivers/watchdog/bcm2835_wdt.c
+++ b/drivers/watchdog/bcm2835_wdt.c
@@ -12,6 +12,7 @@
#include <linux/delay.h>
#include <linux/types.h>
+#include <linux/mfd/bcm2835-pm.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/watchdog.h>
@@ -47,6 +48,8 @@ struct bcm2835_wdt {
spinlock_t lock;
};
+static struct bcm2835_wdt *bcm2835_power_off_wdt;
+
static unsigned int heartbeat;
static bool nowayout = WATCHDOG_NOWAYOUT;
@@ -148,10 +151,7 @@ static struct watchdog_device bcm2835_wdt_wdd = {
*/
static void bcm2835_power_off(void)
{
- struct device_node *np =
- of_find_compatible_node(NULL, NULL, "brcm,bcm2835-pm-wdt");
- struct platform_device *pdev = of_find_device_by_node(np);
- struct bcm2835_wdt *wdt = platform_get_drvdata(pdev);
+ struct bcm2835_wdt *wdt = bcm2835_power_off_wdt;
u32 val;
/*
@@ -169,7 +169,7 @@ static void bcm2835_power_off(void)
static int bcm2835_wdt_probe(struct platform_device *pdev)
{
- struct resource *res;
+ struct bcm2835_pm *pm = dev_get_drvdata(pdev->dev.parent);
struct device *dev = &pdev->dev;
struct bcm2835_wdt *wdt;
int err;
@@ -181,10 +181,7 @@ static int bcm2835_wdt_probe(struct platform_device *pdev)
spin_lock_init(&wdt->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- wdt->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(wdt->base))
- return PTR_ERR(wdt->base);
+ wdt->base = pm->base;
watchdog_set_drvdata(&bcm2835_wdt_wdd, wdt);
watchdog_init_timeout(&bcm2835_wdt_wdd, heartbeat, dev);
@@ -211,8 +208,10 @@ static int bcm2835_wdt_probe(struct platform_device *pdev)
return err;
}
- if (pm_power_off == NULL)
+ if (pm_power_off == NULL) {
pm_power_off = bcm2835_power_off;
+ bcm2835_power_off_wdt = wdt;
+ }
dev_info(dev, "Broadcom BCM2835 watchdog timer");
return 0;
@@ -226,18 +225,11 @@ static int bcm2835_wdt_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id bcm2835_wdt_of_match[] = {
- { .compatible = "brcm,bcm2835-pm-wdt", },
- {},
-};
-MODULE_DEVICE_TABLE(of, bcm2835_wdt_of_match);
-
static struct platform_driver bcm2835_wdt_driver = {
.probe = bcm2835_wdt_probe,
.remove = bcm2835_wdt_remove,
.driver = {
.name = "bcm2835-wdt",
- .of_match_table = bcm2835_wdt_of_match,
},
};
module_platform_driver(bcm2835_wdt_driver);
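
The watchdog probe above now takes its register base from dev_get_drvdata(pdev->dev.parent) instead of mapping its own resource, so it relies on a parent MFD driver that maps the PM block, fills in a struct bcm2835_pm (linux/mfd/bcm2835-pm.h), and then registers this device as a cell. A rough sketch of that parent-side setup is shown below; it assumes only that the struct exposes a base pointer, as the probe above uses, and the function name and cell layout are illustrative rather than copied from the series.

#include <linux/err.h>
#include <linux/io.h>
#include <linux/mfd/bcm2835-pm.h>
#include <linux/mfd/core.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

/* Illustrative only: the parent maps the block and hands it to the wdt cell. */
static const struct mfd_cell example_pm_cells[] = {
	{ .name = "bcm2835-wdt" },
};

static int example_pm_probe(struct platform_device *pdev)
{
	struct bcm2835_pm *pm;
	struct resource *res;

	pm = devm_kzalloc(&pdev->dev, sizeof(*pm), GFP_KERNEL);
	if (!pm)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pm->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(pm->base))
		return PTR_ERR(pm->base);

	/* This is what the watchdog child reads back via its parent. */
	platform_set_drvdata(pdev, pm);

	return devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE,
				    example_pm_cells,
				    ARRAY_SIZE(example_pm_cells),
				    NULL, 0, NULL);
}
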