author     Linus Torvalds <torvalds@linux-foundation.org>  2019-12-05 20:43:31 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-12-05 20:43:31 +0100
commit     ec939e4c94bd3ef2fd4f34c15f8aaf79bd0c5ee1 (patch)
tree       1d39945dbdd233d35c571a726e135fe0ae845814 /drivers/memory
parent     Merge tag 'armsoc-soc' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc (diff)
parent     Merge tag 'scmi-fix-5.5' of git://git.kernel.org/pub/scm/linux/kernel/git/sud... (diff)
download   linux-ec939e4c94bd3ef2fd4f34c15f8aaf79bd0c5ee1.tar.xz
           linux-ec939e4c94bd3ef2fd4f34c15f8aaf79bd0c5ee1.zip
Merge tag 'armsoc-drivers' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc
Pull ARM SoC driver updates from Olof Johansson:
 "Various driver updates for platforms:

   - A larger set of work on Tegra 2/3 around memory controller and
     regulator features, some fuse cleanups, etc.

   - MMP platform drivers, in particular for USB PHY, and other smaller
     additions.

   - Samsung Exynos 5422 driver for DMC (dynamic memory controller) and
     ASV (adaptive voltage), allowing the platform to run at more
     optimal operating points.

   - Misc refactorings and support for RZ/G2N and R8A774B1 from Renesas

   - Clock/reset control driver for TI/OMAP

   - Meson-A1 reset controller support

   - Qualcomm sdm845 and sda845 SoC IDs for socinfo"

* tag 'armsoc-drivers' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc: (150 commits)
  firmware: arm_scmi: Fix doorbell ring logic for !CONFIG_64BIT
  soc: fsl: add RCPM driver
  dt-bindings: fsl: rcpm: Add 'little-endian' and update Chassis definition
  memory: tegra: Consolidate registers definition into common header
  memory: tegra: Ensure timing control debug features are disabled
  memory: tegra: Introduce Tegra30 EMC driver
  memory: tegra: Do not handle error from wait_for_completion_timeout()
  memory: tegra: Increase handshake timeout on Tegra20
  memory: tegra: Print a brief info message about EMC timings
  memory: tegra: Pre-configure debug register on Tegra20
  memory: tegra: Include io.h instead of iopoll.h
  memory: tegra: Adapt for Tegra20 clock driver changes
  memory: tegra: Don't set EMC rate to maximum on probe for Tegra20
  memory: tegra: Add gr2d and gr3d to DRM IOMMU group
  memory: tegra: Set DMA mask based on supported address bits
  soc: at91: Add Atmel SFR SN (Serial Number) support
  memory: atmel-ebi: switch to SPDX license identifiers
  memory: atmel-ebi: move NUM_CS definition inside EBI driver
  soc: mediatek: Refactor bus protection control
  soc: mediatek: Refactor sram control
  ...
Diffstat (limited to 'drivers/memory')
-rw-r--r--  drivers/memory/atmel-ebi.c               |   11
-rw-r--r--  drivers/memory/brcmstb_dpfe.c            |  164
-rw-r--r--  drivers/memory/emif.c                    |    5
-rw-r--r--  drivers/memory/jedec_ddr.h               |   61
-rw-r--r--  drivers/memory/of_memory.c               |  149
-rw-r--r--  drivers/memory/of_memory.h               |   18
-rw-r--r--  drivers/memory/samsung/Kconfig           |   13
-rw-r--r--  drivers/memory/samsung/Makefile          |    1
-rw-r--r--  drivers/memory/samsung/exynos5422-dmc.c  | 1550
-rw-r--r--  drivers/memory/tegra/Kconfig             |   10
-rw-r--r--  drivers/memory/tegra/Makefile            |    1
-rw-r--r--  drivers/memory/tegra/mc.c                |   52
-rw-r--r--  drivers/memory/tegra/mc.h                |   74
-rw-r--r--  drivers/memory/tegra/tegra114.c          |   10
-rw-r--r--  drivers/memory/tegra/tegra124.c          |   30
-rw-r--r--  drivers/memory/tegra/tegra20-emc.c       |  134
-rw-r--r--  drivers/memory/tegra/tegra30-emc.c       | 1232
-rw-r--r--  drivers/memory/tegra/tegra30.c           |   34
18 files changed, 3327 insertions(+), 222 deletions(-)
diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c
index 0322df9dc249..14386d0b5f57 100644
--- a/drivers/memory/atmel-ebi.c
+++ b/drivers/memory/atmel-ebi.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* EBI driver for Atmel chips
* inspired by the fsl weim bus driver
*
* Copyright (C) 2013 Jean-Jacques Hiblot <jjhiblot@traphandler.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/clk.h>
@@ -19,6 +16,8 @@
#include <linux/regmap.h>
#include <soc/at91/atmel-sfr.h>
+#define AT91_EBI_NUM_CS 8
+
struct atmel_ebi_dev_config {
int cs;
struct atmel_smc_cs_conf smcconf;
@@ -314,7 +313,7 @@ static int atmel_ebi_dev_setup(struct atmel_ebi *ebi, struct device_node *np,
if (ret)
return ret;
- if (cs >= AT91_MATRIX_EBI_NUM_CS ||
+ if (cs >= AT91_EBI_NUM_CS ||
!(ebi->caps->available_cs & BIT(cs))) {
dev_err(dev, "invalid reg property in %pOF\n", np);
return -EINVAL;
@@ -344,7 +343,7 @@ static int atmel_ebi_dev_setup(struct atmel_ebi *ebi, struct device_node *np,
apply = true;
i = 0;
- for_each_set_bit(cs, &cslines, AT91_MATRIX_EBI_NUM_CS) {
+ for_each_set_bit(cs, &cslines, AT91_EBI_NUM_CS) {
ebid->configs[i].cs = cs;
if (apply) {
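
For reference, the rename above only changes the bound used when walking the requested chip-select lines; the walk itself is untouched. Below is a minimal userspace sketch of the same pattern, with plain C standing in for the kernel's for_each_set_bit() and a hypothetical cslines mask:

#include <stdio.h>

#define AT91_EBI_NUM_CS 8	/* bound introduced by the patch above */

int main(void)
{
	unsigned long cslines = 0x05;	/* hypothetical: CS0 and CS2 requested */
	int i = 0;

	/* Open-coded equivalent of for_each_set_bit(cs, &cslines, AT91_EBI_NUM_CS) */
	for (int cs = 0; cs < AT91_EBI_NUM_CS; cs++) {
		if (!(cslines & (1UL << cs)))
			continue;
		printf("configs[%d].cs = %d\n", i++, cs);
	}
	return 0;
}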
diff --git a/drivers/memory/brcmstb_dpfe.c b/drivers/memory/brcmstb_dpfe.c
index 6827ed484750..82b415be18d1 100644
--- a/drivers/memory/brcmstb_dpfe.c
+++ b/drivers/memory/brcmstb_dpfe.c
@@ -127,7 +127,6 @@ enum dpfe_msg_fields {
MSG_COMMAND,
MSG_ARG_COUNT,
MSG_ARG0,
- MSG_CHKSUM,
MSG_FIELD_MAX = 16 /* Max number of arguments */
};
@@ -180,7 +179,7 @@ struct dpfe_api {
};
/* Things we need for as long as we are active. */
-struct private_data {
+struct brcmstb_dpfe_priv {
void __iomem *regs;
void __iomem *dmem;
void __iomem *imem;
@@ -232,9 +231,13 @@ static struct attribute *dpfe_v3_attrs[] = {
};
ATTRIBUTE_GROUPS(dpfe_v3);
-/* API v2 firmware commands */
-static const struct dpfe_api dpfe_api_v2 = {
- .version = 2,
+/*
+ * Old API v2 firmware commands, as defined in the rev 0.61 specification.
+ * We set the version to 1 to denote that it is not compatible with the new
+ * API v2 and onwards.
+ */
+static const struct dpfe_api dpfe_api_old_v2 = {
+ .version = 1,
.fw_name = "dpfe.bin",
.sysfs_attrs = dpfe_v2_groups,
.command = {
@@ -243,21 +246,42 @@ static const struct dpfe_api dpfe_api_v2 = {
[MSG_COMMAND] = 1,
[MSG_ARG_COUNT] = 1,
[MSG_ARG0] = 1,
- [MSG_CHKSUM] = 4,
},
[DPFE_CMD_GET_REFRESH] = {
[MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
[MSG_COMMAND] = 2,
[MSG_ARG_COUNT] = 1,
[MSG_ARG0] = 1,
- [MSG_CHKSUM] = 5,
},
[DPFE_CMD_GET_VENDOR] = {
[MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
[MSG_COMMAND] = 2,
[MSG_ARG_COUNT] = 1,
[MSG_ARG0] = 2,
- [MSG_CHKSUM] = 6,
+ },
+ }
+};
+
+/*
+ * API v2 firmware commands, as defined in the rev 0.8 specification; named
+ * 'new v2' here.
+ */
+static const struct dpfe_api dpfe_api_new_v2 = {
+ .version = 2,
+ .fw_name = NULL, /* We expect the firmware to have been downloaded! */
+ .sysfs_attrs = dpfe_v2_groups,
+ .command = {
+ [DPFE_CMD_GET_INFO] = {
+ [MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
+ [MSG_COMMAND] = 0x101,
+ },
+ [DPFE_CMD_GET_REFRESH] = {
+ [MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
+ [MSG_COMMAND] = 0x201,
+ },
+ [DPFE_CMD_GET_VENDOR] = {
+ [MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
+ [MSG_COMMAND] = 0x202,
},
}
};
@@ -273,49 +297,51 @@ static const struct dpfe_api dpfe_api_v3 = {
[MSG_COMMAND] = 0x0101,
[MSG_ARG_COUNT] = 1,
[MSG_ARG0] = 1,
- [MSG_CHKSUM] = 0x104,
},
[DPFE_CMD_GET_REFRESH] = {
[MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
[MSG_COMMAND] = 0x0202,
[MSG_ARG_COUNT] = 0,
- /*
- * This is a bit ugly. Without arguments, the checksum
- * follows right after the argument count and not at
- * offset MSG_CHKSUM.
- */
- [MSG_ARG0] = 0x203,
},
/* There's no GET_VENDOR command in API v3. */
},
};
-static bool is_dcpu_enabled(void __iomem *regs)
+static bool is_dcpu_enabled(struct brcmstb_dpfe_priv *priv)
{
u32 val;
- val = readl_relaxed(regs + REG_DCPU_RESET);
+ mutex_lock(&priv->lock);
+ val = readl_relaxed(priv->regs + REG_DCPU_RESET);
+ mutex_unlock(&priv->lock);
return !(val & DCPU_RESET_MASK);
}
-static void __disable_dcpu(void __iomem *regs)
+static void __disable_dcpu(struct brcmstb_dpfe_priv *priv)
{
u32 val;
- if (!is_dcpu_enabled(regs))
+ if (!is_dcpu_enabled(priv))
return;
+ mutex_lock(&priv->lock);
+
/* Put DCPU in reset if it's running. */
- val = readl_relaxed(regs + REG_DCPU_RESET);
+ val = readl_relaxed(priv->regs + REG_DCPU_RESET);
val |= (1 << DCPU_RESET_SHIFT);
- writel_relaxed(val, regs + REG_DCPU_RESET);
+ writel_relaxed(val, priv->regs + REG_DCPU_RESET);
+
+ mutex_unlock(&priv->lock);
}
-static void __enable_dcpu(void __iomem *regs)
+static void __enable_dcpu(struct brcmstb_dpfe_priv *priv)
{
+ void __iomem *regs = priv->regs;
u32 val;
+ mutex_lock(&priv->lock);
+
/* Clear mailbox registers. */
writel_relaxed(0, regs + REG_TO_DCPU_MBOX);
writel_relaxed(0, regs + REG_TO_HOST_MBOX);
@@ -329,6 +355,8 @@ static void __enable_dcpu(void __iomem *regs)
val = readl_relaxed(regs + REG_DCPU_RESET);
val &= ~(1 << DCPU_RESET_SHIFT);
writel_relaxed(val, regs + REG_DCPU_RESET);
+
+ mutex_unlock(&priv->lock);
}
static unsigned int get_msg_chksum(const u32 msg[], unsigned int max)
@@ -343,7 +371,7 @@ static unsigned int get_msg_chksum(const u32 msg[], unsigned int max)
return sum;
}
-static void __iomem *get_msg_ptr(struct private_data *priv, u32 response,
+static void __iomem *get_msg_ptr(struct brcmstb_dpfe_priv *priv, u32 response,
char *buf, ssize_t *size)
{
unsigned int msg_type;
@@ -382,7 +410,7 @@ static void __iomem *get_msg_ptr(struct private_data *priv, u32 response,
return ptr;
}
-static void __finalize_command(struct private_data *priv)
+static void __finalize_command(struct brcmstb_dpfe_priv *priv)
{
unsigned int release_mbox;
@@ -390,12 +418,12 @@ static void __finalize_command(struct private_data *priv)
* It depends on the API version which MBOX register we have to write to
* to signal we are done.
*/
- release_mbox = (priv->dpfe_api->version < 3)
+ release_mbox = (priv->dpfe_api->version < 2)
? REG_TO_HOST_MBOX : REG_TO_DCPU_MBOX;
writel_relaxed(0, priv->regs + release_mbox);
}
-static int __send_command(struct private_data *priv, unsigned int cmd,
+static int __send_command(struct brcmstb_dpfe_priv *priv, unsigned int cmd,
u32 result[])
{
const u32 *msg = priv->dpfe_api->command[cmd];
@@ -421,9 +449,17 @@ static int __send_command(struct private_data *priv, unsigned int cmd,
return -ETIMEDOUT;
}
+ /* Compute checksum over the message */
+ chksum_idx = msg[MSG_ARG_COUNT] + MSG_ARG_COUNT + 1;
+ chksum = get_msg_chksum(msg, chksum_idx);
+
/* Write command and arguments to message area */
- for (i = 0; i < MSG_FIELD_MAX; i++)
- writel_relaxed(msg[i], regs + DCPU_MSG_RAM(i));
+ for (i = 0; i < MSG_FIELD_MAX; i++) {
+ if (i == chksum_idx)
+ writel_relaxed(chksum, regs + DCPU_MSG_RAM(i));
+ else
+ writel_relaxed(msg[i], regs + DCPU_MSG_RAM(i));
+ }
/* Tell DCPU there is a command waiting */
writel_relaxed(1, regs + REG_TO_DCPU_MBOX);
@@ -517,7 +553,7 @@ static int __verify_firmware(struct init_data *init,
/* Verify checksum by reading back the firmware from co-processor RAM. */
static int __verify_fw_checksum(struct init_data *init,
- struct private_data *priv,
+ struct brcmstb_dpfe_priv *priv,
const struct dpfe_firmware_header *header,
u32 checksum)
{
@@ -571,26 +607,23 @@ static int __write_firmware(u32 __iomem *mem, const u32 *fw,
return 0;
}
-static int brcmstb_dpfe_download_firmware(struct platform_device *pdev,
- struct init_data *init)
+static int brcmstb_dpfe_download_firmware(struct brcmstb_dpfe_priv *priv)
{
const struct dpfe_firmware_header *header;
unsigned int dmem_size, imem_size;
- struct device *dev = &pdev->dev;
+ struct device *dev = priv->dev;
bool is_big_endian = false;
- struct private_data *priv;
const struct firmware *fw;
const u32 *dmem, *imem;
+ struct init_data init;
const void *fw_blob;
int ret;
- priv = platform_get_drvdata(pdev);
-
/*
* Skip downloading the firmware if the DCPU is already running and
* responding to commands.
*/
- if (is_dcpu_enabled(priv->regs)) {
+ if (is_dcpu_enabled(priv)) {
u32 response[MSG_FIELD_MAX];
ret = __send_command(priv, DPFE_CMD_GET_INFO, response);
@@ -606,20 +639,23 @@ static int brcmstb_dpfe_download_firmware(struct platform_device *pdev,
if (!priv->dpfe_api->fw_name)
return -ENODEV;
- ret = request_firmware(&fw, priv->dpfe_api->fw_name, dev);
- /* request_firmware() prints its own error messages. */
+ ret = firmware_request_nowarn(&fw, priv->dpfe_api->fw_name, dev);
+ /*
+ * Defer the firmware download if the firmware file couldn't be found.
+ * The root file system may not be available yet.
+ */
if (ret)
- return ret;
+ return (ret == -ENOENT) ? -EPROBE_DEFER : ret;
- ret = __verify_firmware(init, fw);
+ ret = __verify_firmware(&init, fw);
if (ret)
return -EFAULT;
- __disable_dcpu(priv->regs);
+ __disable_dcpu(priv);
- is_big_endian = init->is_big_endian;
- dmem_size = init->dmem_len;
- imem_size = init->imem_len;
+ is_big_endian = init.is_big_endian;
+ dmem_size = init.dmem_len;
+ imem_size = init.imem_len;
/* At the beginning of the firmware blob is a header. */
header = (struct dpfe_firmware_header *)fw->data;
@@ -637,17 +673,17 @@ static int brcmstb_dpfe_download_firmware(struct platform_device *pdev,
if (ret)
return ret;
- ret = __verify_fw_checksum(init, priv, header, init->chksum);
+ ret = __verify_fw_checksum(&init, priv, header, init.chksum);
if (ret)
return ret;
- __enable_dcpu(priv->regs);
+ __enable_dcpu(priv);
return 0;
}
static ssize_t generic_show(unsigned int command, u32 response[],
- struct private_data *priv, char *buf)
+ struct brcmstb_dpfe_priv *priv, char *buf)
{
int ret;
@@ -665,7 +701,7 @@ static ssize_t show_info(struct device *dev, struct device_attribute *devattr,
char *buf)
{
u32 response[MSG_FIELD_MAX];
- struct private_data *priv;
+ struct brcmstb_dpfe_priv *priv;
unsigned int info;
ssize_t ret;
@@ -688,7 +724,7 @@ static ssize_t show_refresh(struct device *dev,
{
u32 response[MSG_FIELD_MAX];
void __iomem *info;
- struct private_data *priv;
+ struct brcmstb_dpfe_priv *priv;
u8 refresh, sr_abort, ppre, thermal_offs, tuf;
u32 mr4;
ssize_t ret;
@@ -721,7 +757,7 @@ static ssize_t store_refresh(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
u32 response[MSG_FIELD_MAX];
- struct private_data *priv;
+ struct brcmstb_dpfe_priv *priv;
void __iomem *info;
unsigned long val;
int ret;
@@ -747,7 +783,7 @@ static ssize_t show_vendor(struct device *dev, struct device_attribute *devattr,
char *buf)
{
u32 response[MSG_FIELD_MAX];
- struct private_data *priv;
+ struct brcmstb_dpfe_priv *priv;
void __iomem *info;
ssize_t ret;
u32 mr5, mr6, mr7, mr8, err;
@@ -778,7 +814,7 @@ static ssize_t show_dram(struct device *dev, struct device_attribute *devattr,
char *buf)
{
u32 response[MSG_FIELD_MAX];
- struct private_data *priv;
+ struct brcmstb_dpfe_priv *priv;
ssize_t ret;
u32 mr4, mr5, mr6, mr7, mr8, err;
@@ -800,16 +836,15 @@ static ssize_t show_dram(struct device *dev, struct device_attribute *devattr,
static int brcmstb_dpfe_resume(struct platform_device *pdev)
{
- struct init_data init;
+ struct brcmstb_dpfe_priv *priv = platform_get_drvdata(pdev);
- return brcmstb_dpfe_download_firmware(pdev, &init);
+ return brcmstb_dpfe_download_firmware(priv);
}
static int brcmstb_dpfe_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct private_data *priv;
- struct init_data init;
+ struct brcmstb_dpfe_priv *priv;
struct resource *res;
int ret;
@@ -817,6 +852,8 @@ static int brcmstb_dpfe_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
+ priv->dev = dev;
+
mutex_init(&priv->lock);
platform_set_drvdata(pdev, priv);
@@ -851,9 +888,10 @@ static int brcmstb_dpfe_probe(struct platform_device *pdev)
return -ENOENT;
}
- ret = brcmstb_dpfe_download_firmware(pdev, &init);
+ ret = brcmstb_dpfe_download_firmware(priv);
if (ret) {
- dev_err(dev, "Couldn't download firmware -- %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Couldn't download firmware -- %d\n", ret);
return ret;
}
@@ -867,7 +905,7 @@ static int brcmstb_dpfe_probe(struct platform_device *pdev)
static int brcmstb_dpfe_remove(struct platform_device *pdev)
{
- struct private_data *priv = dev_get_drvdata(&pdev->dev);
+ struct brcmstb_dpfe_priv *priv = dev_get_drvdata(&pdev->dev);
sysfs_remove_groups(&pdev->dev.kobj, priv->dpfe_api->sysfs_attrs);
@@ -876,10 +914,10 @@ static int brcmstb_dpfe_remove(struct platform_device *pdev)
static const struct of_device_id brcmstb_dpfe_of_match[] = {
/* Use legacy API v2 for a select number of chips */
- { .compatible = "brcm,bcm7268-dpfe-cpu", .data = &dpfe_api_v2 },
- { .compatible = "brcm,bcm7271-dpfe-cpu", .data = &dpfe_api_v2 },
- { .compatible = "brcm,bcm7278-dpfe-cpu", .data = &dpfe_api_v2 },
- { .compatible = "brcm,bcm7211-dpfe-cpu", .data = &dpfe_api_v2 },
+ { .compatible = "brcm,bcm7268-dpfe-cpu", .data = &dpfe_api_old_v2 },
+ { .compatible = "brcm,bcm7271-dpfe-cpu", .data = &dpfe_api_old_v2 },
+ { .compatible = "brcm,bcm7278-dpfe-cpu", .data = &dpfe_api_old_v2 },
+ { .compatible = "brcm,bcm7211-dpfe-cpu", .data = &dpfe_api_new_v2 },
/* API v3 is the default going forward */
{ .compatible = "brcm,dpfe-cpu", .data = &dpfe_api_v3 },
{}
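
The removed MSG_CHKSUM entries are possible because __send_command() now derives the checksum slot at send time: it sits right after the last argument, at index msg[MSG_ARG_COUNT] + MSG_ARG_COUNT + 1, and get_msg_chksum() sums every field before it. A standalone sketch of that placement (the header value 1 is an assumption standing in for DPFE_MSG_TYPE_COMMAND); for a one-argument command it reproduces the 5 that the old static table hard-coded for GET_REFRESH:

#include <stdio.h>

#define MSG_FIELD_MAX 16
enum { MSG_HEADER, MSG_COMMAND, MSG_ARG_COUNT, MSG_ARG0 };

/* Sum every field preceding the checksum slot, as get_msg_chksum() does. */
static unsigned int get_msg_chksum(const unsigned int msg[], unsigned int max)
{
	unsigned int sum = 0;
	unsigned int i;

	for (i = 0; i < max; i++)
		sum += msg[i];
	return sum;
}

int main(void)
{
	/* GET_REFRESH-style message; the header value 1 is an assumption. */
	unsigned int msg[MSG_FIELD_MAX] = {
		[MSG_HEADER] = 1, [MSG_COMMAND] = 2,
		[MSG_ARG_COUNT] = 1, [MSG_ARG0] = 1,
	};
	/* The checksum slot sits right after the last argument. */
	unsigned int chksum_idx = msg[MSG_ARG_COUNT] + MSG_ARG_COUNT + 1;

	printf("checksum %u at index %u\n",
	       get_msg_chksum(msg, chksum_idx), chksum_idx);
	return 0;
}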
diff --git a/drivers/memory/emif.c b/drivers/memory/emif.c
index 402c6bc8e621..9d9127bf2a59 100644
--- a/drivers/memory/emif.c
+++ b/drivers/memory/emif.c
@@ -1613,7 +1613,7 @@ static void emif_shutdown(struct platform_device *pdev)
static int get_emif_reg_values(struct emif_data *emif, u32 freq,
struct emif_regs *regs)
{
- u32 cs1_used, ip_rev, phy_type;
+ u32 ip_rev, phy_type;
u32 cl, type;
const struct lpddr2_timings *timings;
const struct lpddr2_min_tck *min_tck;
@@ -1621,7 +1621,6 @@ static int get_emif_reg_values(struct emif_data *emif, u32 freq,
const struct lpddr2_addressing *addressing;
struct emif_data *emif_for_calc;
struct device *dev;
- const struct emif_custom_configs *custom_configs;
dev = emif->dev;
/*
@@ -1639,12 +1638,10 @@ static int get_emif_reg_values(struct emif_data *emif, u32 freq,
device_info = emif_for_calc->plat_data->device_info;
type = device_info->type;
- cs1_used = device_info->cs1_used;
ip_rev = emif_for_calc->plat_data->ip_rev;
phy_type = emif_for_calc->plat_data->phy_type;
min_tck = emif_for_calc->plat_data->min_tck;
- custom_configs = emif_for_calc->plat_data->custom_configs;
set_ddr_clk_period(freq);
diff --git a/drivers/memory/jedec_ddr.h b/drivers/memory/jedec_ddr.h
index 4a21b5044ff8..e59ccbd982d0 100644
--- a/drivers/memory/jedec_ddr.h
+++ b/drivers/memory/jedec_ddr.h
@@ -29,6 +29,7 @@
#define DDR_TYPE_LPDDR2_S4 3
#define DDR_TYPE_LPDDR2_S2 4
#define DDR_TYPE_LPDDR2_NVM 5
+#define DDR_TYPE_LPDDR3 6
/* DDR IO width */
#define DDR_IO_WIDTH_4 1
@@ -169,4 +170,64 @@ extern const struct lpddr2_timings
lpddr2_jedec_timings[NUM_DDR_TIMING_TABLE_ENTRIES];
extern const struct lpddr2_min_tck lpddr2_jedec_min_tck;
+/*
+ * Structure for LPDDR3 timings, based on LPDDR2 plus additional fields.
+ * All parameters are in picoseconds (ps), except max_freq and min_freq,
+ * which are in Hz.
+ */
+struct lpddr3_timings {
+ u32 max_freq;
+ u32 min_freq;
+ u32 tRFC;
+ u32 tRRD;
+ u32 tRPab;
+ u32 tRPpb;
+ u32 tRCD;
+ u32 tRC;
+ u32 tRAS;
+ u32 tWTR;
+ u32 tWR;
+ u32 tRTP;
+ u32 tW2W_C2C;
+ u32 tR2R_C2C;
+ u32 tWL;
+ u32 tDQSCK;
+ u32 tRL;
+ u32 tFAW;
+ u32 tXSR;
+ u32 tXP;
+ u32 tCKE;
+ u32 tCKESR;
+ u32 tMRD;
+};
+
+/*
+ * Min value for some parameters in terms of the number of tCK cycles (nCK).
+ * Please set parameters that are not valid for a given memory type to zero.
+ */
+struct lpddr3_min_tck {
+ u32 tRFC;
+ u32 tRRD;
+ u32 tRPab;
+ u32 tRPpb;
+ u32 tRCD;
+ u32 tRC;
+ u32 tRAS;
+ u32 tWTR;
+ u32 tWR;
+ u32 tRTP;
+ u32 tW2W_C2C;
+ u32 tR2R_C2C;
+ u32 tWL;
+ u32 tDQSCK;
+ u32 tRL;
+ u32 tFAW;
+ u32 tXSR;
+ u32 tXP;
+ u32 tCKE;
+ u32 tCKESR;
+ u32 tMRD;
+};
+
#endif /* __JEDEC_DDR_H */
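
The new structures are plain parameter tables; a platform is expected to provide one lpddr3_timings entry per supported frequency. A hypothetical entry, only to illustrate the units convention (Hz for the frequency bounds, picoseconds for everything else; the values are made up, not from any datasheet):

#include "jedec_ddr.h"	/* for struct lpddr3_timings */

/* Hypothetical single-OPP timings table; all field values are illustrative. */
static const struct lpddr3_timings example_lpddr3_timings[] = {
	{
		.max_freq = 800000000,	/* Hz */
		.min_freq = 100000000,	/* Hz */
		.tRFC  = 65000,		/* everything below in ps */
		.tRRD  = 6000,
		.tRPab = 12000,
		.tRPpb = 12000,
		.tRCD  = 10000,
		.tRC   = 33750,
		.tRAS  = 23000,
		/* remaining fields left zero for brevity */
	},
};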
diff --git a/drivers/memory/of_memory.c b/drivers/memory/of_memory.c
index 46539b27a3fb..71f26eac7350 100644
--- a/drivers/memory/of_memory.c
+++ b/drivers/memory/of_memory.c
@@ -3,6 +3,7 @@
* OpenFirmware helpers for memory drivers
*
* Copyright (C) 2012 Texas Instruments, Inc.
+ * Copyright (C) 2019 Samsung Electronics Co., Ltd.
*/
#include <linux/device.h>
@@ -149,3 +150,151 @@ default_timings:
return lpddr2_jedec_timings;
}
EXPORT_SYMBOL(of_get_ddr_timings);
+
+/**
+ * of_lpddr3_get_min_tck() - extract min timing values for lpddr3
+ * @np: pointer to ddr device tree node
+ * @dev: device requesting the min timing values
+ *
+ * Populates the lpddr3_min_tck structure by extracting data
+ * from the device tree node. Returns a pointer to the populated
+ * structure, or NULL if anything goes wrong while populating it.
+ */
+const struct lpddr3_min_tck *of_lpddr3_get_min_tck(struct device_node *np,
+ struct device *dev)
+{
+ int ret = 0;
+ struct lpddr3_min_tck *min;
+
+ min = devm_kzalloc(dev, sizeof(*min), GFP_KERNEL);
+ if (!min)
+ goto default_min_tck;
+
+ ret |= of_property_read_u32(np, "tRFC-min-tck", &min->tRFC);
+ ret |= of_property_read_u32(np, "tRRD-min-tck", &min->tRRD);
+ ret |= of_property_read_u32(np, "tRPab-min-tck", &min->tRPab);
+ ret |= of_property_read_u32(np, "tRPpb-min-tck", &min->tRPpb);
+ ret |= of_property_read_u32(np, "tRCD-min-tck", &min->tRCD);
+ ret |= of_property_read_u32(np, "tRC-min-tck", &min->tRC);
+ ret |= of_property_read_u32(np, "tRAS-min-tck", &min->tRAS);
+ ret |= of_property_read_u32(np, "tWTR-min-tck", &min->tWTR);
+ ret |= of_property_read_u32(np, "tWR-min-tck", &min->tWR);
+ ret |= of_property_read_u32(np, "tRTP-min-tck", &min->tRTP);
+ ret |= of_property_read_u32(np, "tW2W-C2C-min-tck", &min->tW2W_C2C);
+ ret |= of_property_read_u32(np, "tR2R-C2C-min-tck", &min->tR2R_C2C);
+ ret |= of_property_read_u32(np, "tWL-min-tck", &min->tWL);
+ ret |= of_property_read_u32(np, "tDQSCK-min-tck", &min->tDQSCK);
+ ret |= of_property_read_u32(np, "tRL-min-tck", &min->tRL);
+ ret |= of_property_read_u32(np, "tFAW-min-tck", &min->tFAW);
+ ret |= of_property_read_u32(np, "tXSR-min-tck", &min->tXSR);
+ ret |= of_property_read_u32(np, "tXP-min-tck", &min->tXP);
+ ret |= of_property_read_u32(np, "tCKE-min-tck", &min->tCKE);
+ ret |= of_property_read_u32(np, "tCKESR-min-tck", &min->tCKESR);
+ ret |= of_property_read_u32(np, "tMRD-min-tck", &min->tMRD);
+
+ if (ret) {
+ dev_warn(dev, "%s: errors while parsing min-tck values\n",
+ __func__);
+ devm_kfree(dev, min);
+ goto default_min_tck;
+ }
+
+ return min;
+
+default_min_tck:
+ dev_warn(dev, "%s: using default min-tck values\n", __func__);
+ return NULL;
+}
+EXPORT_SYMBOL(of_lpddr3_get_min_tck);
+
+static int of_lpddr3_do_get_timings(struct device_node *np,
+ struct lpddr3_timings *tim)
+{
+ int ret;
+
+ /* The 'reg' param required since DT has changed, used as 'max-freq' */
+ ret = of_property_read_u32(np, "reg", &tim->max_freq);
+ ret |= of_property_read_u32(np, "min-freq", &tim->min_freq);
+ ret |= of_property_read_u32(np, "tRFC", &tim->tRFC);
+ ret |= of_property_read_u32(np, "tRRD", &tim->tRRD);
+ ret |= of_property_read_u32(np, "tRPab", &tim->tRPab);
+ ret |= of_property_read_u32(np, "tRPpb", &tim->tRPpb);
+ ret |= of_property_read_u32(np, "tRCD", &tim->tRCD);
+ ret |= of_property_read_u32(np, "tRC", &tim->tRC);
+ ret |= of_property_read_u32(np, "tRAS", &tim->tRAS);
+ ret |= of_property_read_u32(np, "tWTR", &tim->tWTR);
+ ret |= of_property_read_u32(np, "tWR", &tim->tWR);
+ ret |= of_property_read_u32(np, "tRTP", &tim->tRTP);
+ ret |= of_property_read_u32(np, "tW2W-C2C", &tim->tW2W_C2C);
+ ret |= of_property_read_u32(np, "tR2R-C2C", &tim->tR2R_C2C);
+ ret |= of_property_read_u32(np, "tFAW", &tim->tFAW);
+ ret |= of_property_read_u32(np, "tXSR", &tim->tXSR);
+ ret |= of_property_read_u32(np, "tXP", &tim->tXP);
+ ret |= of_property_read_u32(np, "tCKE", &tim->tCKE);
+ ret |= of_property_read_u32(np, "tCKESR", &tim->tCKESR);
+ ret |= of_property_read_u32(np, "tMRD", &tim->tMRD);
+
+ return ret;
+}
+
+/**
+ * of_lpddr3_get_ddr_timings() - extracts the lpddr3 timings and updates the
+ * number of frequencies available.
+ * @np_ddr: Pointer to ddr device tree node
+ * @dev: Device requesting the ddr timings
+ * @device_type: Type of ddr
+ * @nr_frequencies: Number of frequencies available for the ddr
+ * (updated by this function)
+ *
+ * Populates the lpddr3_timings structure by extracting data from the device
+ * tree node. Returns a pointer to the populated structure, or NULL if any
+ * error occurs while populating it.
+ */
+const struct lpddr3_timings
+*of_lpddr3_get_ddr_timings(struct device_node *np_ddr, struct device *dev,
+ u32 device_type, u32 *nr_frequencies)
+{
+ struct lpddr3_timings *timings = NULL;
+ u32 arr_sz = 0, i = 0;
+ struct device_node *np_tim;
+ char *tim_compat = NULL;
+
+ switch (device_type) {
+ case DDR_TYPE_LPDDR3:
+ tim_compat = "jedec,lpddr3-timings";
+ break;
+ default:
+ dev_warn(dev, "%s: un-supported memory type\n", __func__);
+ }
+
+ for_each_child_of_node(np_ddr, np_tim)
+ if (of_device_is_compatible(np_tim, tim_compat))
+ arr_sz++;
+
+ if (arr_sz)
+ timings = devm_kcalloc(dev, arr_sz, sizeof(*timings),
+ GFP_KERNEL);
+
+ if (!timings)
+ goto default_timings;
+
+ for_each_child_of_node(np_ddr, np_tim) {
+ if (of_device_is_compatible(np_tim, tim_compat)) {
+ if (of_lpddr3_do_get_timings(np_tim, &timings[i])) {
+ devm_kfree(dev, timings);
+ goto default_timings;
+ }
+ i++;
+ }
+ }
+
+ *nr_frequencies = arr_sz;
+
+ return timings;
+
+default_timings:
+ dev_warn(dev, "%s: failed to get timings\n", __func__);
+ *nr_frequencies = 0;
+ return NULL;
+}
+EXPORT_SYMBOL(of_lpddr3_get_ddr_timings);
diff --git a/drivers/memory/of_memory.h b/drivers/memory/of_memory.h
index b077cc836b0b..e39ecc4c733d 100644
--- a/drivers/memory/of_memory.h
+++ b/drivers/memory/of_memory.h
@@ -14,6 +14,11 @@ extern const struct lpddr2_min_tck *of_get_min_tck(struct device_node *np,
extern const struct lpddr2_timings
*of_get_ddr_timings(struct device_node *np_ddr, struct device *dev,
u32 device_type, u32 *nr_frequencies);
+extern const struct lpddr3_min_tck
+ *of_lpddr3_get_min_tck(struct device_node *np, struct device *dev);
+extern const struct lpddr3_timings
+ *of_lpddr3_get_ddr_timings(struct device_node *np_ddr,
+ struct device *dev, u32 device_type, u32 *nr_frequencies);
#else
static inline const struct lpddr2_min_tck
*of_get_min_tck(struct device_node *np, struct device *dev)
@@ -27,6 +32,19 @@ static inline const struct lpddr2_timings
{
return NULL;
}
+
+static inline const struct lpddr3_min_tck
+ *of_lpddr3_get_min_tck(struct device_node *np, struct device *dev)
+{
+ return NULL;
+}
+
+static inline const struct lpddr3_timings
+ *of_lpddr3_get_ddr_timings(struct device_node *np_ddr,
+ struct device *dev, u32 device_type, u32 *nr_frequencies)
+{
+ return NULL;
+}
#endif /* CONFIG_OF && CONFIG_DDR */
#endif /* __LINUX_MEMORY_OF_REG_ */
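
The Exynos5422 DMC driver added below is the first consumer of these helpers. A minimal sketch of the expected call sequence, assuming np_ddr and dev come from the caller's probe path (example_get_lpddr3_info() is a made-up wrapper, not part of the patch):

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/of.h>
#include "jedec_ddr.h"
#include "of_memory.h"

/* Hypothetical helper: fetch LPDDR3 timings for a DT memory node. */
static int example_get_lpddr3_info(struct device *dev,
				   struct device_node *np_ddr,
				   const struct lpddr3_timings **timings,
				   const struct lpddr3_min_tck **min_tck,
				   u32 *nr_freq)
{
	*min_tck = of_lpddr3_get_min_tck(np_ddr, dev);
	*timings = of_lpddr3_get_ddr_timings(np_ddr, dev, DDR_TYPE_LPDDR3,
					     nr_freq);
	/* Both helpers warn and return NULL when parsing fails. */
	if (!*timings || !*min_tck)
		return -EINVAL;
	return 0;
}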
diff --git a/drivers/memory/samsung/Kconfig b/drivers/memory/samsung/Kconfig
index 79ce7ea58903..e9c3ce92350c 100644
--- a/drivers/memory/samsung/Kconfig
+++ b/drivers/memory/samsung/Kconfig
@@ -7,6 +7,19 @@ config SAMSUNG_MC
if SAMSUNG_MC
+config EXYNOS5422_DMC
+ tristate "EXYNOS5422 Dynamic Memory Controller driver"
+ depends on ARCH_EXYNOS || (COMPILE_TEST && HAS_IOMEM)
+ select DDR
+ depends on DEVFREQ_GOV_SIMPLE_ONDEMAND
+ depends on (PM_DEVFREQ && PM_DEVFREQ_EVENT)
+ help
+	  This adds a driver for the Exynos5422 DMC (Dynamic Memory Controller).
+	  The driver provides support for Dynamic Voltage and Frequency Scaling
+	  in the DMC and DRAM. It also supports changing the DRAM timings when
+	  running at a different frequency. The timings are calculated based on
+	  the DT memory information.
+
config EXYNOS_SROM
bool "Exynos SROM controller driver" if COMPILE_TEST
depends on (ARM && ARCH_EXYNOS) || (COMPILE_TEST && HAS_IOMEM)
diff --git a/drivers/memory/samsung/Makefile b/drivers/memory/samsung/Makefile
index 00587be66211..ea071be21c44 100644
--- a/drivers/memory/samsung/Makefile
+++ b/drivers/memory/samsung/Makefile
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_EXYNOS5422_DMC) += exynos5422-dmc.o
obj-$(CONFIG_EXYNOS_SROM) += exynos-srom.o
diff --git a/drivers/memory/samsung/exynos5422-dmc.c b/drivers/memory/samsung/exynos5422-dmc.c
new file mode 100644
index 000000000000..47dbf6d1789f
--- /dev/null
+++ b/drivers/memory/samsung/exynos5422-dmc.c
@@ -0,0 +1,1550 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd.
+ * Author: Lukasz Luba <l.luba@partner.samsung.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/devfreq.h>
+#include <linux/devfreq-event.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pm_opp.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include "../jedec_ddr.h"
+#include "../of_memory.h"
+
+#define EXYNOS5_DREXI_TIMINGAREF (0x0030)
+#define EXYNOS5_DREXI_TIMINGROW0 (0x0034)
+#define EXYNOS5_DREXI_TIMINGDATA0 (0x0038)
+#define EXYNOS5_DREXI_TIMINGPOWER0 (0x003C)
+#define EXYNOS5_DREXI_TIMINGROW1 (0x00E4)
+#define EXYNOS5_DREXI_TIMINGDATA1 (0x00E8)
+#define EXYNOS5_DREXI_TIMINGPOWER1 (0x00EC)
+#define CDREX_PAUSE (0x2091c)
+#define CDREX_LPDDR3PHY_CON3 (0x20a20)
+#define CDREX_LPDDR3PHY_CLKM_SRC (0x20700)
+#define EXYNOS5_TIMING_SET_SWI BIT(28)
+#define USE_MX_MSPLL_TIMINGS (1)
+#define USE_BPLL_TIMINGS (0)
+#define EXYNOS5_AREF_NORMAL (0x2e)
+
+#define DREX_PPCCLKCON (0x0130)
+#define DREX_PEREV2CONFIG (0x013c)
+#define DREX_PMNC_PPC (0xE000)
+#define DREX_CNTENS_PPC (0xE010)
+#define DREX_CNTENC_PPC (0xE020)
+#define DREX_INTENS_PPC (0xE030)
+#define DREX_INTENC_PPC (0xE040)
+#define DREX_FLAG_PPC (0xE050)
+#define DREX_PMCNT2_PPC (0xE130)
+
+/*
+ * A value for register DREX_PMNC_PPC which should be written to reset
+ * the cycle counter CCNT (a reference wall clock). It zeroes the
+ * CCNT counter.
+ */
+#define CC_RESET BIT(2)
+
+/*
+ * A value for register DREX_PMNC_PPC which does the reset of all performance
+ * counters to zero.
+ */
+#define PPC_COUNTER_RESET BIT(1)
+
+/*
+ * Enables all configured counters (including cycle counter). The value should
+ * be written to the register DREX_PMNC_PPC.
+ */
+#define PPC_ENABLE BIT(0)
+
+/*
+ * A value for register DREX_PPCCLKCON which enables the performance events
+ * clock. Must be written before the first access to the performance counters
+ * register set, otherwise it could crash.
+ */
+#define PEREV_CLK_EN BIT(0)
+
+/*
+ * Values which are used to enable counters, interrupts or configure flags of
+ * the performance counters. They configure counter 2 and cycle counter.
+ */
+#define PERF_CNT2 BIT(2)
+#define PERF_CCNT BIT(31)
+
+/*
+ * Performance event types which are used for setting the preferred event
+ * to track in the counters.
+ * There is a set of different types; the values are in the range 0 to 0x6f.
+ * These settings should be written to the configuration register which manages
+ * the type of the event (register DREX_PEREV2CONFIG).
+ */
+#define READ_TRANSFER_CH0 (0x6d)
+#define READ_TRANSFER_CH1 (0x6f)
+
+#define PERF_COUNTER_START_VALUE 0xff000000
+#define PERF_EVENT_UP_DOWN_THRESHOLD 900000000ULL
+
+/**
+ * struct dmc_opp_table - Operating level description
+ *
+ * Covers frequency and voltage settings of the DMC operating mode.
+ */
+struct dmc_opp_table {
+ u32 freq_hz;
+ u32 volt_uv;
+};
+
+/**
+ * struct exynos5_dmc - main structure describing DMC device
+ *
+ * The main structure for the Dynamic Memory Controller which covers clocks,
+ * memory regions, HW information, parameters and current operating mode.
+ */
+struct exynos5_dmc {
+ struct device *dev;
+ struct devfreq *df;
+ struct devfreq_simple_ondemand_data gov_data;
+ void __iomem *base_drexi0;
+ void __iomem *base_drexi1;
+ struct regmap *clk_regmap;
+ struct mutex lock;
+ unsigned long curr_rate;
+ unsigned long curr_volt;
+ unsigned long bypass_rate;
+ struct dmc_opp_table *opp;
+ struct dmc_opp_table opp_bypass;
+ int opp_count;
+ u32 timings_arr_size;
+ u32 *timing_row;
+ u32 *timing_data;
+ u32 *timing_power;
+ const struct lpddr3_timings *timings;
+ const struct lpddr3_min_tck *min_tck;
+ u32 bypass_timing_row;
+ u32 bypass_timing_data;
+ u32 bypass_timing_power;
+ struct regulator *vdd_mif;
+ struct clk *fout_spll;
+ struct clk *fout_bpll;
+ struct clk *mout_spll;
+ struct clk *mout_bpll;
+ struct clk *mout_mclk_cdrex;
+ struct clk *mout_mx_mspll_ccore;
+ struct clk *mx_mspll_ccore_phy;
+ struct clk *mout_mx_mspll_ccore_phy;
+ struct devfreq_event_dev **counter;
+ int num_counters;
+ u64 last_overflow_ts[2];
+ unsigned long load;
+ unsigned long total;
+ bool in_irq_mode;
+};
+
+#define TIMING_FIELD(t_name, t_bit_beg, t_bit_end) \
+ { .name = t_name, .bit_beg = t_bit_beg, .bit_end = t_bit_end }
+
+#define TIMING_VAL2REG(timing, t_val) \
+({ \
+ u32 __val; \
+ __val = (t_val) << (timing)->bit_beg; \
+ __val; \
+})
+
+struct timing_reg {
+ char *name;
+ int bit_beg;
+ int bit_end;
+ unsigned int val;
+};
+
+static const struct timing_reg timing_row[] = {
+ TIMING_FIELD("tRFC", 24, 31),
+ TIMING_FIELD("tRRD", 20, 23),
+ TIMING_FIELD("tRP", 16, 19),
+ TIMING_FIELD("tRCD", 12, 15),
+ TIMING_FIELD("tRC", 6, 11),
+ TIMING_FIELD("tRAS", 0, 5),
+};
+
+static const struct timing_reg timing_data[] = {
+ TIMING_FIELD("tWTR", 28, 31),
+ TIMING_FIELD("tWR", 24, 27),
+ TIMING_FIELD("tRTP", 20, 23),
+ TIMING_FIELD("tW2W-C2C", 14, 14),
+ TIMING_FIELD("tR2R-C2C", 12, 12),
+ TIMING_FIELD("WL", 8, 11),
+ TIMING_FIELD("tDQSCK", 4, 7),
+ TIMING_FIELD("RL", 0, 3),
+};
+
+static const struct timing_reg timing_power[] = {
+ TIMING_FIELD("tFAW", 26, 31),
+ TIMING_FIELD("tXSR", 16, 25),
+ TIMING_FIELD("tXP", 8, 15),
+ TIMING_FIELD("tCKE", 4, 7),
+ TIMING_FIELD("tMRD", 0, 3),
+};
+
+#define TIMING_COUNT (ARRAY_SIZE(timing_row) + ARRAY_SIZE(timing_data) + \
+ ARRAY_SIZE(timing_power))
+
+static int exynos5_counters_set_event(struct exynos5_dmc *dmc)
+{
+ int i, ret;
+
+ for (i = 0; i < dmc->num_counters; i++) {
+ if (!dmc->counter[i])
+ continue;
+ ret = devfreq_event_set_event(dmc->counter[i]);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+static int exynos5_counters_enable_edev(struct exynos5_dmc *dmc)
+{
+ int i, ret;
+
+ for (i = 0; i < dmc->num_counters; i++) {
+ if (!dmc->counter[i])
+ continue;
+ ret = devfreq_event_enable_edev(dmc->counter[i]);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+static int exynos5_counters_disable_edev(struct exynos5_dmc *dmc)
+{
+ int i, ret;
+
+ for (i = 0; i < dmc->num_counters; i++) {
+ if (!dmc->counter[i])
+ continue;
+ ret = devfreq_event_disable_edev(dmc->counter[i]);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+/**
+ * find_target_freq_idx() - Finds requested frequency in local DMC configuration
+ * @dmc: device for which the information is checked
+ * @target_rate: requested frequency in Hz
+ *
+ * Searches the local DMC driver structure for the requested frequency value
+ * and returns its index, or an error value.
+ */
+static int find_target_freq_idx(struct exynos5_dmc *dmc,
+ unsigned long target_rate)
+{
+ int i;
+
+ for (i = dmc->opp_count - 1; i >= 0; i--)
+ if (dmc->opp[i].freq_hz <= target_rate)
+ return i;
+
+ return -EINVAL;
+}
+
+/**
+ * exynos5_switch_timing_regs() - Changes bank register set for DRAM timings
+ * @dmc: device to which the new settings are going to be applied
+ * @set: boolean variable passing the set value
+ *
+ * Changes the register set which holds the timing parameters.
+ * There are two register sets: 0 and 1. Register set 0
+ * is used in normal operation, when the clock is provided from the main PLL.
+ * Bank register set 1 is used when the main PLL frequency is going to be
+ * changed and the clock is taken from an alternative, stable source.
+ * This function switches between these banks according to the
+ * currently used clock source.
+ */
+static void exynos5_switch_timing_regs(struct exynos5_dmc *dmc, bool set)
+{
+ unsigned int reg;
+ int ret;
+
+ ret = regmap_read(dmc->clk_regmap, CDREX_LPDDR3PHY_CON3, &reg);
+
+ if (set)
+ reg |= EXYNOS5_TIMING_SET_SWI;
+ else
+ reg &= ~EXYNOS5_TIMING_SET_SWI;
+
+ regmap_write(dmc->clk_regmap, CDREX_LPDDR3PHY_CON3, reg);
+}
+
+/**
+ * exynos5_init_freq_table() - Initializes the PM OPP framework
+ * @dmc: DMC device for which the frequencies are used for OPP init
+ * @profile: devfreq device's profile
+ *
+ * Populates the devfreq device's OPP table based on the current frequency and voltage.
+ */
+static int exynos5_init_freq_table(struct exynos5_dmc *dmc,
+ struct devfreq_dev_profile *profile)
+{
+ int i, ret;
+ int idx;
+ unsigned long freq;
+
+ ret = dev_pm_opp_of_add_table(dmc->dev);
+ if (ret < 0) {
+ dev_err(dmc->dev, "Failed to get OPP table\n");
+ return ret;
+ }
+
+ dmc->opp_count = dev_pm_opp_get_opp_count(dmc->dev);
+
+ dmc->opp = devm_kmalloc_array(dmc->dev, dmc->opp_count,
+ sizeof(struct dmc_opp_table), GFP_KERNEL);
+ if (!dmc->opp)
+ goto err_opp;
+
+ idx = dmc->opp_count - 1;
+ for (i = 0, freq = ULONG_MAX; i < dmc->opp_count; i++, freq--) {
+ struct dev_pm_opp *opp;
+
+ opp = dev_pm_opp_find_freq_floor(dmc->dev, &freq);
+ if (IS_ERR(opp))
+ goto err_opp;
+
+ dmc->opp[idx - i].freq_hz = freq;
+ dmc->opp[idx - i].volt_uv = dev_pm_opp_get_voltage(opp);
+
+ dev_pm_opp_put(opp);
+ }
+
+ return 0;
+
+err_opp:
+ dev_pm_opp_of_remove_table(dmc->dev);
+
+ return -EINVAL;
+}
+
+/**
+ * exynos5_set_bypass_dram_timings() - Low-level changes of the DRAM timings
+ * @dmc: device to which the new settings are going to be applied
+ *
+ * Low-level function for changing the timings of DRAM memory clocked from
+ * the 'bypass' clock source (fixed frequency @400MHz).
+ * It uses timing bank registers set 1.
+ */
+static void exynos5_set_bypass_dram_timings(struct exynos5_dmc *dmc)
+{
+ writel(EXYNOS5_AREF_NORMAL,
+ dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGAREF);
+
+ writel(dmc->bypass_timing_row,
+ dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGROW1);
+ writel(dmc->bypass_timing_row,
+ dmc->base_drexi1 + EXYNOS5_DREXI_TIMINGROW1);
+ writel(dmc->bypass_timing_data,
+ dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGDATA1);
+ writel(dmc->bypass_timing_data,
+ dmc->base_drexi1 + EXYNOS5_DREXI_TIMINGDATA1);
+ writel(dmc->bypass_timing_power,
+ dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGPOWER1);
+ writel(dmc->bypass_timing_power,
+ dmc->base_drexi1 + EXYNOS5_DREXI_TIMINGPOWER1);
+}
+
+/**
+ * exynos5_dram_change_timings() - Low-level changes of the DRAM final timings
+ * @dmc: device to which the new settings are going to be applied
+ * @target_rate: target frequency of the DMC
+ *
+ * Low-level function for changing the timings of DRAM memory operating from
+ * the main clock source (BPLL), which can have different frequencies. Thus,
+ * each frequency must have corresponding timing register values in order to
+ * keep the needed delays.
+ * It uses timing bank registers set 0.
+ */
+static int exynos5_dram_change_timings(struct exynos5_dmc *dmc,
+ unsigned long target_rate)
+{
+ int idx;
+
+ for (idx = dmc->opp_count - 1; idx >= 0; idx--)
+ if (dmc->opp[idx].freq_hz <= target_rate)
+ break;
+
+ if (idx < 0)
+ return -EINVAL;
+
+ writel(EXYNOS5_AREF_NORMAL,
+ dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGAREF);
+
+ writel(dmc->timing_row[idx],
+ dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGROW0);
+ writel(dmc->timing_row[idx],
+ dmc->base_drexi1 + EXYNOS5_DREXI_TIMINGROW0);
+ writel(dmc->timing_data[idx],
+ dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGDATA0);
+ writel(dmc->timing_data[idx],
+ dmc->base_drexi1 + EXYNOS5_DREXI_TIMINGDATA0);
+ writel(dmc->timing_power[idx],
+ dmc->base_drexi0 + EXYNOS5_DREXI_TIMINGPOWER0);
+ writel(dmc->timing_power[idx],
+ dmc->base_drexi1 + EXYNOS5_DREXI_TIMINGPOWER0);
+
+ return 0;
+}
+
+/**
+ * exynos5_dmc_align_target_voltage() - Sets the final voltage for the DMC
+ * @dmc: device for which it is going to be set
+ * @target_volt: new voltage which is chosen to be final
+ *
+ * Function tries to align the voltage to a safe level for 'normal' mode.
+ * It checks whether a higher voltage is needed and changes the value. The
+ * target voltage might be lower than the currently set one and the system
+ * will still be stable.
+ */
+static int exynos5_dmc_align_target_voltage(struct exynos5_dmc *dmc,
+ unsigned long target_volt)
+{
+ int ret = 0;
+
+ if (dmc->curr_volt <= target_volt)
+ return 0;
+
+ ret = regulator_set_voltage(dmc->vdd_mif, target_volt,
+ target_volt);
+ if (!ret)
+ dmc->curr_volt = target_volt;
+
+ return ret;
+}
+
+/**
+ * exynos5_dmc_align_bypass_voltage() - Sets the voltage for the DMC
+ * @dmc: device for which it is going to be set
+ * @target_volt: new voltage which is chosen to be final
+ *
+ * Function tries to align the voltage to a safe level for the 'bypass' mode.
+ * It checks whether a higher voltage is needed and changes the value.
+ * The target voltage must not be less than the currently needed one, because
+ * the device might become unstable at the current frequency.
+ */
+static int exynos5_dmc_align_bypass_voltage(struct exynos5_dmc *dmc,
+ unsigned long target_volt)
+{
+ int ret = 0;
+ unsigned long bypass_volt = dmc->opp_bypass.volt_uv;
+
+ target_volt = max(bypass_volt, target_volt);
+
+ if (dmc->curr_volt >= target_volt)
+ return 0;
+
+ ret = regulator_set_voltage(dmc->vdd_mif, target_volt,
+ target_volt);
+ if (!ret)
+ dmc->curr_volt = target_volt;
+
+ return ret;
+}
+
+/**
+ * exynos5_dmc_align_bypass_dram_timings() - Chooses and sets DRAM timings
+ * @dmc: device for which it is going to be set
+ * @target_rate: new frequency which is chosen to be final
+ *
+ * Function changes the DRAM timings for the temporary 'bypass' mode.
+ */
+static int exynos5_dmc_align_bypass_dram_timings(struct exynos5_dmc *dmc,
+ unsigned long target_rate)
+{
+ int idx = find_target_freq_idx(dmc, target_rate);
+
+ if (idx < 0)
+ return -EINVAL;
+
+ exynos5_set_bypass_dram_timings(dmc);
+
+ return 0;
+}
+
+/**
+ * exynos5_dmc_switch_to_bypass_configuration() - Switching to temporary clock
+ * @dmc: DMC device for which the switching is going to happen
+ * @target_rate: new frequency which is going to be set as a final
+ * @target_volt: new voltage which is going to be set as a final
+ *
+ * Function configures the DMC and clocks for operating in the temporary
+ * 'bypass' mode. This mode is only used temporarily and, if required, changes
+ * the voltage and timings for the DRAM chips. It switches the main clock to a
+ * stable clock source for the period of the main PLL reconfiguration.
+ */
+static int
+exynos5_dmc_switch_to_bypass_configuration(struct exynos5_dmc *dmc,
+ unsigned long target_rate,
+ unsigned long target_volt)
+{
+ int ret;
+
+ /*
+	 * Having a higher voltage for a particular frequency does not harm
+	 * the chip. Use it for the temporary frequency change, when one
+	 * voltage manipulation might be avoided.
+ */
+ ret = exynos5_dmc_align_bypass_voltage(dmc, target_volt);
+ if (ret)
+ return ret;
+
+	/*
+	 * Longer DRAM delays do not cause a crash; too-short ones do.
+	 */
+ ret = exynos5_dmc_align_bypass_dram_timings(dmc, target_rate);
+ if (ret)
+ return ret;
+
+	/*
+	 * The delays are long enough, so use them for the incoming clock.
+	 */
+ exynos5_switch_timing_regs(dmc, USE_MX_MSPLL_TIMINGS);
+
+ return ret;
+}
+
+/**
+ * exynos5_dmc_change_freq_and_volt() - Changes voltage and frequency of the DMC
+ * using safe procedure
+ * @dmc: device for which the frequency is going to be changed
+ * @target_rate: requested new frequency
+ * @target_volt: requested voltage which corresponds to the new frequency
+ *
+ * The DMC frequency change procedure requires a few steps.
+ * The main requirement is to change the clock source in the clk mux
+ * for the time of main clock PLL locking. The assumption is that the
+ * alternative clock source set as parent is stable.
+ * The second parent's clock frequency is fixed at 400MHz; it is named the
+ * 'bypass' clock. This requires aligning the DRAM timing parameters to the
+ * new T-period. There are two bank sets for keeping the DRAM
+ * timings: set 0 and set 1. Set 0 is used when the main clock source is
+ * chosen. The second set of registers is used for the 'bypass' clock.
+ * Switching between the two bank sets is part of the process.
+ * The voltage must also be aligned to the minimum required level, hence the
+ * intermediate step of switching to the 'bypass' parent clock source.
+ * If the old voltage is lower, an increase of the voltage level is required.
+ * The complexity of the voltage manipulation is hidden in low-level functions.
+ * At the end of this function there is a last alignment of the voltage level.
+ */
+static int
+exynos5_dmc_change_freq_and_volt(struct exynos5_dmc *dmc,
+ unsigned long target_rate,
+ unsigned long target_volt)
+{
+ int ret;
+
+ ret = exynos5_dmc_switch_to_bypass_configuration(dmc, target_rate,
+ target_volt);
+ if (ret)
+ return ret;
+
+ /*
+ * Voltage is set at least to a level needed for this frequency,
+ * so switching clock source is safe now.
+ */
+ clk_prepare_enable(dmc->fout_spll);
+ clk_prepare_enable(dmc->mout_spll);
+ clk_prepare_enable(dmc->mout_mx_mspll_ccore);
+
+ ret = clk_set_parent(dmc->mout_mclk_cdrex, dmc->mout_mx_mspll_ccore);
+ if (ret)
+ goto disable_clocks;
+
+ /*
+ * We are safe to increase the timings for current bypass frequency.
+ * Thanks to this the settings will be ready for the upcoming clock
+ * source change.
+ */
+ exynos5_dram_change_timings(dmc, target_rate);
+
+ clk_set_rate(dmc->fout_bpll, target_rate);
+
+ exynos5_switch_timing_regs(dmc, USE_BPLL_TIMINGS);
+
+ ret = clk_set_parent(dmc->mout_mclk_cdrex, dmc->mout_bpll);
+ if (ret)
+ goto disable_clocks;
+
+ /*
+	 * Make sure the voltage is not left at the 'bypass' level, and align
+	 * it to the right level for power efficiency.
+ */
+ ret = exynos5_dmc_align_target_voltage(dmc, target_volt);
+
+disable_clocks:
+ clk_disable_unprepare(dmc->mout_mx_mspll_ccore);
+ clk_disable_unprepare(dmc->mout_spll);
+ clk_disable_unprepare(dmc->fout_spll);
+
+ return ret;
+}
+
+/**
+ * exynos5_dmc_get_volt_freq() - Gets the frequency and voltage from the OPP
+ * table.
+ * @dmc: device for which the frequency is going to be changed
+ * @freq: requested frequency in Hz
+ * @target_rate: returned frequency which is the same or lower than
+ * requested
+ * @target_volt: returned voltage which corresponds to the returned
+ * frequency
+ *
+ * Function gets the requested frequency and checks the OPP framework for the
+ * needed frequency and voltage. It populates the values 'target_rate' and
+ * 'target_volt', or returns an error value when the OPP framework fails.
+ */
+static int exynos5_dmc_get_volt_freq(struct exynos5_dmc *dmc,
+ unsigned long *freq,
+ unsigned long *target_rate,
+ unsigned long *target_volt, u32 flags)
+{
+ struct dev_pm_opp *opp;
+
+ opp = devfreq_recommended_opp(dmc->dev, freq, flags);
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+
+ *target_rate = dev_pm_opp_get_freq(opp);
+ *target_volt = dev_pm_opp_get_voltage(opp);
+ dev_pm_opp_put(opp);
+
+ return 0;
+}
+
+/**
+ * exynos5_dmc_target() - Function responsible for changing frequency of DMC
+ * @dev: device for which the frequency is going to be changed
+ * @freq: requested frequency in Hz
+ * @flags: flags provided for this frequency change request
+ *
+ * An entry function provided to the devfreq framework which handles frequency
+ * changes of the DMC. The function gets the possible rate from the OPP table
+ * based on the requested frequency. It calls the next function responsible for
+ * the frequency and voltage change. In case of failure, it does not set
+ * 'curr_rate' and returns an error value to the framework.
+ */
+static int exynos5_dmc_target(struct device *dev, unsigned long *freq,
+ u32 flags)
+{
+ struct exynos5_dmc *dmc = dev_get_drvdata(dev);
+ unsigned long target_rate = 0;
+ unsigned long target_volt = 0;
+ int ret;
+
+ ret = exynos5_dmc_get_volt_freq(dmc, freq, &target_rate, &target_volt,
+ flags);
+
+ if (ret)
+ return ret;
+
+ if (target_rate == dmc->curr_rate)
+ return 0;
+
+ mutex_lock(&dmc->lock);
+
+ ret = exynos5_dmc_change_freq_and_volt(dmc, target_rate, target_volt);
+
+ if (ret) {
+ mutex_unlock(&dmc->lock);
+ return ret;
+ }
+
+ dmc->curr_rate = target_rate;
+
+ mutex_unlock(&dmc->lock);
+ return 0;
+}
+
+/**
+ * exynos5_counters_get() - Gets the performance counters values.
+ * @dmc: device for which the counters are going to be checked
+ * @load_count: variable which is populated with counter value
+ * @total_count: variable which is used as 'wall clock' reference
+ *
+ * Function which provides performance counters values. It sums up counters for
+ * two DMC channels. The 'total_count' is used as a reference and max value.
+ * The ratio 'load_count/total_count' shows the busy percentage [0%, 100%].
+ */
+static int exynos5_counters_get(struct exynos5_dmc *dmc,
+ unsigned long *load_count,
+ unsigned long *total_count)
+{
+ unsigned long total = 0;
+ struct devfreq_event_data event;
+ int ret, i;
+
+ *load_count = 0;
+
+ /* Take into account only read+write counters, but stop all */
+ for (i = 0; i < dmc->num_counters; i++) {
+ if (!dmc->counter[i])
+ continue;
+
+ ret = devfreq_event_get_event(dmc->counter[i], &event);
+ if (ret < 0)
+ return ret;
+
+ *load_count += event.load_count;
+
+ if (total < event.total_count)
+ total = event.total_count;
+ }
+
+ *total_count = total;
+
+ return 0;
+}
+
+/**
+ * exynos5_dmc_start_perf_events() - Setup and start performance event counters
+ * @dmc: device for which the counters are going to be checked
+ * @beg_value: initial value for the counter
+ *
+ * Function which enables the needed counters and interrupts, sets initial
+ * values, and then starts the counters.
+ */
+static void exynos5_dmc_start_perf_events(struct exynos5_dmc *dmc,
+ u32 beg_value)
+{
+ /* Enable interrupts for counter 2 */
+ writel(PERF_CNT2, dmc->base_drexi0 + DREX_INTENS_PPC);
+ writel(PERF_CNT2, dmc->base_drexi1 + DREX_INTENS_PPC);
+
+ /* Enable counter 2 and CCNT */
+ writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi0 + DREX_CNTENS_PPC);
+ writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi1 + DREX_CNTENS_PPC);
+
+ /* Clear overflow flag for all counters */
+ writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi0 + DREX_FLAG_PPC);
+ writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi1 + DREX_FLAG_PPC);
+
+ /* Reset all counters */
+ writel(CC_RESET | PPC_COUNTER_RESET, dmc->base_drexi0 + DREX_PMNC_PPC);
+ writel(CC_RESET | PPC_COUNTER_RESET, dmc->base_drexi1 + DREX_PMNC_PPC);
+
+ /*
+ * Set start value for the counters, the number of samples that
+ * will be gathered is calculated as: 0xffffffff - beg_value
+ */
+ writel(beg_value, dmc->base_drexi0 + DREX_PMCNT2_PPC);
+ writel(beg_value, dmc->base_drexi1 + DREX_PMCNT2_PPC);
+
+ /* Start all counters */
+ writel(PPC_ENABLE, dmc->base_drexi0 + DREX_PMNC_PPC);
+ writel(PPC_ENABLE, dmc->base_drexi1 + DREX_PMNC_PPC);
+}
+
+/**
+ * exynos5_dmc_perf_events_calc() - Calculate utilization
+ * @dmc: device for which the counters are going to be checked
+ * @diff_ts: time between last interrupt and current one
+ *
+ * Function which calculates needed utilization for the devfreq governor.
+ * It prepares values for 'busy_time' and 'total_time' based on elapsed time
+ * between interrupts, which approximates utilization.
+ */
+static void exynos5_dmc_perf_events_calc(struct exynos5_dmc *dmc, u64 diff_ts)
+{
+ /*
+ * This is a simple algorithm for managing traffic on DMC.
+ * When there is almost no load the counters overflow every 4s,
+ * no matter the DMC frequency.
+ * High load might be approximated using a linear function.
+ * Knowing that, simple calculation can provide 'busy_time' and
+ * 'total_time' to the devfreq governor which picks up target
+ * frequency.
+ * We want a fast ramp up and slow decay in frequency change function.
+ */
+ if (diff_ts < PERF_EVENT_UP_DOWN_THRESHOLD) {
+ /*
+ * Set higher utilization for the simple_ondemand governor.
+ * The governor should increase the frequency of the DMC.
+ */
+ dmc->load = 70;
+ dmc->total = 100;
+ } else {
+ /*
+ * Set low utilization for the simple_ondemand governor.
+ * The governor should decrease the frequency of the DMC.
+ */
+ dmc->load = 35;
+ dmc->total = 100;
+ }
+
+ dev_dbg(dmc->dev, "diff_ts=%llu\n", diff_ts);
+}
+
+/**
+ * exynos5_dmc_perf_events_check() - Checks the status of the counters
+ * @dmc: device for which the counters are going to be checked
+ *
+ * Function which is called from threaded IRQ to check the counters state
+ * and to call approximation for the needed utilization.
+ */
+static void exynos5_dmc_perf_events_check(struct exynos5_dmc *dmc)
+{
+ u32 val;
+ u64 diff_ts, ts;
+
+ ts = ktime_get_ns();
+
+ /* Stop all counters */
+ writel(0, dmc->base_drexi0 + DREX_PMNC_PPC);
+ writel(0, dmc->base_drexi1 + DREX_PMNC_PPC);
+
+ /* Check the source in interrupt flag registers (which channel) */
+ val = readl(dmc->base_drexi0 + DREX_FLAG_PPC);
+ if (val) {
+ diff_ts = ts - dmc->last_overflow_ts[0];
+ dmc->last_overflow_ts[0] = ts;
+ dev_dbg(dmc->dev, "drex0 0xE050 val= 0x%08x\n", val);
+ } else {
+ val = readl(dmc->base_drexi1 + DREX_FLAG_PPC);
+ diff_ts = ts - dmc->last_overflow_ts[1];
+ dmc->last_overflow_ts[1] = ts;
+ dev_dbg(dmc->dev, "drex1 0xE050 val= 0x%08x\n", val);
+ }
+
+ exynos5_dmc_perf_events_calc(dmc, diff_ts);
+
+ exynos5_dmc_start_perf_events(dmc, PERF_COUNTER_START_VALUE);
+}
+
+/**
+ * exynos5_dmc_enable_perf_events() - Enable performance events
+ * @dmc: device for which the counters are going to be checked
+ *
+ * Function which sets up the needed environment and enables the counters.
+ */
+static void exynos5_dmc_enable_perf_events(struct exynos5_dmc *dmc)
+{
+ u64 ts;
+
+ /* Enable Performance Event Clock */
+ writel(PEREV_CLK_EN, dmc->base_drexi0 + DREX_PPCCLKCON);
+ writel(PEREV_CLK_EN, dmc->base_drexi1 + DREX_PPCCLKCON);
+
+ /* Select read transfers as performance event2 */
+ writel(READ_TRANSFER_CH0, dmc->base_drexi0 + DREX_PEREV2CONFIG);
+ writel(READ_TRANSFER_CH1, dmc->base_drexi1 + DREX_PEREV2CONFIG);
+
+ ts = ktime_get_ns();
+ dmc->last_overflow_ts[0] = ts;
+ dmc->last_overflow_ts[1] = ts;
+
+ /* Devfreq shouldn't be faster than initialization, play safe though. */
+ dmc->load = 99;
+ dmc->total = 100;
+}
+
+/**
+ * exynos5_dmc_disable_perf_events() - Disable performance events
+ * @dmc: device for which the counters are going to be checked
+ *
+ * Function which stops and disables the performance event counters and interrupts.
+ */
+static void exynos5_dmc_disable_perf_events(struct exynos5_dmc *dmc)
+{
+ /* Stop all counters */
+ writel(0, dmc->base_drexi0 + DREX_PMNC_PPC);
+ writel(0, dmc->base_drexi1 + DREX_PMNC_PPC);
+
+ /* Disable interrupts for counter 2 */
+ writel(PERF_CNT2, dmc->base_drexi0 + DREX_INTENC_PPC);
+ writel(PERF_CNT2, dmc->base_drexi1 + DREX_INTENC_PPC);
+
+ /* Disable counter 2 and CCNT */
+ writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi0 + DREX_CNTENC_PPC);
+ writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi1 + DREX_CNTENC_PPC);
+
+ /* Clear overflow flag for all counters */
+ writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi0 + DREX_FLAG_PPC);
+ writel(PERF_CNT2 | PERF_CCNT, dmc->base_drexi1 + DREX_FLAG_PPC);
+}
+
+/**
+ * exynos5_dmc_get_status() - Read current DMC performance statistics.
+ * @dev: device for which the statistics are requested
+ * @stat: structure which has statistic fields
+ *
+ * Function reads the DMC performance counters and calculates 'busy_time'
+ * and 'total_time'. To protect from overflow, the values are shifted right
+ * by 10. After readout, the counters are set up to count again.
+ */
+static int exynos5_dmc_get_status(struct device *dev,
+ struct devfreq_dev_status *stat)
+{
+ struct exynos5_dmc *dmc = dev_get_drvdata(dev);
+ unsigned long load, total;
+ int ret;
+
+ if (dmc->in_irq_mode) {
+ stat->current_frequency = dmc->curr_rate;
+ stat->busy_time = dmc->load;
+ stat->total_time = dmc->total;
+ } else {
+ ret = exynos5_counters_get(dmc, &load, &total);
+ if (ret < 0)
+ return -EINVAL;
+
+ /* To protect from overflow, divide by 1024 */
+ stat->busy_time = load >> 10;
+ stat->total_time = total >> 10;
+
+ ret = exynos5_counters_set_event(dmc);
+ if (ret < 0) {
+ dev_err(dev, "could not set event counter\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * exynos5_dmc_get_cur_freq() - Function returns current DMC frequency
+ * @dev: device for which the framework checks operating frequency
+ * @freq: returned frequency value
+ *
+ * It returns the currently used frequency of the DMC. The real operating
+ * frequency might be lower when the clock source value could not be divided
+ * to the requested value.
+ */
+static int exynos5_dmc_get_cur_freq(struct device *dev, unsigned long *freq)
+{
+ struct exynos5_dmc *dmc = dev_get_drvdata(dev);
+
+ mutex_lock(&dmc->lock);
+ *freq = dmc->curr_rate;
+ mutex_unlock(&dmc->lock);
+
+ return 0;
+}
+
+/**
+ * exynos5_dmc_df_profile - Devfreq governor's profile structure
+ *
+ * It provides the devfreq framework with the needed functions and the polling period.
+ */
+static struct devfreq_dev_profile exynos5_dmc_df_profile = {
+ .target = exynos5_dmc_target,
+ .get_dev_status = exynos5_dmc_get_status,
+ .get_cur_freq = exynos5_dmc_get_cur_freq,
+};
+
+/**
+ * exynos5_dmc_align_initial_frequency() - Align initial frequency value
+ * @dmc: device for which the frequency is going to be set
+ * @bootloader_init_freq: initial frequency set by the bootloader in KHz
+ *
+ * The initial frequency set up by the bootloader might differ from the
+ * frequency values supported by the driver, e.g. due to different PLL
+ * settings or a different PLL used as the source.
+ * This function provides the 'initial_freq' for the devfreq framework
+ * statistics engine, which supports only registered values. Thus, some
+ * alignment must be made.
+ */
+static unsigned long
+exynos5_dmc_align_init_freq(struct exynos5_dmc *dmc,
+ unsigned long bootloader_init_freq)
+{
+ unsigned long aligned_freq;
+ int idx;
+
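+	/* use the matching OPP if found, otherwise the highest supported one */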
+ idx = find_target_freq_idx(dmc, bootloader_init_freq);
+ if (idx >= 0)
+ aligned_freq = dmc->opp[idx].freq_hz;
+ else
+ aligned_freq = dmc->opp[dmc->opp_count - 1].freq_hz;
+
+ return aligned_freq;
+}
+
+/**
+ * create_timings_aligned() - Create register values and align with standard
+ * @dmc: device for which the frequency is going to be set
+ * @reg_timing_row: output value for the row-timing register
+ * @reg_timing_data: output value for the data-timing register
+ * @reg_timing_power: output value for the power-timing register
+ * @clk_period_ps: the period of the clock, known as tCK
+ *
+ * The function calculates timings and creates a register value ready for
+ * a frequency transition. The register packs several timing fields, each
+ * shifted by a known offset. Each timing value is calculated from the memory
+ * specification: the minimum time required and the minimum cycles required.
+ */
+static int create_timings_aligned(struct exynos5_dmc *dmc, u32 *reg_timing_row,
+ u32 *reg_timing_data, u32 *reg_timing_power,
+ u32 clk_period_ps)
+{
+ u32 val;
+ const struct timing_reg *reg;
+
+ if (clk_period_ps == 0)
+ return -EINVAL;
+
+ *reg_timing_row = 0;
+ *reg_timing_data = 0;
+ *reg_timing_power = 0;
+
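+	/*
+	 * Each timing below is converted from a minimum time in picoseconds
+	 * to clock cycles (rounded up), then clamped to the minimum cycle
+	 * count required by the memory specification.
+	 */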
+ val = dmc->timings->tRFC / clk_period_ps;
+ val += dmc->timings->tRFC % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tRFC);
+ reg = &timing_row[0];
+ *reg_timing_row |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tRRD / clk_period_ps;
+ val += dmc->timings->tRRD % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tRRD);
+ reg = &timing_row[1];
+ *reg_timing_row |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tRPab / clk_period_ps;
+ val += dmc->timings->tRPab % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tRPab);
+ reg = &timing_row[2];
+ *reg_timing_row |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tRCD / clk_period_ps;
+ val += dmc->timings->tRCD % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tRCD);
+ reg = &timing_row[3];
+ *reg_timing_row |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tRC / clk_period_ps;
+ val += dmc->timings->tRC % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tRC);
+ reg = &timing_row[4];
+ *reg_timing_row |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tRAS / clk_period_ps;
+ val += dmc->timings->tRAS % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tRAS);
+ reg = &timing_row[5];
+ *reg_timing_row |= TIMING_VAL2REG(reg, val);
+
+ /* data related timings */
+ val = dmc->timings->tWTR / clk_period_ps;
+ val += dmc->timings->tWTR % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tWTR);
+ reg = &timing_data[0];
+ *reg_timing_data |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tWR / clk_period_ps;
+ val += dmc->timings->tWR % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tWR);
+ reg = &timing_data[1];
+ *reg_timing_data |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tRTP / clk_period_ps;
+ val += dmc->timings->tRTP % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tRTP);
+ reg = &timing_data[2];
+ *reg_timing_data |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tW2W_C2C / clk_period_ps;
+ val += dmc->timings->tW2W_C2C % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tW2W_C2C);
+ reg = &timing_data[3];
+ *reg_timing_data |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tR2R_C2C / clk_period_ps;
+ val += dmc->timings->tR2R_C2C % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tR2R_C2C);
+ reg = &timing_data[4];
+ *reg_timing_data |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tWL / clk_period_ps;
+ val += dmc->timings->tWL % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tWL);
+ reg = &timing_data[5];
+ *reg_timing_data |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tDQSCK / clk_period_ps;
+ val += dmc->timings->tDQSCK % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tDQSCK);
+ reg = &timing_data[6];
+ *reg_timing_data |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tRL / clk_period_ps;
+ val += dmc->timings->tRL % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tRL);
+ reg = &timing_data[7];
+ *reg_timing_data |= TIMING_VAL2REG(reg, val);
+
+ /* power related timings */
+ val = dmc->timings->tFAW / clk_period_ps;
+ val += dmc->timings->tFAW % clk_period_ps ? 1 : 0;
+	val = max(val, dmc->min_tck->tFAW);
+ reg = &timing_power[0];
+ *reg_timing_power |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tXSR / clk_period_ps;
+ val += dmc->timings->tXSR % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tXSR);
+ reg = &timing_power[1];
+ *reg_timing_power |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tXP / clk_period_ps;
+ val += dmc->timings->tXP % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tXP);
+ reg = &timing_power[2];
+ *reg_timing_power |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tCKE / clk_period_ps;
+ val += dmc->timings->tCKE % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tCKE);
+ reg = &timing_power[3];
+ *reg_timing_power |= TIMING_VAL2REG(reg, val);
+
+ val = dmc->timings->tMRD / clk_period_ps;
+ val += dmc->timings->tMRD % clk_period_ps ? 1 : 0;
+ val = max(val, dmc->min_tck->tMRD);
+ reg = &timing_power[4];
+ *reg_timing_power |= TIMING_VAL2REG(reg, val);
+
+ return 0;
+}
+
+/**
+ * of_get_dram_timings() - helper function for parsing DT settings for DRAM
+ * @dmc: device for which the frequency is going to be set
+ *
+ * The function parses DT entries with DRAM information.
+ */
+static int of_get_dram_timings(struct exynos5_dmc *dmc)
+{
+ int ret = 0;
+ int idx;
+ struct device_node *np_ddr;
+ u32 freq_mhz, clk_period_ps;
+
+ np_ddr = of_parse_phandle(dmc->dev->of_node, "device-handle", 0);
+ if (!np_ddr) {
+ dev_warn(dmc->dev, "could not find 'device-handle' in DT\n");
+ return -EINVAL;
+ }
+
+ dmc->timing_row = devm_kmalloc_array(dmc->dev, TIMING_COUNT,
+ sizeof(u32), GFP_KERNEL);
+ if (!dmc->timing_row)
+ return -ENOMEM;
+
+ dmc->timing_data = devm_kmalloc_array(dmc->dev, TIMING_COUNT,
+ sizeof(u32), GFP_KERNEL);
+ if (!dmc->timing_data)
+ return -ENOMEM;
+
+ dmc->timing_power = devm_kmalloc_array(dmc->dev, TIMING_COUNT,
+ sizeof(u32), GFP_KERNEL);
+ if (!dmc->timing_power)
+ return -ENOMEM;
+
+ dmc->timings = of_lpddr3_get_ddr_timings(np_ddr, dmc->dev,
+ DDR_TYPE_LPDDR3,
+ &dmc->timings_arr_size);
+ if (!dmc->timings) {
+ of_node_put(np_ddr);
+ dev_warn(dmc->dev, "could not get timings from DT\n");
+ return -EINVAL;
+ }
+
+ dmc->min_tck = of_lpddr3_get_min_tck(np_ddr, dmc->dev);
+ if (!dmc->min_tck) {
+ of_node_put(np_ddr);
+ dev_warn(dmc->dev, "could not get tck from DT\n");
+ return -EINVAL;
+ }
+
+ /* Sorted array of OPPs with frequency ascending */
+ for (idx = 0; idx < dmc->opp_count; idx++) {
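+		/* clock period tCK in picoseconds: 1000000 / (rate in MHz) */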
+ freq_mhz = dmc->opp[idx].freq_hz / 1000000;
+ clk_period_ps = 1000000 / freq_mhz;
+
+ ret = create_timings_aligned(dmc, &dmc->timing_row[idx],
+ &dmc->timing_data[idx],
+ &dmc->timing_power[idx],
+ clk_period_ps);
+ }
+
+ of_node_put(np_ddr);
+
+ /* Take the highest frequency's timings as 'bypass' */
+ dmc->bypass_timing_row = dmc->timing_row[idx - 1];
+ dmc->bypass_timing_data = dmc->timing_data[idx - 1];
+ dmc->bypass_timing_power = dmc->timing_power[idx - 1];
+
+ return ret;
+}
+
+/**
+ * exynos5_dmc_init_clks() - Initialize clocks needed for DMC operation.
+ * @dmc: DMC structure containing needed fields
+ *
+ * Get the needed clocks defined in the DT node, enable them and set the
+ * right parents. Read the current frequency and initialize the governor's
+ * initial rate.
+ */
+static int exynos5_dmc_init_clks(struct exynos5_dmc *dmc)
+{
+ int ret;
+ unsigned long target_volt = 0;
+ unsigned long target_rate = 0;
+ unsigned int tmp;
+
+ dmc->fout_spll = devm_clk_get(dmc->dev, "fout_spll");
+ if (IS_ERR(dmc->fout_spll))
+ return PTR_ERR(dmc->fout_spll);
+
+ dmc->fout_bpll = devm_clk_get(dmc->dev, "fout_bpll");
+ if (IS_ERR(dmc->fout_bpll))
+ return PTR_ERR(dmc->fout_bpll);
+
+ dmc->mout_mclk_cdrex = devm_clk_get(dmc->dev, "mout_mclk_cdrex");
+ if (IS_ERR(dmc->mout_mclk_cdrex))
+ return PTR_ERR(dmc->mout_mclk_cdrex);
+
+ dmc->mout_bpll = devm_clk_get(dmc->dev, "mout_bpll");
+ if (IS_ERR(dmc->mout_bpll))
+ return PTR_ERR(dmc->mout_bpll);
+
+ dmc->mout_mx_mspll_ccore = devm_clk_get(dmc->dev,
+ "mout_mx_mspll_ccore");
+ if (IS_ERR(dmc->mout_mx_mspll_ccore))
+ return PTR_ERR(dmc->mout_mx_mspll_ccore);
+
+ dmc->mout_spll = devm_clk_get(dmc->dev, "ff_dout_spll2");
+ if (IS_ERR(dmc->mout_spll)) {
+ dmc->mout_spll = devm_clk_get(dmc->dev, "mout_sclk_spll");
+ if (IS_ERR(dmc->mout_spll))
+ return PTR_ERR(dmc->mout_spll);
+ }
+
+	/*
+	 * Align the current rate to a frequency value supported by the driver
+	 * and set it as the governor's initial frequency.
+	 */
+ dmc->curr_rate = clk_get_rate(dmc->mout_mclk_cdrex);
+ dmc->curr_rate = exynos5_dmc_align_init_freq(dmc, dmc->curr_rate);
+ exynos5_dmc_df_profile.initial_freq = dmc->curr_rate;
+
+ ret = exynos5_dmc_get_volt_freq(dmc, &dmc->curr_rate, &target_rate,
+ &target_volt, 0);
+ if (ret)
+ return ret;
+
+ dmc->curr_volt = target_volt;
+
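+	/*
+	 * Route SPLL to the ccore MUX; its output serves as a safe 'bypass'
+	 * clock while the main PLL is reprogrammed during a transition.
+	 */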
+ clk_set_parent(dmc->mout_mx_mspll_ccore, dmc->mout_spll);
+
+ dmc->bypass_rate = clk_get_rate(dmc->mout_mx_mspll_ccore);
+
+ clk_prepare_enable(dmc->fout_bpll);
+ clk_prepare_enable(dmc->mout_bpll);
+
+ /*
+ * Some bootloaders do not set clock routes correctly.
+ * Stop one path in clocks to PHY.
+ */
+ regmap_read(dmc->clk_regmap, CDREX_LPDDR3PHY_CLKM_SRC, &tmp);
+ tmp &= ~(BIT(1) | BIT(0));
+ regmap_write(dmc->clk_regmap, CDREX_LPDDR3PHY_CLKM_SRC, tmp);
+
+ return 0;
+}
+
+/**
+ * exynos5_performance_counters_init() - Initializes the DMC's performance counters
+ * @dmc: DMC for which the setup is done
+ *
+ * Initialization of the performance counters in the DMC for estimating usage.
+ * The counters' values are used to calculate the memory bandwidth, based on
+ * which the governor changes the frequency.
+ * The counters are not used when the governor is GOVERNOR_USERSPACE.
+ */
+static int exynos5_performance_counters_init(struct exynos5_dmc *dmc)
+{
+ int counters_size;
+ int ret, i;
+
+ dmc->num_counters = devfreq_event_get_edev_count(dmc->dev);
+ if (dmc->num_counters < 0) {
+ dev_err(dmc->dev, "could not get devfreq-event counters\n");
+ return dmc->num_counters;
+ }
+
+ counters_size = sizeof(struct devfreq_event_dev) * dmc->num_counters;
+ dmc->counter = devm_kzalloc(dmc->dev, counters_size, GFP_KERNEL);
+ if (!dmc->counter)
+ return -ENOMEM;
+
+ for (i = 0; i < dmc->num_counters; i++) {
+ dmc->counter[i] =
+ devfreq_event_get_edev_by_phandle(dmc->dev, i);
+ if (IS_ERR_OR_NULL(dmc->counter[i]))
+ return -EPROBE_DEFER;
+ }
+
+ ret = exynos5_counters_enable_edev(dmc);
+ if (ret < 0) {
+ dev_err(dmc->dev, "could not enable event counter\n");
+ return ret;
+ }
+
+ ret = exynos5_counters_set_event(dmc);
+ if (ret < 0) {
+ exynos5_counters_disable_edev(dmc);
+ dev_err(dmc->dev, "could not set event counter\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * exynos5_dmc_set_pause_on_switching() - Controls a pause feature in DMC
+ * @dmc: device which is used for changing this feature
+ *
+ * The DREX DMC must be paused when a divider or MUX in the clock tree
+ * changes its configuration. In that situation, access to the memory is
+ * blocked by the DMC automatically. This feature is used when a clock
+ * frequency change request touches the clock tree.
+ */
+static inline int exynos5_dmc_set_pause_on_switching(struct exynos5_dmc *dmc)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(dmc->clk_regmap, CDREX_PAUSE, &val);
+ if (ret)
+ return ret;
+
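+	/* bit 0 of the CDREX PAUSE register enables the automatic pause */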
+ val |= 1UL;
+ regmap_write(dmc->clk_regmap, CDREX_PAUSE, val);
+
+ return 0;
+}
+
+static irqreturn_t dmc_irq_thread(int irq, void *priv)
+{
+ int res;
+ struct exynos5_dmc *dmc = priv;
+
+ mutex_lock(&dmc->df->lock);
+
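+	/* read and restart the counters, refreshing dmc->load and dmc->total */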
+ exynos5_dmc_perf_events_check(dmc);
+
+ res = update_devfreq(dmc->df);
+ if (res)
+ dev_warn(dmc->dev, "devfreq failed with %d\n", res);
+
+ mutex_unlock(&dmc->df->lock);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * exynos5_dmc_probe() - Probe function for the DMC driver
+ * @pdev: platform device for which the driver is going to be initialized
+ *
+ * Initialize basic components: clocks, regulators, performance counters, etc.
+ * Read out the product version and, based on that information, set up
+ * internal structures for the controller (frequency and voltage) and for the
+ * DRAM memory parameters: timings for each operating frequency.
+ * Register a new devfreq device for controlling DVFS of the DMC.
+ */
+static int exynos5_dmc_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct exynos5_dmc *dmc;
+ struct resource *res;
+ int irq[2];
+
+ dmc = devm_kzalloc(dev, sizeof(*dmc), GFP_KERNEL);
+ if (!dmc)
+ return -ENOMEM;
+
+ mutex_init(&dmc->lock);
+
+ dmc->dev = dev;
+ platform_set_drvdata(pdev, dmc);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dmc->base_drexi0 = devm_ioremap_resource(dev, res);
+ if (IS_ERR(dmc->base_drexi0))
+ return PTR_ERR(dmc->base_drexi0);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ dmc->base_drexi1 = devm_ioremap_resource(dev, res);
+ if (IS_ERR(dmc->base_drexi1))
+ return PTR_ERR(dmc->base_drexi1);
+
+ dmc->clk_regmap = syscon_regmap_lookup_by_phandle(np,
+ "samsung,syscon-clk");
+ if (IS_ERR(dmc->clk_regmap))
+ return PTR_ERR(dmc->clk_regmap);
+
+ ret = exynos5_init_freq_table(dmc, &exynos5_dmc_df_profile);
+ if (ret) {
+ dev_warn(dev, "couldn't initialize frequency settings\n");
+ return ret;
+ }
+
+ dmc->vdd_mif = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(dmc->vdd_mif)) {
+ ret = PTR_ERR(dmc->vdd_mif);
+ return ret;
+ }
+
+ ret = exynos5_dmc_init_clks(dmc);
+ if (ret)
+ return ret;
+
+ ret = of_get_dram_timings(dmc);
+ if (ret) {
+ dev_warn(dev, "couldn't initialize timings settings\n");
+ goto remove_clocks;
+ }
+
+ ret = exynos5_dmc_set_pause_on_switching(dmc);
+ if (ret) {
+ dev_warn(dev, "couldn't get access to PAUSE register\n");
+ goto remove_clocks;
+ }
+
+	/* There are two modes in which the driver works: polling or IRQ */
+ irq[0] = platform_get_irq_byname(pdev, "drex_0");
+ irq[1] = platform_get_irq_byname(pdev, "drex_1");
+ if (irq[0] > 0 && irq[1] > 0) {
+ ret = devm_request_threaded_irq(dev, irq[0], NULL,
+ dmc_irq_thread, IRQF_ONESHOT,
+ dev_name(dev), dmc);
+ if (ret) {
+ dev_err(dev, "couldn't grab IRQ\n");
+ goto remove_clocks;
+ }
+
+ ret = devm_request_threaded_irq(dev, irq[1], NULL,
+ dmc_irq_thread, IRQF_ONESHOT,
+ dev_name(dev), dmc);
+ if (ret) {
+ dev_err(dev, "couldn't grab IRQ\n");
+ goto remove_clocks;
+ }
+
+ /*
+ * Setup default thresholds for the devfreq governor.
+ * The values are chosen based on experiments.
+ */
+ dmc->gov_data.upthreshold = 55;
+ dmc->gov_data.downdifferential = 5;
+
+ exynos5_dmc_enable_perf_events(dmc);
+
+ dmc->in_irq_mode = 1;
+ } else {
+ ret = exynos5_performance_counters_init(dmc);
+ if (ret) {
+ dev_warn(dev, "couldn't probe performance counters\n");
+ goto remove_clocks;
+ }
+
+ /*
+ * Setup default thresholds for the devfreq governor.
+ * The values are chosen based on experiments.
+ */
+ dmc->gov_data.upthreshold = 30;
+ dmc->gov_data.downdifferential = 5;
+
+ exynos5_dmc_df_profile.polling_ms = 500;
+ }
+
+ dmc->df = devm_devfreq_add_device(dev, &exynos5_dmc_df_profile,
+ DEVFREQ_GOV_SIMPLE_ONDEMAND,
+ &dmc->gov_data);
+
+ if (IS_ERR(dmc->df)) {
+ ret = PTR_ERR(dmc->df);
+ goto err_devfreq_add;
+ }
+
+ if (dmc->in_irq_mode)
+ exynos5_dmc_start_perf_events(dmc, PERF_COUNTER_START_VALUE);
+
+ dev_info(dev, "DMC initialized\n");
+
+ return 0;
+
+err_devfreq_add:
+ if (dmc->in_irq_mode)
+ exynos5_dmc_disable_perf_events(dmc);
+ else
+ exynos5_counters_disable_edev(dmc);
+remove_clocks:
+ clk_disable_unprepare(dmc->mout_bpll);
+ clk_disable_unprepare(dmc->fout_bpll);
+
+ return ret;
+}
+
+/**
+ * exynos5_dmc_remove() - Remove function for the platform device
+ * @pdev: platform device which is going to be removed
+ *
+ * The function relies on the 'devm' framework, which automatically cleans
+ * up the device's resources. It only explicitly disables the performance
+ * counters.
+ */
+static int exynos5_dmc_remove(struct platform_device *pdev)
+{
+ struct exynos5_dmc *dmc = dev_get_drvdata(&pdev->dev);
+
+ if (dmc->in_irq_mode)
+ exynos5_dmc_disable_perf_events(dmc);
+ else
+ exynos5_counters_disable_edev(dmc);
+
+ clk_disable_unprepare(dmc->mout_bpll);
+ clk_disable_unprepare(dmc->fout_bpll);
+
+ dev_pm_opp_remove_table(dmc->dev);
+
+ return 0;
+}
+
+static const struct of_device_id exynos5_dmc_of_match[] = {
+ { .compatible = "samsung,exynos5422-dmc", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, exynos5_dmc_of_match);
+
+static struct platform_driver exynos5_dmc_platdrv = {
+ .probe = exynos5_dmc_probe,
+ .remove = exynos5_dmc_remove,
+ .driver = {
+ .name = "exynos5-dmc",
+ .of_match_table = exynos5_dmc_of_match,
+ },
+};
+module_platform_driver(exynos5_dmc_platdrv);
+MODULE_DESCRIPTION("Driver for Exynos5422 Dynamic Memory Controller dynamic frequency and voltage change");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Lukasz Luba");
diff --git a/drivers/memory/tegra/Kconfig b/drivers/memory/tegra/Kconfig
index 4680124ddcab..fbfbaada61a2 100644
--- a/drivers/memory/tegra/Kconfig
+++ b/drivers/memory/tegra/Kconfig
@@ -17,6 +17,16 @@ config TEGRA20_EMC
This driver is required to change memory timings / clock rate for
external memory.
+config TEGRA30_EMC
+ bool "NVIDIA Tegra30 External Memory Controller driver"
+ default y
+ depends on TEGRA_MC && ARCH_TEGRA_3x_SOC
+ help
+ This driver is for the External Memory Controller (EMC) found on
+ Tegra30 chips. The EMC controls the external DRAM on the board.
+ This driver is required to change memory timings / clock rate for
+ external memory.
+
config TEGRA124_EMC
bool "NVIDIA Tegra124 External Memory Controller driver"
default y
diff --git a/drivers/memory/tegra/Makefile b/drivers/memory/tegra/Makefile
index 3971a6b7c487..3d23c4261104 100644
--- a/drivers/memory/tegra/Makefile
+++ b/drivers/memory/tegra/Makefile
@@ -11,5 +11,6 @@ tegra-mc-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210.o
obj-$(CONFIG_TEGRA_MC) += tegra-mc.o
obj-$(CONFIG_TEGRA20_EMC) += tegra20-emc.o
+obj-$(CONFIG_TEGRA30_EMC) += tegra30-emc.o
obj-$(CONFIG_TEGRA124_EMC) += tegra124-emc.o
obj-$(CONFIG_ARCH_TEGRA_186_SOC) += tegra186.o
diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c
index 3d8d322511c5..ec8403557ed4 100644
--- a/drivers/memory/tegra/mc.c
+++ b/drivers/memory/tegra/mc.c
@@ -5,6 +5,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -18,39 +19,6 @@
#include "mc.h"
-#define MC_INTSTATUS 0x000
-
-#define MC_INTMASK 0x004
-
-#define MC_ERR_STATUS 0x08
-#define MC_ERR_STATUS_TYPE_SHIFT 28
-#define MC_ERR_STATUS_TYPE_INVALID_SMMU_PAGE (6 << MC_ERR_STATUS_TYPE_SHIFT)
-#define MC_ERR_STATUS_TYPE_MASK (0x7 << MC_ERR_STATUS_TYPE_SHIFT)
-#define MC_ERR_STATUS_READABLE (1 << 27)
-#define MC_ERR_STATUS_WRITABLE (1 << 26)
-#define MC_ERR_STATUS_NONSECURE (1 << 25)
-#define MC_ERR_STATUS_ADR_HI_SHIFT 20
-#define MC_ERR_STATUS_ADR_HI_MASK 0x3
-#define MC_ERR_STATUS_SECURITY (1 << 17)
-#define MC_ERR_STATUS_RW (1 << 16)
-
-#define MC_ERR_ADR 0x0c
-
-#define MC_GART_ERROR_REQ 0x30
-#define MC_DECERR_EMEM_OTHERS_STATUS 0x58
-#define MC_SECURITY_VIOLATION_STATUS 0x74
-
-#define MC_EMEM_ARB_CFG 0x90
-#define MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE(x) (((x) & 0x1ff) << 0)
-#define MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE_MASK 0x1ff
-#define MC_EMEM_ARB_MISC0 0xd8
-
-#define MC_EMEM_ADR_CFG 0x54
-#define MC_EMEM_ADR_CFG_EMEM_NUMDEV BIT(0)
-
-#define MC_TIMING_CONTROL 0xfc
-#define MC_TIMING_UPDATE BIT(0)
-
static const struct of_device_id tegra_mc_of_match[] = {
#ifdef CONFIG_ARCH_TEGRA_2x_SOC
{ .compatible = "nvidia,tegra20-mc-gart", .data = &tegra20_mc_soc },
@@ -307,7 +275,7 @@ static int tegra_mc_setup_latency_allowance(struct tegra_mc *mc)
return 0;
}
-void tegra_mc_write_emem_configuration(struct tegra_mc *mc, unsigned long rate)
+int tegra_mc_write_emem_configuration(struct tegra_mc *mc, unsigned long rate)
{
unsigned int i;
struct tegra_mc_timing *timing = NULL;
@@ -322,11 +290,13 @@ void tegra_mc_write_emem_configuration(struct tegra_mc *mc, unsigned long rate)
if (!timing) {
dev_err(mc->dev, "no memory timing registered for rate %lu\n",
rate);
- return;
+ return -EINVAL;
}
for (i = 0; i < mc->soc->num_emem_regs; ++i)
mc_writel(mc, timing->emem_data[i], mc->soc->emem_regs[i]);
+
+ return 0;
}
unsigned int tegra_mc_get_emem_device_count(struct tegra_mc *mc)
@@ -626,6 +596,7 @@ static int tegra_mc_probe(struct platform_device *pdev)
struct resource *res;
struct tegra_mc *mc;
void *isr;
+ u64 mask;
int err;
mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
@@ -637,6 +608,14 @@ static int tegra_mc_probe(struct platform_device *pdev)
mc->soc = of_device_get_match_data(&pdev->dev);
mc->dev = &pdev->dev;
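+	/* the MC address width determines the DMA-addressable range */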
+ mask = DMA_BIT_MASK(mc->soc->num_address_bits);
+
+ err = dma_coerce_mask_and_coherent(&pdev->dev, mask);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
+ return err;
+ }
+
/* length of MC tick in nanoseconds */
mc->tick = 30;
@@ -658,6 +637,9 @@ static int tegra_mc_probe(struct platform_device *pdev)
} else
#endif
{
+ /* ensure that debug features are disabled */
+ mc_writel(mc, 0x00000000, MC_TIMING_CONTROL_DBG);
+
err = tegra_mc_setup_latency_allowance(mc);
if (err < 0) {
dev_err(&pdev->dev,
diff --git a/drivers/memory/tegra/mc.h b/drivers/memory/tegra/mc.h
index f9353494b708..957c6eb74ff9 100644
--- a/drivers/memory/tegra/mc.h
+++ b/drivers/memory/tegra/mc.h
@@ -6,20 +6,76 @@
#ifndef MEMORY_TEGRA_MC_H
#define MEMORY_TEGRA_MC_H
+#include <linux/bits.h>
#include <linux/io.h>
#include <linux/types.h>
#include <soc/tegra/mc.h>
-#define MC_INT_DECERR_MTS (1 << 16)
-#define MC_INT_SECERR_SEC (1 << 13)
-#define MC_INT_DECERR_VPR (1 << 12)
-#define MC_INT_INVALID_APB_ASID_UPDATE (1 << 11)
-#define MC_INT_INVALID_SMMU_PAGE (1 << 10)
-#define MC_INT_ARBITRATION_EMEM (1 << 9)
-#define MC_INT_SECURITY_VIOLATION (1 << 8)
-#define MC_INT_INVALID_GART_PAGE (1 << 7)
-#define MC_INT_DECERR_EMEM (1 << 6)
+#define MC_INTSTATUS 0x00
+#define MC_INTMASK 0x04
+#define MC_ERR_STATUS 0x08
+#define MC_ERR_ADR 0x0c
+#define MC_GART_ERROR_REQ 0x30
+#define MC_EMEM_ADR_CFG 0x54
+#define MC_DECERR_EMEM_OTHERS_STATUS 0x58
+#define MC_SECURITY_VIOLATION_STATUS 0x74
+#define MC_EMEM_ARB_CFG 0x90
+#define MC_EMEM_ARB_OUTSTANDING_REQ 0x94
+#define MC_EMEM_ARB_TIMING_RCD 0x98
+#define MC_EMEM_ARB_TIMING_RP 0x9c
+#define MC_EMEM_ARB_TIMING_RC 0xa0
+#define MC_EMEM_ARB_TIMING_RAS 0xa4
+#define MC_EMEM_ARB_TIMING_FAW 0xa8
+#define MC_EMEM_ARB_TIMING_RRD 0xac
+#define MC_EMEM_ARB_TIMING_RAP2PRE 0xb0
+#define MC_EMEM_ARB_TIMING_WAP2PRE 0xb4
+#define MC_EMEM_ARB_TIMING_R2R 0xb8
+#define MC_EMEM_ARB_TIMING_W2W 0xbc
+#define MC_EMEM_ARB_TIMING_R2W 0xc0
+#define MC_EMEM_ARB_TIMING_W2R 0xc4
+#define MC_EMEM_ARB_DA_TURNS 0xd0
+#define MC_EMEM_ARB_DA_COVERS 0xd4
+#define MC_EMEM_ARB_MISC0 0xd8
+#define MC_EMEM_ARB_MISC1 0xdc
+#define MC_EMEM_ARB_RING1_THROTTLE 0xe0
+#define MC_EMEM_ARB_OVERRIDE 0xe8
+#define MC_TIMING_CONTROL_DBG 0xf8
+#define MC_TIMING_CONTROL 0xfc
+
+#define MC_INT_DECERR_MTS BIT(16)
+#define MC_INT_SECERR_SEC BIT(13)
+#define MC_INT_DECERR_VPR BIT(12)
+#define MC_INT_INVALID_APB_ASID_UPDATE BIT(11)
+#define MC_INT_INVALID_SMMU_PAGE BIT(10)
+#define MC_INT_ARBITRATION_EMEM BIT(9)
+#define MC_INT_SECURITY_VIOLATION BIT(8)
+#define MC_INT_INVALID_GART_PAGE BIT(7)
+#define MC_INT_DECERR_EMEM BIT(6)
+
+#define MC_ERR_STATUS_TYPE_SHIFT 28
+#define MC_ERR_STATUS_TYPE_INVALID_SMMU_PAGE	(0x6 << MC_ERR_STATUS_TYPE_SHIFT)
+#define MC_ERR_STATUS_TYPE_MASK			(0x7 << MC_ERR_STATUS_TYPE_SHIFT)
+#define MC_ERR_STATUS_READABLE BIT(27)
+#define MC_ERR_STATUS_WRITABLE BIT(26)
+#define MC_ERR_STATUS_NONSECURE BIT(25)
+#define MC_ERR_STATUS_ADR_HI_SHIFT 20
+#define MC_ERR_STATUS_ADR_HI_MASK 0x3
+#define MC_ERR_STATUS_SECURITY BIT(17)
+#define MC_ERR_STATUS_RW BIT(16)
+
+#define MC_EMEM_ADR_CFG_EMEM_NUMDEV BIT(0)
+
+#define MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE(x) ((x) & 0x1ff)
+#define MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE_MASK 0x1ff
+
+#define MC_EMEM_ARB_OUTSTANDING_REQ_MAX_MASK 0x1ff
+#define MC_EMEM_ARB_OUTSTANDING_REQ_HOLDOFF_OVERRIDE BIT(30)
+#define MC_EMEM_ARB_OUTSTANDING_REQ_LIMIT_ENABLE BIT(31)
+
+#define MC_EMEM_ARB_OVERRIDE_EACK_MASK 0x3
+
+#define MC_TIMING_UPDATE BIT(0)
static inline u32 mc_readl(struct tegra_mc *mc, unsigned long offset)
{
diff --git a/drivers/memory/tegra/tegra114.c b/drivers/memory/tegra/tegra114.c
index ac8351b5beeb..48ef01c3ff90 100644
--- a/drivers/memory/tegra/tegra114.c
+++ b/drivers/memory/tegra/tegra114.c
@@ -909,16 +909,18 @@ static const struct tegra_smmu_swgroup tegra114_swgroups[] = {
{ .name = "tsec", .swgroup = TEGRA_SWGROUP_TSEC, .reg = 0x294 },
};
-static const unsigned int tegra114_group_display[] = {
+static const unsigned int tegra114_group_drm[] = {
TEGRA_SWGROUP_DC,
TEGRA_SWGROUP_DCB,
+ TEGRA_SWGROUP_G2,
+ TEGRA_SWGROUP_NV,
};
static const struct tegra_smmu_group_soc tegra114_groups[] = {
{
- .name = "display",
- .swgroups = tegra114_group_display,
- .num_swgroups = ARRAY_SIZE(tegra114_group_display),
+ .name = "drm",
+ .swgroups = tegra114_group_drm,
+ .num_swgroups = ARRAY_SIZE(tegra114_group_drm),
},
};
diff --git a/drivers/memory/tegra/tegra124.c b/drivers/memory/tegra/tegra124.c
index 5d0ccb2be206..493b5dc3a4b3 100644
--- a/drivers/memory/tegra/tegra124.c
+++ b/drivers/memory/tegra/tegra124.c
@@ -10,26 +10,6 @@
#include "mc.h"
-#define MC_EMEM_ARB_CFG 0x90
-#define MC_EMEM_ARB_OUTSTANDING_REQ 0x94
-#define MC_EMEM_ARB_TIMING_RCD 0x98
-#define MC_EMEM_ARB_TIMING_RP 0x9c
-#define MC_EMEM_ARB_TIMING_RC 0xa0
-#define MC_EMEM_ARB_TIMING_RAS 0xa4
-#define MC_EMEM_ARB_TIMING_FAW 0xa8
-#define MC_EMEM_ARB_TIMING_RRD 0xac
-#define MC_EMEM_ARB_TIMING_RAP2PRE 0xb0
-#define MC_EMEM_ARB_TIMING_WAP2PRE 0xb4
-#define MC_EMEM_ARB_TIMING_R2R 0xb8
-#define MC_EMEM_ARB_TIMING_W2W 0xbc
-#define MC_EMEM_ARB_TIMING_R2W 0xc0
-#define MC_EMEM_ARB_TIMING_W2R 0xc4
-#define MC_EMEM_ARB_DA_TURNS 0xd0
-#define MC_EMEM_ARB_DA_COVERS 0xd4
-#define MC_EMEM_ARB_MISC0 0xd8
-#define MC_EMEM_ARB_MISC1 0xdc
-#define MC_EMEM_ARB_RING1_THROTTLE 0xe0
-
static const struct tegra_mc_client tegra124_mc_clients[] = {
{
.id = 0x00,
@@ -974,16 +954,18 @@ static const struct tegra_smmu_swgroup tegra124_swgroups[] = {
{ .name = "vi", .swgroup = TEGRA_SWGROUP_VI, .reg = 0x280 },
};
-static const unsigned int tegra124_group_display[] = {
+static const unsigned int tegra124_group_drm[] = {
TEGRA_SWGROUP_DC,
TEGRA_SWGROUP_DCB,
+ TEGRA_SWGROUP_GPU,
+ TEGRA_SWGROUP_VIC,
};
static const struct tegra_smmu_group_soc tegra124_groups[] = {
{
- .name = "display",
- .swgroups = tegra124_group_display,
- .num_swgroups = ARRAY_SIZE(tegra124_group_display),
+ .name = "drm",
+ .swgroups = tegra124_group_drm,
+ .num_swgroups = ARRAY_SIZE(tegra124_group_drm),
},
};
diff --git a/drivers/memory/tegra/tegra20-emc.c b/drivers/memory/tegra/tegra20-emc.c
index 9ee5bef49e47..1b23b1c34476 100644
--- a/drivers/memory/tegra/tegra20-emc.c
+++ b/drivers/memory/tegra/tegra20-emc.c
@@ -6,10 +6,11 @@
*/
#include <linux/clk.h>
+#include <linux/clk/tegra.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/interrupt.h>
-#include <linux/iopoll.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -21,6 +22,7 @@
#define EMC_INTSTATUS 0x000
#define EMC_INTMASK 0x004
+#define EMC_DBG 0x008
#define EMC_TIMING_CONTROL 0x028
#define EMC_RC 0x02c
#define EMC_RFC 0x030
@@ -79,6 +81,12 @@
#define EMC_REFRESH_OVERFLOW_INT BIT(3)
#define EMC_CLKCHANGE_COMPLETE_INT BIT(4)
+#define EMC_DBG_READ_MUX_ASSEMBLY BIT(0)
+#define EMC_DBG_WRITE_MUX_ACTIVE BIT(1)
+#define EMC_DBG_FORCE_UPDATE BIT(2)
+#define EMC_DBG_READ_DQM_CTRL BIT(9)
+#define EMC_DBG_CFG_PRIORITY BIT(24)
+
static const u16 emc_timing_registers[] = {
EMC_RC,
EMC_RFC,
@@ -137,9 +145,6 @@ struct tegra_emc {
struct device *dev;
struct completion clk_handshake_complete;
struct notifier_block clk_nb;
- struct clk *backup_clk;
- struct clk *emc_mux;
- struct clk *pll_m;
struct clk *clk;
void __iomem *regs;
@@ -219,7 +224,7 @@ static int emc_prepare_timing_change(struct tegra_emc *emc, unsigned long rate)
static int emc_complete_timing_change(struct tegra_emc *emc, bool flush)
{
- long timeout;
+ unsigned long timeout;
dev_dbg(emc->dev, "%s: flush %d\n", __func__, flush);
@@ -231,14 +236,10 @@ static int emc_complete_timing_change(struct tegra_emc *emc, bool flush)
}
timeout = wait_for_completion_timeout(&emc->clk_handshake_complete,
- usecs_to_jiffies(100));
+ msecs_to_jiffies(100));
if (timeout == 0) {
dev_err(emc->dev, "EMC-CAR handshake failed\n");
return -EIO;
- } else if (timeout < 0) {
- dev_err(emc->dev, "failed to wait for EMC-CAR handshake: %ld\n",
- timeout);
- return timeout;
}
return 0;
@@ -363,6 +364,13 @@ static int tegra_emc_load_timings_from_dt(struct tegra_emc *emc,
sort(emc->timings, emc->num_timings, sizeof(*timing), cmp_timings,
NULL);
+ dev_info(emc->dev,
+ "got %u timings for RAM code %u (min %luMHz max %luMHz)\n",
+ emc->num_timings,
+ tegra_read_ram_code(),
+ emc->timings[0].rate / 1000000,
+ emc->timings[emc->num_timings - 1].rate / 1000000);
+
return 0;
}
@@ -398,7 +406,7 @@ tegra_emc_find_node_by_ram_code(struct device *dev)
static int emc_setup_hw(struct tegra_emc *emc)
{
u32 intmask = EMC_REFRESH_OVERFLOW_INT | EMC_CLKCHANGE_COMPLETE_INT;
- u32 emc_cfg;
+ u32 emc_cfg, emc_dbg;
emc_cfg = readl_relaxed(emc->regs + EMC_CFG_2);
@@ -421,42 +429,53 @@ static int emc_setup_hw(struct tegra_emc *emc)
writel_relaxed(intmask, emc->regs + EMC_INTMASK);
writel_relaxed(intmask, emc->regs + EMC_INTSTATUS);
+ /* ensure that unwanted debug features are disabled */
+ emc_dbg = readl_relaxed(emc->regs + EMC_DBG);
+ emc_dbg |= EMC_DBG_CFG_PRIORITY;
+ emc_dbg &= ~EMC_DBG_READ_MUX_ASSEMBLY;
+ emc_dbg &= ~EMC_DBG_WRITE_MUX_ACTIVE;
+ emc_dbg &= ~EMC_DBG_FORCE_UPDATE;
+ writel_relaxed(emc_dbg, emc->regs + EMC_DBG);
+
return 0;
}
-static int emc_init(struct tegra_emc *emc, unsigned long rate)
+static long emc_round_rate(unsigned long rate,
+ unsigned long min_rate,
+ unsigned long max_rate,
+ void *arg)
{
- int err;
+ struct emc_timing *timing = NULL;
+ struct tegra_emc *emc = arg;
+ unsigned int i;
- err = clk_set_parent(emc->emc_mux, emc->backup_clk);
- if (err) {
- dev_err(emc->dev,
- "failed to reparent to backup source: %d\n", err);
- return err;
- }
+ min_rate = min(min_rate, emc->timings[emc->num_timings - 1].rate);
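+
+	/* pick the nearest supported timing, honouring the min/max bounds */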
- err = clk_set_rate(emc->pll_m, rate);
- if (err) {
- dev_err(emc->dev,
- "failed to change pll_m rate: %d\n", err);
- return err;
- }
+ for (i = 0; i < emc->num_timings; i++) {
+ if (emc->timings[i].rate < rate && i != emc->num_timings - 1)
+ continue;
- err = clk_set_parent(emc->emc_mux, emc->pll_m);
- if (err) {
- dev_err(emc->dev,
- "failed to reparent to pll_m: %d\n", err);
- return err;
+ if (emc->timings[i].rate > max_rate) {
+ i = max(i, 1u) - 1;
+
+ if (emc->timings[i].rate < min_rate)
+ break;
+ }
+
+ if (emc->timings[i].rate < min_rate)
+ continue;
+
+ timing = &emc->timings[i];
+ break;
}
- err = clk_set_rate(emc->clk, rate);
- if (err) {
- dev_err(emc->dev,
- "failed to change emc rate: %d\n", err);
- return err;
+ if (!timing) {
+ dev_err(emc->dev, "no timing for rate %lu min %lu max %lu\n",
+ rate, min_rate, max_rate);
+ return -EINVAL;
}
- return 0;
+ return timing->rate;
}
static int tegra_emc_probe(struct platform_device *pdev)
@@ -515,57 +534,26 @@ static int tegra_emc_probe(struct platform_device *pdev)
return err;
}
+ tegra20_clk_set_emc_round_callback(emc_round_rate, emc);
+
emc->clk = devm_clk_get(&pdev->dev, "emc");
if (IS_ERR(emc->clk)) {
err = PTR_ERR(emc->clk);
dev_err(&pdev->dev, "failed to get emc clock: %d\n", err);
- return err;
- }
-
- emc->pll_m = clk_get_sys(NULL, "pll_m");
- if (IS_ERR(emc->pll_m)) {
- err = PTR_ERR(emc->pll_m);
- dev_err(&pdev->dev, "failed to get pll_m clock: %d\n", err);
- return err;
- }
-
- emc->backup_clk = clk_get_sys(NULL, "pll_p");
- if (IS_ERR(emc->backup_clk)) {
- err = PTR_ERR(emc->backup_clk);
- dev_err(&pdev->dev, "failed to get pll_p clock: %d\n", err);
- goto put_pll_m;
- }
-
- emc->emc_mux = clk_get_parent(emc->clk);
- if (IS_ERR(emc->emc_mux)) {
- err = PTR_ERR(emc->emc_mux);
- dev_err(&pdev->dev, "failed to get emc_mux clock: %d\n", err);
- goto put_backup;
+ goto unset_cb;
}
err = clk_notifier_register(emc->clk, &emc->clk_nb);
if (err) {
dev_err(&pdev->dev, "failed to register clk notifier: %d\n",
err);
- goto put_backup;
- }
-
- /* set DRAM clock rate to maximum */
- err = emc_init(emc, emc->timings[emc->num_timings - 1].rate);
- if (err) {
- dev_err(&pdev->dev, "failed to initialize EMC clock rate: %d\n",
- err);
- goto unreg_notifier;
+ goto unset_cb;
}
return 0;
-unreg_notifier:
- clk_notifier_unregister(emc->clk, &emc->clk_nb);
-put_backup:
- clk_put(emc->backup_clk);
-put_pll_m:
- clk_put(emc->pll_m);
+unset_cb:
+ tegra20_clk_set_emc_round_callback(NULL, NULL);
return err;
}
diff --git a/drivers/memory/tegra/tegra30-emc.c b/drivers/memory/tegra/tegra30-emc.c
new file mode 100644
index 000000000000..6929980bf907
--- /dev/null
+++ b/drivers/memory/tegra/tegra30-emc.c
@@ -0,0 +1,1232 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Tegra30 External Memory Controller driver
+ *
+ * Based on downstream driver from NVIDIA and tegra124-emc.c
+ * Copyright (C) 2011-2014 NVIDIA Corporation
+ *
+ * Author: Dmitry Osipenko <digetx@gmail.com>
+ * Copyright (C) 2019 GRATE-DRIVER project
+ */
+
+#include <linux/clk.h>
+#include <linux/clk/tegra.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/sort.h>
+#include <linux/types.h>
+
+#include <soc/tegra/fuse.h>
+
+#include "mc.h"
+
+#define EMC_INTSTATUS 0x000
+#define EMC_INTMASK 0x004
+#define EMC_DBG 0x008
+#define EMC_CFG 0x00c
+#define EMC_REFCTRL 0x020
+#define EMC_TIMING_CONTROL 0x028
+#define EMC_RC 0x02c
+#define EMC_RFC 0x030
+#define EMC_RAS 0x034
+#define EMC_RP 0x038
+#define EMC_R2W 0x03c
+#define EMC_W2R 0x040
+#define EMC_R2P 0x044
+#define EMC_W2P 0x048
+#define EMC_RD_RCD 0x04c
+#define EMC_WR_RCD 0x050
+#define EMC_RRD 0x054
+#define EMC_REXT 0x058
+#define EMC_WDV 0x05c
+#define EMC_QUSE 0x060
+#define EMC_QRST 0x064
+#define EMC_QSAFE 0x068
+#define EMC_RDV 0x06c
+#define EMC_REFRESH 0x070
+#define EMC_BURST_REFRESH_NUM 0x074
+#define EMC_PDEX2WR 0x078
+#define EMC_PDEX2RD 0x07c
+#define EMC_PCHG2PDEN 0x080
+#define EMC_ACT2PDEN 0x084
+#define EMC_AR2PDEN 0x088
+#define EMC_RW2PDEN 0x08c
+#define EMC_TXSR 0x090
+#define EMC_TCKE 0x094
+#define EMC_TFAW 0x098
+#define EMC_TRPAB 0x09c
+#define EMC_TCLKSTABLE 0x0a0
+#define EMC_TCLKSTOP 0x0a4
+#define EMC_TREFBW 0x0a8
+#define EMC_QUSE_EXTRA 0x0ac
+#define EMC_ODT_WRITE 0x0b0
+#define EMC_ODT_READ 0x0b4
+#define EMC_WEXT 0x0b8
+#define EMC_CTT 0x0bc
+#define EMC_MRS_WAIT_CNT 0x0c8
+#define EMC_MRS 0x0cc
+#define EMC_EMRS 0x0d0
+#define EMC_SELF_REF 0x0e0
+#define EMC_MRW 0x0e8
+#define EMC_XM2DQSPADCTRL3 0x0f8
+#define EMC_FBIO_SPARE 0x100
+#define EMC_FBIO_CFG5 0x104
+#define EMC_FBIO_CFG6 0x114
+#define EMC_CFG_RSV 0x120
+#define EMC_AUTO_CAL_CONFIG 0x2a4
+#define EMC_AUTO_CAL_INTERVAL 0x2a8
+#define EMC_AUTO_CAL_STATUS 0x2ac
+#define EMC_STATUS 0x2b4
+#define EMC_CFG_2 0x2b8
+#define EMC_CFG_DIG_DLL 0x2bc
+#define EMC_CFG_DIG_DLL_PERIOD 0x2c0
+#define EMC_CTT_DURATION 0x2d8
+#define EMC_CTT_TERM_CTRL 0x2dc
+#define EMC_ZCAL_INTERVAL 0x2e0
+#define EMC_ZCAL_WAIT_CNT 0x2e4
+#define EMC_ZQ_CAL 0x2ec
+#define EMC_XM2CMDPADCTRL 0x2f0
+#define EMC_XM2DQSPADCTRL2 0x2fc
+#define EMC_XM2DQPADCTRL2 0x304
+#define EMC_XM2CLKPADCTRL 0x308
+#define EMC_XM2COMPPADCTRL 0x30c
+#define EMC_XM2VTTGENPADCTRL 0x310
+#define EMC_XM2VTTGENPADCTRL2 0x314
+#define EMC_XM2QUSEPADCTRL 0x318
+#define EMC_DLL_XFORM_DQS0 0x328
+#define EMC_DLL_XFORM_DQS1 0x32c
+#define EMC_DLL_XFORM_DQS2 0x330
+#define EMC_DLL_XFORM_DQS3 0x334
+#define EMC_DLL_XFORM_DQS4 0x338
+#define EMC_DLL_XFORM_DQS5 0x33c
+#define EMC_DLL_XFORM_DQS6 0x340
+#define EMC_DLL_XFORM_DQS7 0x344
+#define EMC_DLL_XFORM_QUSE0 0x348
+#define EMC_DLL_XFORM_QUSE1 0x34c
+#define EMC_DLL_XFORM_QUSE2 0x350
+#define EMC_DLL_XFORM_QUSE3 0x354
+#define EMC_DLL_XFORM_QUSE4 0x358
+#define EMC_DLL_XFORM_QUSE5 0x35c
+#define EMC_DLL_XFORM_QUSE6 0x360
+#define EMC_DLL_XFORM_QUSE7 0x364
+#define EMC_DLL_XFORM_DQ0 0x368
+#define EMC_DLL_XFORM_DQ1 0x36c
+#define EMC_DLL_XFORM_DQ2 0x370
+#define EMC_DLL_XFORM_DQ3 0x374
+#define EMC_DLI_TRIM_TXDQS0 0x3a8
+#define EMC_DLI_TRIM_TXDQS1 0x3ac
+#define EMC_DLI_TRIM_TXDQS2 0x3b0
+#define EMC_DLI_TRIM_TXDQS3 0x3b4
+#define EMC_DLI_TRIM_TXDQS4 0x3b8
+#define EMC_DLI_TRIM_TXDQS5 0x3bc
+#define EMC_DLI_TRIM_TXDQS6 0x3c0
+#define EMC_DLI_TRIM_TXDQS7 0x3c4
+#define EMC_STALL_THEN_EXE_BEFORE_CLKCHANGE 0x3c8
+#define EMC_STALL_THEN_EXE_AFTER_CLKCHANGE 0x3cc
+#define EMC_UNSTALL_RW_AFTER_CLKCHANGE 0x3d0
+#define EMC_SEL_DPD_CTRL 0x3d8
+#define EMC_PRE_REFRESH_REQ_CNT 0x3dc
+#define EMC_DYN_SELF_REF_CONTROL 0x3e0
+#define EMC_TXSRDLL 0x3e4
+
+#define EMC_STATUS_TIMING_UPDATE_STALLED BIT(23)
+
+#define EMC_MODE_SET_DLL_RESET BIT(8)
+#define EMC_MODE_SET_LONG_CNT BIT(26)
+
+#define EMC_SELF_REF_CMD_ENABLED BIT(0)
+
+#define DRAM_DEV_SEL_ALL (0 << 30)
+#define DRAM_DEV_SEL_0 (2 << 30)
+#define DRAM_DEV_SEL_1 (1 << 30)
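+/* address all DRAM devices at once when more than one is present */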
+#define DRAM_BROADCAST(num) \
+ ((num) > 1 ? DRAM_DEV_SEL_ALL : DRAM_DEV_SEL_0)
+
+#define EMC_ZQ_CAL_CMD BIT(0)
+#define EMC_ZQ_CAL_LONG BIT(4)
+#define EMC_ZQ_CAL_LONG_CMD_DEV0 \
+ (DRAM_DEV_SEL_0 | EMC_ZQ_CAL_LONG | EMC_ZQ_CAL_CMD)
+#define EMC_ZQ_CAL_LONG_CMD_DEV1 \
+ (DRAM_DEV_SEL_1 | EMC_ZQ_CAL_LONG | EMC_ZQ_CAL_CMD)
+
+#define EMC_DBG_READ_MUX_ASSEMBLY BIT(0)
+#define EMC_DBG_WRITE_MUX_ACTIVE BIT(1)
+#define EMC_DBG_FORCE_UPDATE BIT(2)
+#define EMC_DBG_CFG_PRIORITY BIT(24)
+
+#define EMC_CFG5_QUSE_MODE_SHIFT 13
+#define EMC_CFG5_QUSE_MODE_MASK (7 << EMC_CFG5_QUSE_MODE_SHIFT)
+
+#define EMC_CFG5_QUSE_MODE_INTERNAL_LPBK 2
+#define EMC_CFG5_QUSE_MODE_PULSE_INTERN 3
+
+#define EMC_SEL_DPD_CTRL_QUSE_DPD_ENABLE BIT(9)
+
+#define EMC_XM2COMPPADCTRL_VREF_CAL_ENABLE BIT(10)
+
+#define EMC_XM2QUSEPADCTRL_IVREF_ENABLE BIT(4)
+
+#define EMC_XM2DQSPADCTRL2_VREF_ENABLE BIT(5)
+#define EMC_XM2DQSPADCTRL3_VREF_ENABLE BIT(5)
+
+#define EMC_AUTO_CAL_STATUS_ACTIVE BIT(31)
+
+#define EMC_FBIO_CFG5_DRAM_TYPE_MASK 0x3
+
+#define EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK 0x3ff
+#define EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT 16
+#define EMC_MRS_WAIT_CNT_LONG_WAIT_MASK \
+ (0x3ff << EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT)
+
+#define EMC_REFCTRL_DEV_SEL_MASK 0x3
+#define EMC_REFCTRL_ENABLE BIT(31)
+#define EMC_REFCTRL_ENABLE_ALL(num) \
+ (((num) > 1 ? 0 : 2) | EMC_REFCTRL_ENABLE)
+#define EMC_REFCTRL_DISABLE_ALL(num) ((num) > 1 ? 0 : 2)
+
+#define EMC_CFG_PERIODIC_QRST BIT(21)
+#define EMC_CFG_DYN_SREF_ENABLE BIT(28)
+
+#define EMC_CLKCHANGE_REQ_ENABLE BIT(0)
+#define EMC_CLKCHANGE_PD_ENABLE BIT(1)
+#define EMC_CLKCHANGE_SR_ENABLE BIT(2)
+
+#define EMC_TIMING_UPDATE BIT(0)
+
+#define EMC_REFRESH_OVERFLOW_INT BIT(3)
+#define EMC_CLKCHANGE_COMPLETE_INT BIT(4)
+
+enum emc_dram_type {
+ DRAM_TYPE_DDR3,
+ DRAM_TYPE_DDR1,
+ DRAM_TYPE_LPDDR2,
+ DRAM_TYPE_DDR2,
+};
+
+enum emc_dll_change {
+ DLL_CHANGE_NONE,
+ DLL_CHANGE_ON,
+ DLL_CHANGE_OFF
+};
+
+static const u16 emc_timing_registers[] = {
+ [0] = EMC_RC,
+ [1] = EMC_RFC,
+ [2] = EMC_RAS,
+ [3] = EMC_RP,
+ [4] = EMC_R2W,
+ [5] = EMC_W2R,
+ [6] = EMC_R2P,
+ [7] = EMC_W2P,
+ [8] = EMC_RD_RCD,
+ [9] = EMC_WR_RCD,
+ [10] = EMC_RRD,
+ [11] = EMC_REXT,
+ [12] = EMC_WEXT,
+ [13] = EMC_WDV,
+ [14] = EMC_QUSE,
+ [15] = EMC_QRST,
+ [16] = EMC_QSAFE,
+ [17] = EMC_RDV,
+ [18] = EMC_REFRESH,
+ [19] = EMC_BURST_REFRESH_NUM,
+ [20] = EMC_PRE_REFRESH_REQ_CNT,
+ [21] = EMC_PDEX2WR,
+ [22] = EMC_PDEX2RD,
+ [23] = EMC_PCHG2PDEN,
+ [24] = EMC_ACT2PDEN,
+ [25] = EMC_AR2PDEN,
+ [26] = EMC_RW2PDEN,
+ [27] = EMC_TXSR,
+ [28] = EMC_TXSRDLL,
+ [29] = EMC_TCKE,
+ [30] = EMC_TFAW,
+ [31] = EMC_TRPAB,
+ [32] = EMC_TCLKSTABLE,
+ [33] = EMC_TCLKSTOP,
+ [34] = EMC_TREFBW,
+ [35] = EMC_QUSE_EXTRA,
+ [36] = EMC_FBIO_CFG6,
+ [37] = EMC_ODT_WRITE,
+ [38] = EMC_ODT_READ,
+ [39] = EMC_FBIO_CFG5,
+ [40] = EMC_CFG_DIG_DLL,
+ [41] = EMC_CFG_DIG_DLL_PERIOD,
+ [42] = EMC_DLL_XFORM_DQS0,
+ [43] = EMC_DLL_XFORM_DQS1,
+ [44] = EMC_DLL_XFORM_DQS2,
+ [45] = EMC_DLL_XFORM_DQS3,
+ [46] = EMC_DLL_XFORM_DQS4,
+ [47] = EMC_DLL_XFORM_DQS5,
+ [48] = EMC_DLL_XFORM_DQS6,
+ [49] = EMC_DLL_XFORM_DQS7,
+ [50] = EMC_DLL_XFORM_QUSE0,
+ [51] = EMC_DLL_XFORM_QUSE1,
+ [52] = EMC_DLL_XFORM_QUSE2,
+ [53] = EMC_DLL_XFORM_QUSE3,
+ [54] = EMC_DLL_XFORM_QUSE4,
+ [55] = EMC_DLL_XFORM_QUSE5,
+ [56] = EMC_DLL_XFORM_QUSE6,
+ [57] = EMC_DLL_XFORM_QUSE7,
+ [58] = EMC_DLI_TRIM_TXDQS0,
+ [59] = EMC_DLI_TRIM_TXDQS1,
+ [60] = EMC_DLI_TRIM_TXDQS2,
+ [61] = EMC_DLI_TRIM_TXDQS3,
+ [62] = EMC_DLI_TRIM_TXDQS4,
+ [63] = EMC_DLI_TRIM_TXDQS5,
+ [64] = EMC_DLI_TRIM_TXDQS6,
+ [65] = EMC_DLI_TRIM_TXDQS7,
+ [66] = EMC_DLL_XFORM_DQ0,
+ [67] = EMC_DLL_XFORM_DQ1,
+ [68] = EMC_DLL_XFORM_DQ2,
+ [69] = EMC_DLL_XFORM_DQ3,
+ [70] = EMC_XM2CMDPADCTRL,
+ [71] = EMC_XM2DQSPADCTRL2,
+ [72] = EMC_XM2DQPADCTRL2,
+ [73] = EMC_XM2CLKPADCTRL,
+ [74] = EMC_XM2COMPPADCTRL,
+ [75] = EMC_XM2VTTGENPADCTRL,
+ [76] = EMC_XM2VTTGENPADCTRL2,
+ [77] = EMC_XM2QUSEPADCTRL,
+ [78] = EMC_XM2DQSPADCTRL3,
+ [79] = EMC_CTT_TERM_CTRL,
+ [80] = EMC_ZCAL_INTERVAL,
+ [81] = EMC_ZCAL_WAIT_CNT,
+ [82] = EMC_MRS_WAIT_CNT,
+ [83] = EMC_AUTO_CAL_CONFIG,
+ [84] = EMC_CTT,
+ [85] = EMC_CTT_DURATION,
+ [86] = EMC_DYN_SELF_REF_CONTROL,
+ [87] = EMC_FBIO_SPARE,
+ [88] = EMC_CFG_RSV,
+};
+
+struct emc_timing {
+ unsigned long rate;
+
+ u32 data[ARRAY_SIZE(emc_timing_registers)];
+
+ u32 emc_auto_cal_interval;
+ u32 emc_mode_1;
+ u32 emc_mode_2;
+ u32 emc_mode_reset;
+ u32 emc_zcal_cnt_long;
+ bool emc_cfg_periodic_qrst;
+ bool emc_cfg_dyn_self_ref;
+};
+
+struct tegra_emc {
+ struct device *dev;
+ struct tegra_mc *mc;
+ struct completion clk_handshake_complete;
+ struct notifier_block clk_nb;
+ struct clk *clk;
+ void __iomem *regs;
+ unsigned int irq;
+
+ struct emc_timing *timings;
+ unsigned int num_timings;
+
+ u32 mc_override;
+ u32 emc_cfg;
+
+ u32 emc_mode_1;
+ u32 emc_mode_2;
+ u32 emc_mode_reset;
+
+ bool vref_cal_toggle : 1;
+ bool zcal_long : 1;
+ bool dll_on : 1;
+ bool prepared : 1;
+ bool bad_state : 1;
+};
+
+static irqreturn_t tegra_emc_isr(int irq, void *data)
+{
+ struct tegra_emc *emc = data;
+ u32 intmask = EMC_REFRESH_OVERFLOW_INT | EMC_CLKCHANGE_COMPLETE_INT;
+ u32 status;
+
+ status = readl_relaxed(emc->regs + EMC_INTSTATUS) & intmask;
+ if (!status)
+ return IRQ_NONE;
+
+ /* notify about EMC-CAR handshake completion */
+ if (status & EMC_CLKCHANGE_COMPLETE_INT)
+ complete(&emc->clk_handshake_complete);
+
+ /* notify about HW problem */
+ if (status & EMC_REFRESH_OVERFLOW_INT)
+ dev_err_ratelimited(emc->dev,
+ "refresh request overflow timeout\n");
+
+ /* clear interrupts */
+ writel_relaxed(status, emc->regs + EMC_INTSTATUS);
+
+ return IRQ_HANDLED;
+}
+
+static struct emc_timing *emc_find_timing(struct tegra_emc *emc,
+ unsigned long rate)
+{
+ struct emc_timing *timing = NULL;
+ unsigned int i;
+
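+	/* timings are sorted by rate; the first entry >= rate is the match */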
+ for (i = 0; i < emc->num_timings; i++) {
+ if (emc->timings[i].rate >= rate) {
+ timing = &emc->timings[i];
+ break;
+ }
+ }
+
+ if (!timing) {
+ dev_err(emc->dev, "no timing for rate %lu\n", rate);
+ return NULL;
+ }
+
+ return timing;
+}
+
+static bool emc_dqs_preset(struct tegra_emc *emc, struct emc_timing *timing,
+ bool *schmitt_to_vref)
+{
+ bool preset = false;
+ u32 val;
+
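+	/* pre-enable the pad VREF generators that the new timing relies on */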
+ if (timing->data[71] & EMC_XM2DQSPADCTRL2_VREF_ENABLE) {
+ val = readl_relaxed(emc->regs + EMC_XM2DQSPADCTRL2);
+
+ if (!(val & EMC_XM2DQSPADCTRL2_VREF_ENABLE)) {
+ val |= EMC_XM2DQSPADCTRL2_VREF_ENABLE;
+ writel_relaxed(val, emc->regs + EMC_XM2DQSPADCTRL2);
+
+ preset = true;
+ }
+ }
+
+ if (timing->data[78] & EMC_XM2DQSPADCTRL3_VREF_ENABLE) {
+ val = readl_relaxed(emc->regs + EMC_XM2DQSPADCTRL3);
+
+ if (!(val & EMC_XM2DQSPADCTRL3_VREF_ENABLE)) {
+ val |= EMC_XM2DQSPADCTRL3_VREF_ENABLE;
+ writel_relaxed(val, emc->regs + EMC_XM2DQSPADCTRL3);
+
+ preset = true;
+ }
+ }
+
+ if (timing->data[77] & EMC_XM2QUSEPADCTRL_IVREF_ENABLE) {
+ val = readl_relaxed(emc->regs + EMC_XM2QUSEPADCTRL);
+
+ if (!(val & EMC_XM2QUSEPADCTRL_IVREF_ENABLE)) {
+ val |= EMC_XM2QUSEPADCTRL_IVREF_ENABLE;
+ writel_relaxed(val, emc->regs + EMC_XM2QUSEPADCTRL);
+
+ *schmitt_to_vref = true;
+ preset = true;
+ }
+ }
+
+ return preset;
+}
+
+static int emc_seq_update_timing(struct tegra_emc *emc)
+{
+ u32 val;
+ int err;
+
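+	/* latch the shadowed timing registers and wait for the update */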
+ writel_relaxed(EMC_TIMING_UPDATE, emc->regs + EMC_TIMING_CONTROL);
+
+ err = readl_relaxed_poll_timeout_atomic(emc->regs + EMC_STATUS, val,
+ !(val & EMC_STATUS_TIMING_UPDATE_STALLED),
+ 1, 200);
+ if (err) {
+ dev_err(emc->dev, "failed to update timing: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int emc_prepare_mc_clk_cfg(struct tegra_emc *emc, unsigned long rate)
+{
+ struct tegra_mc *mc = emc->mc;
+ unsigned int misc0_index = 16;
+ unsigned int i;
+ bool same;
+
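+	/* BIT(27) of MISC0 (entry 16) flags MC/EMC same-frequency timings */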
+ for (i = 0; i < mc->num_timings; i++) {
+ if (mc->timings[i].rate != rate)
+ continue;
+
+ if (mc->timings[i].emem_data[misc0_index] & BIT(27))
+ same = true;
+ else
+ same = false;
+
+ return tegra20_clk_prepare_emc_mc_same_freq(emc->clk, same);
+ }
+
+ return -EINVAL;
+}
+
+static int emc_prepare_timing_change(struct tegra_emc *emc, unsigned long rate)
+{
+ struct emc_timing *timing = emc_find_timing(emc, rate);
+ enum emc_dll_change dll_change;
+ enum emc_dram_type dram_type;
+ bool schmitt_to_vref = false;
+ unsigned int pre_wait = 0;
+ bool qrst_used = false;
+ unsigned int dram_num;
+ unsigned int i;
+ u32 fbio_cfg5;
+ u32 emc_dbg;
+ u32 val;
+ int err;
+
+ if (!timing || emc->bad_state)
+ return -EINVAL;
+
+ dev_dbg(emc->dev, "%s: using timing rate %lu for requested rate %lu\n",
+ __func__, timing->rate, rate);
+
+ emc->bad_state = true;
+
+ err = emc_prepare_mc_clk_cfg(emc, rate);
+ if (err) {
+ dev_err(emc->dev, "mc clock preparation failed: %d\n", err);
+ return err;
+ }
+
+ emc->vref_cal_toggle = false;
+ emc->mc_override = mc_readl(emc->mc, MC_EMEM_ARB_OVERRIDE);
+ emc->emc_cfg = readl_relaxed(emc->regs + EMC_CFG);
+ emc_dbg = readl_relaxed(emc->regs + EMC_DBG);
+
+ if (emc->dll_on == !!(timing->emc_mode_1 & 0x1))
+ dll_change = DLL_CHANGE_NONE;
+ else if (timing->emc_mode_1 & 0x1)
+ dll_change = DLL_CHANGE_ON;
+ else
+ dll_change = DLL_CHANGE_OFF;
+
+ emc->dll_on = !!(timing->emc_mode_1 & 0x1);
+
+ if (timing->data[80] && !readl_relaxed(emc->regs + EMC_ZCAL_INTERVAL))
+ emc->zcal_long = true;
+ else
+ emc->zcal_long = false;
+
+ fbio_cfg5 = readl_relaxed(emc->regs + EMC_FBIO_CFG5);
+ dram_type = fbio_cfg5 & EMC_FBIO_CFG5_DRAM_TYPE_MASK;
+
+ dram_num = tegra_mc_get_emem_device_count(emc->mc);
+
+ /* disable dynamic self-refresh */
+ if (emc->emc_cfg & EMC_CFG_DYN_SREF_ENABLE) {
+ emc->emc_cfg &= ~EMC_CFG_DYN_SREF_ENABLE;
+ writel_relaxed(emc->emc_cfg, emc->regs + EMC_CFG);
+
+ pre_wait = 5;
+ }
+
+ /* update MC arbiter settings */
+ val = mc_readl(emc->mc, MC_EMEM_ARB_OUTSTANDING_REQ);
+ if (!(val & MC_EMEM_ARB_OUTSTANDING_REQ_HOLDOFF_OVERRIDE) ||
+ ((val & MC_EMEM_ARB_OUTSTANDING_REQ_MAX_MASK) > 0x50)) {
+
+ val = MC_EMEM_ARB_OUTSTANDING_REQ_LIMIT_ENABLE |
+ MC_EMEM_ARB_OUTSTANDING_REQ_HOLDOFF_OVERRIDE | 0x50;
+ mc_writel(emc->mc, val, MC_EMEM_ARB_OUTSTANDING_REQ);
+ mc_writel(emc->mc, MC_TIMING_UPDATE, MC_TIMING_CONTROL);
+ }
+
+ if (emc->mc_override & MC_EMEM_ARB_OVERRIDE_EACK_MASK)
+ mc_writel(emc->mc,
+ emc->mc_override & ~MC_EMEM_ARB_OVERRIDE_EACK_MASK,
+ MC_EMEM_ARB_OVERRIDE);
+
+ /* check DQ/DQS VREF delay */
+ if (emc_dqs_preset(emc, timing, &schmitt_to_vref)) {
+ if (pre_wait < 3)
+ pre_wait = 3;
+ }
+
+ if (pre_wait) {
+ err = emc_seq_update_timing(emc);
+ if (err)
+ return err;
+
+ udelay(pre_wait);
+ }
+
+ /* disable auto-calibration if VREF mode is switching */
+ if (timing->emc_auto_cal_interval) {
+ val = readl_relaxed(emc->regs + EMC_XM2COMPPADCTRL);
+ val ^= timing->data[74];
+
+ if (val & EMC_XM2COMPPADCTRL_VREF_CAL_ENABLE) {
+ writel_relaxed(0, emc->regs + EMC_AUTO_CAL_INTERVAL);
+
+ err = readl_relaxed_poll_timeout_atomic(
+ emc->regs + EMC_AUTO_CAL_STATUS, val,
+ !(val & EMC_AUTO_CAL_STATUS_ACTIVE), 1, 300);
+ if (err) {
+ dev_err(emc->dev,
+ "failed to disable auto-cal: %d\n",
+ err);
+ return err;
+ }
+
+ emc->vref_cal_toggle = true;
+ }
+ }
+
+ /* program shadow registers */
+ for (i = 0; i < ARRAY_SIZE(timing->data); i++) {
+ /* EMC_XM2CLKPADCTRL should be programmed separately */
+ if (i != 73)
+ writel_relaxed(timing->data[i],
+ emc->regs + emc_timing_registers[i]);
+ }
+
+ err = tegra_mc_write_emem_configuration(emc->mc, timing->rate);
+ if (err)
+ return err;
+
+ /* DDR3: predict MRS long wait count */
+ if (dram_type == DRAM_TYPE_DDR3 && dll_change == DLL_CHANGE_ON) {
+ u32 cnt = 512;
+
+ if (emc->zcal_long)
+ cnt -= dram_num * 256;
+
+ val = timing->data[82] & EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK;
+ if (cnt < val)
+ cnt = val;
+
+ val = timing->data[82] & ~EMC_MRS_WAIT_CNT_LONG_WAIT_MASK;
+ val |= (cnt << EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT) &
+ EMC_MRS_WAIT_CNT_LONG_WAIT_MASK;
+
+ writel_relaxed(val, emc->regs + EMC_MRS_WAIT_CNT);
+ }
+
+ /* disable interrupt since read access is prohibited after stalling */
+ disable_irq(emc->irq);
+
+ /* this read also completes the writes */
+ val = readl_relaxed(emc->regs + EMC_SEL_DPD_CTRL);
+
+ if (!(val & EMC_SEL_DPD_CTRL_QUSE_DPD_ENABLE) && schmitt_to_vref) {
+ u32 cur_mode, new_mode;
+
+ cur_mode = fbio_cfg5 & EMC_CFG5_QUSE_MODE_MASK;
+ cur_mode >>= EMC_CFG5_QUSE_MODE_SHIFT;
+
+ new_mode = timing->data[39] & EMC_CFG5_QUSE_MODE_MASK;
+ new_mode >>= EMC_CFG5_QUSE_MODE_SHIFT;
+
+ if ((cur_mode != EMC_CFG5_QUSE_MODE_PULSE_INTERN &&
+ cur_mode != EMC_CFG5_QUSE_MODE_INTERNAL_LPBK) ||
+ (new_mode != EMC_CFG5_QUSE_MODE_PULSE_INTERN &&
+ new_mode != EMC_CFG5_QUSE_MODE_INTERNAL_LPBK))
+ qrst_used = true;
+ }
+
+ /* flow control marker 1 */
+ writel_relaxed(0x1, emc->regs + EMC_STALL_THEN_EXE_BEFORE_CLKCHANGE);
+
+ /* enable periodic reset */
+ if (qrst_used) {
+ writel_relaxed(emc_dbg | EMC_DBG_WRITE_MUX_ACTIVE,
+ emc->regs + EMC_DBG);
+ writel_relaxed(emc->emc_cfg | EMC_CFG_PERIODIC_QRST,
+ emc->regs + EMC_CFG);
+ writel_relaxed(emc_dbg, emc->regs + EMC_DBG);
+ }
+
+ /* disable auto-refresh to save time after clock change */
+ writel_relaxed(EMC_REFCTRL_DISABLE_ALL(dram_num),
+ emc->regs + EMC_REFCTRL);
+
+ /* turn off DLL and enter self-refresh on DDR3 */
+ if (dram_type == DRAM_TYPE_DDR3) {
+ if (dll_change == DLL_CHANGE_OFF)
+ writel_relaxed(timing->emc_mode_1,
+ emc->regs + EMC_EMRS);
+
+ writel_relaxed(DRAM_BROADCAST(dram_num) |
+ EMC_SELF_REF_CMD_ENABLED,
+ emc->regs + EMC_SELF_REF);
+ }
+
+ /* flow control marker 2 */
+ writel_relaxed(0x1, emc->regs + EMC_STALL_THEN_EXE_AFTER_CLKCHANGE);
+
+ /* enable write-active MUX, update unshadowed pad control */
+ writel_relaxed(emc_dbg | EMC_DBG_WRITE_MUX_ACTIVE, emc->regs + EMC_DBG);
+ writel_relaxed(timing->data[73], emc->regs + EMC_XM2CLKPADCTRL);
+
+ /* restore periodic QRST and disable write-active MUX */
+ val = !!(emc->emc_cfg & EMC_CFG_PERIODIC_QRST);
+ if (qrst_used || timing->emc_cfg_periodic_qrst != val) {
+ if (timing->emc_cfg_periodic_qrst)
+ emc->emc_cfg |= EMC_CFG_PERIODIC_QRST;
+ else
+ emc->emc_cfg &= ~EMC_CFG_PERIODIC_QRST;
+
+ writel_relaxed(emc->emc_cfg, emc->regs + EMC_CFG);
+ }
+ writel_relaxed(emc_dbg, emc->regs + EMC_DBG);
+
+ /* exit self-refresh on DDR3 */
+ if (dram_type == DRAM_TYPE_DDR3)
+ writel_relaxed(DRAM_BROADCAST(dram_num),
+ emc->regs + EMC_SELF_REF);
+
+ /* set DRAM-mode registers */
+ if (dram_type == DRAM_TYPE_DDR3) {
+ if (timing->emc_mode_1 != emc->emc_mode_1)
+ writel_relaxed(timing->emc_mode_1,
+ emc->regs + EMC_EMRS);
+
+ if (timing->emc_mode_2 != emc->emc_mode_2)
+ writel_relaxed(timing->emc_mode_2,
+ emc->regs + EMC_EMRS);
+
+ if (timing->emc_mode_reset != emc->emc_mode_reset ||
+ dll_change == DLL_CHANGE_ON) {
+ val = timing->emc_mode_reset;
+ if (dll_change == DLL_CHANGE_ON) {
+ val |= EMC_MODE_SET_DLL_RESET;
+ val |= EMC_MODE_SET_LONG_CNT;
+ } else {
+ val &= ~EMC_MODE_SET_DLL_RESET;
+ }
+ writel_relaxed(val, emc->regs + EMC_MRS);
+ }
+ } else {
+ if (timing->emc_mode_2 != emc->emc_mode_2)
+ writel_relaxed(timing->emc_mode_2,
+ emc->regs + EMC_MRW);
+
+ if (timing->emc_mode_1 != emc->emc_mode_1)
+ writel_relaxed(timing->emc_mode_1,
+ emc->regs + EMC_MRW);
+ }
+
+ emc->emc_mode_1 = timing->emc_mode_1;
+ emc->emc_mode_2 = timing->emc_mode_2;
+ emc->emc_mode_reset = timing->emc_mode_reset;
+
+ /* issue ZCAL command if turning ZCAL on */
+ if (emc->zcal_long) {
+ writel_relaxed(EMC_ZQ_CAL_LONG_CMD_DEV0,
+ emc->regs + EMC_ZQ_CAL);
+
+ if (dram_num > 1)
+ writel_relaxed(EMC_ZQ_CAL_LONG_CMD_DEV1,
+ emc->regs + EMC_ZQ_CAL);
+ }
+
+ /* re-enable auto-refresh */
+ writel_relaxed(EMC_REFCTRL_ENABLE_ALL(dram_num),
+ emc->regs + EMC_REFCTRL);
+
+ /* flow control marker 3 */
+ writel_relaxed(0x1, emc->regs + EMC_UNSTALL_RW_AFTER_CLKCHANGE);
+
+ reinit_completion(&emc->clk_handshake_complete);
+
+ /* interrupt can be re-enabled now */
+ enable_irq(emc->irq);
+
+ emc->bad_state = false;
+ emc->prepared = true;
+
+ return 0;
+}
+
+static int emc_complete_timing_change(struct tegra_emc *emc,
+ unsigned long rate)
+{
+ struct emc_timing *timing = emc_find_timing(emc, rate);
+ unsigned long timeout;
+ int ret;
+
+ timeout = wait_for_completion_timeout(&emc->clk_handshake_complete,
+ msecs_to_jiffies(100));
+ if (timeout == 0) {
+		dev_err(emc->dev, "EMC-CAR handshake failed\n");
+ emc->bad_state = true;
+ return -EIO;
+ }
+
+ /* restore auto-calibration */
+ if (emc->vref_cal_toggle)
+ writel_relaxed(timing->emc_auto_cal_interval,
+ emc->regs + EMC_AUTO_CAL_INTERVAL);
+
+ /* restore dynamic self-refresh */
+ if (timing->emc_cfg_dyn_self_ref) {
+ emc->emc_cfg |= EMC_CFG_DYN_SREF_ENABLE;
+ writel_relaxed(emc->emc_cfg, emc->regs + EMC_CFG);
+ }
+
+ /* set number of clocks to wait after each ZQ command */
+ if (emc->zcal_long)
+ writel_relaxed(timing->emc_zcal_cnt_long,
+ emc->regs + EMC_ZCAL_WAIT_CNT);
+
+ udelay(2);
+ /* update restored timing */
+ ret = emc_seq_update_timing(emc);
+ if (ret)
+ emc->bad_state = true;
+
+ /* restore early ACK */
+ mc_writel(emc->mc, emc->mc_override, MC_EMEM_ARB_OVERRIDE);
+
+ emc->prepared = false;
+
+ return ret;
+}
+
+static int emc_unprepare_timing_change(struct tegra_emc *emc,
+ unsigned long rate)
+{
+ if (emc->prepared && !emc->bad_state) {
+ /* shouldn't ever happen in practice */
+ dev_err(emc->dev, "timing configuration can't be reverted\n");
+ emc->bad_state = true;
+ }
+
+ return 0;
+}
+
+static int emc_clk_change_notify(struct notifier_block *nb,
+ unsigned long msg, void *data)
+{
+ struct tegra_emc *emc = container_of(nb, struct tegra_emc, clk_nb);
+ struct clk_notifier_data *cnd = data;
+ int err;
+
+ switch (msg) {
+ case PRE_RATE_CHANGE:
+ err = emc_prepare_timing_change(emc, cnd->new_rate);
+ break;
+
+ case ABORT_RATE_CHANGE:
+ err = emc_unprepare_timing_change(emc, cnd->old_rate);
+ break;
+
+ case POST_RATE_CHANGE:
+ err = emc_complete_timing_change(emc, cnd->new_rate);
+ break;
+
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return notifier_from_errno(err);
+}
+
+static int load_one_timing_from_dt(struct tegra_emc *emc,
+ struct emc_timing *timing,
+ struct device_node *node)
+{
+ u32 value;
+ int err;
+
+ err = of_property_read_u32(node, "clock-frequency", &value);
+ if (err) {
+ dev_err(emc->dev, "timing %pOF: failed to read rate: %d\n",
+ node, err);
+ return err;
+ }
+
+ timing->rate = value;
+
+ err = of_property_read_u32_array(node, "nvidia,emc-configuration",
+ timing->data,
+ ARRAY_SIZE(emc_timing_registers));
+ if (err) {
+ dev_err(emc->dev,
+ "timing %pOF: failed to read emc timing data: %d\n",
+ node, err);
+ return err;
+ }
+
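+/* helpers to keep the repetitive DT property parsing compact */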
+#define EMC_READ_BOOL(prop, dtprop) \
+ timing->prop = of_property_read_bool(node, dtprop);
+
+#define EMC_READ_U32(prop, dtprop) \
+ err = of_property_read_u32(node, dtprop, &timing->prop); \
+ if (err) { \
+ dev_err(emc->dev, \
+ "timing %pOFn: failed to read " #prop ": %d\n", \
+ node, err); \
+ return err; \
+ }
+
+ EMC_READ_U32(emc_auto_cal_interval, "nvidia,emc-auto-cal-interval")
+ EMC_READ_U32(emc_mode_1, "nvidia,emc-mode-1")
+ EMC_READ_U32(emc_mode_2, "nvidia,emc-mode-2")
+ EMC_READ_U32(emc_mode_reset, "nvidia,emc-mode-reset")
+ EMC_READ_U32(emc_zcal_cnt_long, "nvidia,emc-zcal-cnt-long")
+ EMC_READ_BOOL(emc_cfg_dyn_self_ref, "nvidia,emc-cfg-dyn-self-ref")
+ EMC_READ_BOOL(emc_cfg_periodic_qrst, "nvidia,emc-cfg-periodic-qrst")
+
+#undef EMC_READ_U32
+#undef EMC_READ_BOOL
+
+ dev_dbg(emc->dev, "%s: %pOF: rate %lu\n", __func__, node, timing->rate);
+
+ return 0;
+}
+
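+/* sort() comparator: order the timings by ascending rate */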
+static int cmp_timings(const void *_a, const void *_b)
+{
+ const struct emc_timing *a = _a;
+ const struct emc_timing *b = _b;
+
+ if (a->rate < b->rate)
+ return -1;
+
+ if (a->rate > b->rate)
+ return 1;
+
+ return 0;
+}
+
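+/*
+ * The EMC and MC timings are programmed in lockstep on a rate
+ * change, hence both tables must describe the same set of rates.
+ */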
+static int emc_check_mc_timings(struct tegra_emc *emc)
+{
+ struct tegra_mc *mc = emc->mc;
+ unsigned int i;
+
+ if (emc->num_timings != mc->num_timings) {
+ dev_err(emc->dev, "emc/mc timings number mismatch: %u %u\n",
+ emc->num_timings, mc->num_timings);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < mc->num_timings; i++) {
+ if (emc->timings[i].rate != mc->timings[i].rate) {
+ dev_err(emc->dev,
+ "emc/mc timing rate mismatch: %lu %lu\n",
+ emc->timings[i].rate, mc->timings[i].rate);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int emc_load_timings_from_dt(struct tegra_emc *emc,
+ struct device_node *node)
+{
+ struct device_node *child;
+ struct emc_timing *timing;
+ int child_count;
+ int err;
+
+ child_count = of_get_child_count(node);
+ if (!child_count) {
+ dev_err(emc->dev, "no memory timings in: %pOF\n", node);
+ return -EINVAL;
+ }
+
+ emc->timings = devm_kcalloc(emc->dev, child_count, sizeof(*timing),
+ GFP_KERNEL);
+ if (!emc->timings)
+ return -ENOMEM;
+
+ emc->num_timings = child_count;
+ timing = emc->timings;
+
+ for_each_child_of_node(node, child) {
+ err = load_one_timing_from_dt(emc, timing++, child);
+ if (err) {
+ of_node_put(child);
+ return err;
+ }
+ }
+
+ sort(emc->timings, emc->num_timings, sizeof(*timing), cmp_timings,
+ NULL);
+
+ err = emc_check_mc_timings(emc);
+ if (err)
+ return err;
+
+ dev_info(emc->dev,
+ "got %u timings for RAM code %u (min %luMHz max %luMHz)\n",
+ emc->num_timings,
+ tegra_read_ram_code(),
+ emc->timings[0].rate / 1000000,
+ emc->timings[emc->num_timings - 1].rate / 1000000);
+
+ return 0;
+}
+
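+/* select the device-tree timings node that matches the active RAM code */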
+static struct device_node *emc_find_node_by_ram_code(struct device *dev)
+{
+ struct device_node *np;
+ u32 value, ram_code;
+ int err;
+
+ ram_code = tegra_read_ram_code();
+
+ for_each_child_of_node(dev->of_node, np) {
+ err = of_property_read_u32(np, "nvidia,ram-code", &value);
+ if (err || value != ram_code)
+ continue;
+
+ return np;
+ }
+
+ dev_err(dev, "no memory timings for RAM code %u found in device-tree\n",
+ ram_code);
+
+ return NULL;
+}
+
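+/* program the static EMC configuration: handshake mode, interrupts, debug */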
+static int emc_setup_hw(struct tegra_emc *emc)
+{
+ u32 intmask = EMC_REFRESH_OVERFLOW_INT | EMC_CLKCHANGE_COMPLETE_INT;
+ u32 fbio_cfg5, emc_cfg, emc_dbg;
+ enum emc_dram_type dram_type;
+
+ fbio_cfg5 = readl_relaxed(emc->regs + EMC_FBIO_CFG5);
+ dram_type = fbio_cfg5 & EMC_FBIO_CFG5_DRAM_TYPE_MASK;
+
+ emc_cfg = readl_relaxed(emc->regs + EMC_CFG_2);
+
+ /* enable EMC and CAR to handshake on PLL divider/source changes */
+ emc_cfg |= EMC_CLKCHANGE_REQ_ENABLE;
+
+	/* configure the clock-change mode according to the DRAM type */
+ switch (dram_type) {
+ case DRAM_TYPE_LPDDR2:
+ emc_cfg |= EMC_CLKCHANGE_PD_ENABLE;
+ emc_cfg &= ~EMC_CLKCHANGE_SR_ENABLE;
+ break;
+
+ default:
+ emc_cfg &= ~EMC_CLKCHANGE_SR_ENABLE;
+ emc_cfg &= ~EMC_CLKCHANGE_PD_ENABLE;
+ break;
+ }
+
+ writel_relaxed(emc_cfg, emc->regs + EMC_CFG_2);
+
+ /* initialize interrupt */
+ writel_relaxed(intmask, emc->regs + EMC_INTMASK);
+ writel_relaxed(0xffffffff, emc->regs + EMC_INTSTATUS);
+
+ /* ensure that unwanted debug features are disabled */
+ emc_dbg = readl_relaxed(emc->regs + EMC_DBG);
+ emc_dbg |= EMC_DBG_CFG_PRIORITY;
+ emc_dbg &= ~EMC_DBG_READ_MUX_ASSEMBLY;
+ emc_dbg &= ~EMC_DBG_WRITE_MUX_ACTIVE;
+ emc_dbg &= ~EMC_DBG_FORCE_UPDATE;
+ writel_relaxed(emc_dbg, emc->regs + EMC_DBG);
+
+ return 0;
+}
+
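+/*
+ * Round-rate callback for the Tegra30 clock driver: select the
+ * slowest timing that satisfies the requested rate within the
+ * [min_rate, max_rate] bounds, stepping down to the next slower
+ * timing when the best match would exceed max_rate.
+ */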
+static long emc_round_rate(unsigned long rate,
+ unsigned long min_rate,
+ unsigned long max_rate,
+ void *arg)
+{
+ struct emc_timing *timing = NULL;
+ struct tegra_emc *emc = arg;
+ unsigned int i;
+
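+	/*
+	 * Clamp min_rate to the highest available timing so that at
+	 * least one timing can always satisfy the lower bound.
+	 */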
+ min_rate = min(min_rate, emc->timings[emc->num_timings - 1].rate);
+
+ for (i = 0; i < emc->num_timings; i++) {
+ if (emc->timings[i].rate < rate && i != emc->num_timings - 1)
+ continue;
+
+ if (emc->timings[i].rate > max_rate) {
+ i = max(i, 1u) - 1;
+
+ if (emc->timings[i].rate < min_rate)
+ break;
+ }
+
+ if (emc->timings[i].rate < min_rate)
+ continue;
+
+ timing = &emc->timings[i];
+ break;
+ }
+
+ if (!timing) {
+ dev_err(emc->dev, "no timing for rate %lu min %lu max %lu\n",
+ rate, min_rate, max_rate);
+ return -EINVAL;
+ }
+
+ return timing->rate;
+}
+
+static int tegra_emc_probe(struct platform_device *pdev)
+{
+ struct platform_device *mc;
+ struct device_node *np;
+ struct tegra_emc *emc;
+ int err;
+
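+	/* the EMC timings are optional, the driver is a no-op without them */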
+ if (of_get_child_count(pdev->dev.of_node) == 0) {
+ dev_info(&pdev->dev,
+ "device-tree node doesn't have memory timings\n");
+ return 0;
+ }
+
+ np = of_parse_phandle(pdev->dev.of_node, "nvidia,memory-controller", 0);
+ if (!np) {
+ dev_err(&pdev->dev, "could not get memory controller node\n");
+ return -ENOENT;
+ }
+
+ mc = of_find_device_by_node(np);
+ of_node_put(np);
+ if (!mc)
+ return -ENOENT;
+
+ np = emc_find_node_by_ram_code(&pdev->dev);
+ if (!np)
+ return -EINVAL;
+
+ emc = devm_kzalloc(&pdev->dev, sizeof(*emc), GFP_KERNEL);
+ if (!emc) {
+ of_node_put(np);
+ return -ENOMEM;
+ }
+
+	emc->mc = platform_get_drvdata(mc);
+	if (!emc->mc) {
+		/* don't leak the timings node reference on probe deferral */
+		of_node_put(np);
+		return -EPROBE_DEFER;
+	}
+
+ init_completion(&emc->clk_handshake_complete);
+ emc->clk_nb.notifier_call = emc_clk_change_notify;
+ emc->dev = &pdev->dev;
+
+ err = emc_load_timings_from_dt(emc, np);
+ of_node_put(np);
+ if (err)
+ return err;
+
+ emc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(emc->regs))
+ return PTR_ERR(emc->regs);
+
+ err = emc_setup_hw(emc);
+ if (err)
+ return err;
+
+ err = platform_get_irq(pdev, 0);
+ if (err < 0) {
+ dev_err(&pdev->dev, "interrupt not specified: %d\n", err);
+ return err;
+ }
+ emc->irq = err;
+
+ err = devm_request_irq(&pdev->dev, emc->irq, tegra_emc_isr, 0,
+ dev_name(&pdev->dev), emc);
+ if (err) {
+ dev_err(&pdev->dev, "failed to request irq: %d\n", err);
+ return err;
+ }
+
+ tegra20_clk_set_emc_round_callback(emc_round_rate, emc);
+
+ emc->clk = devm_clk_get(&pdev->dev, "emc");
+ if (IS_ERR(emc->clk)) {
+ err = PTR_ERR(emc->clk);
+ dev_err(&pdev->dev, "failed to get emc clock: %d\n", err);
+ goto unset_cb;
+ }
+
+ err = clk_notifier_register(emc->clk, &emc->clk_nb);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register clk notifier: %d\n",
+ err);
+ goto unset_cb;
+ }
+
+ platform_set_drvdata(pdev, emc);
+
+ return 0;
+
+unset_cb:
+ tegra20_clk_set_emc_round_callback(NULL, NULL);
+
+ return err;
+}
+
+static int tegra_emc_suspend(struct device *dev)
+{
+ struct tegra_emc *emc = dev_get_drvdata(dev);
+
+	/*
+	 * Suspending in a bad state will hang the machine. The "prepared"
+	 * flag must always be false here, unless a kernel bug caused the
+	 * suspend to happen in the wrong order.
+	 */
+ if (WARN_ON(emc->prepared) || emc->bad_state)
+ return -EINVAL;
+
+ emc->bad_state = true;
+
+ return 0;
+}
+
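+/* restore the hardware configuration that was lost across suspend */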
+static int tegra_emc_resume(struct device *dev)
+{
+ struct tegra_emc *emc = dev_get_drvdata(dev);
+
+ emc_setup_hw(emc);
+ emc->bad_state = false;
+
+ return 0;
+}
+
+static const struct dev_pm_ops tegra_emc_pm_ops = {
+ .suspend = tegra_emc_suspend,
+ .resume = tegra_emc_resume,
+};
+
+static const struct of_device_id tegra_emc_of_match[] = {
+ { .compatible = "nvidia,tegra30-emc", },
+ {},
+};
+
+static struct platform_driver tegra_emc_driver = {
+ .probe = tegra_emc_probe,
+ .driver = {
+ .name = "tegra30-emc",
+ .of_match_table = tegra_emc_of_match,
+ .pm = &tegra_emc_pm_ops,
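+		/* sysfs unbind is unsupported, no remove() is provided and
+		 * the round-rate callback would be left dangling */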
+ .suppress_bind_attrs = true,
+ },
+};
+
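+/*
+ * Register at subsys_initcall level so that EMC rate scaling is
+ * available before the consumers of the EMC clock are probed.
+ */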
+static int __init tegra_emc_init(void)
+{
+ return platform_driver_register(&tegra_emc_driver);
+}
+subsys_initcall(tegra_emc_init);
diff --git a/drivers/memory/tegra/tegra30.c b/drivers/memory/tegra/tegra30.c
index 14788fc2f9e8..fcdd812eed80 100644
--- a/drivers/memory/tegra/tegra30.c
+++ b/drivers/memory/tegra/tegra30.c
@@ -10,6 +10,27 @@
#include "mc.h"
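+/* MC registers holding the per-timing EMEM arbitration configuration */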
+static const unsigned long tegra30_mc_emem_regs[] = {
+ MC_EMEM_ARB_CFG,
+ MC_EMEM_ARB_OUTSTANDING_REQ,
+ MC_EMEM_ARB_TIMING_RCD,
+ MC_EMEM_ARB_TIMING_RP,
+ MC_EMEM_ARB_TIMING_RC,
+ MC_EMEM_ARB_TIMING_RAS,
+ MC_EMEM_ARB_TIMING_FAW,
+ MC_EMEM_ARB_TIMING_RRD,
+ MC_EMEM_ARB_TIMING_RAP2PRE,
+ MC_EMEM_ARB_TIMING_WAP2PRE,
+ MC_EMEM_ARB_TIMING_R2R,
+ MC_EMEM_ARB_TIMING_W2W,
+ MC_EMEM_ARB_TIMING_R2W,
+ MC_EMEM_ARB_TIMING_W2R,
+ MC_EMEM_ARB_DA_TURNS,
+ MC_EMEM_ARB_DA_COVERS,
+ MC_EMEM_ARB_MISC0,
+ MC_EMEM_ARB_RING1_THROTTLE,
+};
+
static const struct tegra_mc_client tegra30_mc_clients[] = {
{
.id = 0x00,
@@ -931,16 +952,19 @@ static const struct tegra_smmu_swgroup tegra30_swgroups[] = {
{ .name = "isp", .swgroup = TEGRA_SWGROUP_ISP, .reg = 0x258 },
};
-static const unsigned int tegra30_group_display[] = {
+static const unsigned int tegra30_group_drm[] = {
TEGRA_SWGROUP_DC,
TEGRA_SWGROUP_DCB,
+ TEGRA_SWGROUP_G2,
+ TEGRA_SWGROUP_NV,
+ TEGRA_SWGROUP_NV2,
};
static const struct tegra_smmu_group_soc tegra30_groups[] = {
{
- .name = "display",
- .swgroups = tegra30_group_display,
- .num_swgroups = ARRAY_SIZE(tegra30_group_display),
+ .name = "drm",
+ .swgroups = tegra30_group_drm,
+ .num_swgroups = ARRAY_SIZE(tegra30_group_drm),
},
};
@@ -994,6 +1018,8 @@ const struct tegra_mc_soc tegra30_mc_soc = {
.atom_size = 16,
.client_id_mask = 0x7f,
.smmu = &tegra30_smmu_soc,
+ .emem_regs = tegra30_mc_emem_regs,
+ .num_emem_regs = ARRAY_SIZE(tegra30_mc_emem_regs),
.intmask = MC_INT_INVALID_SMMU_PAGE | MC_INT_SECURITY_VIOLATION |
MC_INT_DECERR_EMEM,
.reset_ops = &tegra_mc_reset_ops_common,