diff options
author | Arnd Bergmann <arnd@arndb.de> | 2020-05-25 23:19:05 +0200 |
---|---|---|
committer | Arnd Bergmann <arnd@arndb.de> | 2020-05-25 23:19:06 +0200 |
commit | 502afe7f0432248c9ee18263f404cf491d91f604 (patch) | |
tree | d6c9efce9421a8d1e4f77158f5cd0d561a376383 /drivers/soc | |
parent | Merge tag 'v5.7-next-soc.2' of git://git.kernel.org/pub/scm/linux/kernel/git/... (diff) | |
parent | Revert "soc: qcom: rpmh: Allow RPMH driver to be loaded as a module" (diff) | |
download | linux-502afe7f0432248c9ee18263f404cf491d91f604.tar.xz linux-502afe7f0432248c9ee18263f404cf491d91f604.zip |
Merge tag 'qcom-drivers-for-5.8' of git://git.kernel.org/pub/scm/linux/kernel/git/qcom/linux into arm/drivers
Qualcomm driver updates for v5.8
This contains a large set of cleanups, bug fixes, general improvements
and documentation fixes for the RPMH driver. It adds a debugfs mechanism
for inspecting the Command DB. Socinfo gained the "soc_id" attribute and
SoC ID definitions for various variants of MSM8939.
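For context, a minimal userspace sketch of how the two new inspection points could be read once this lands. The paths are assumptions, not spelled out in the merge itself: the Command DB dump is registered at the debugfs root as "cmd-db" (typically /sys/kernel/debug/cmd-db), and the soc_id attribute is exported through the SoC device (commonly /sys/devices/soc0/soc_id).

```c
/* Illustrative reader; file paths are assumptions, not part of this merge. */
#include <stdio.h>

static void dump_file(const char *path)
{
	char buf[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return;
	}
	while (fgets(buf, sizeof(buf), f))
		fputs(buf, stdout);
	fclose(f);
}

int main(void)
{
	/* Command DB dump (requires CONFIG_DEBUG_FS and root). */
	dump_file("/sys/kernel/debug/cmd-db");
	/* New socinfo attribute exported via the soc_device framework. */
	dump_file("/sys/devices/soc0/soc_id");
	return 0;
}
```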
RPMH, RPMPD and RPMHPD were made buildable as modules, but the RPMH
change had to be reverted due to a compilation issue when tracing is
enabled.
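As a rough sketch of what such a bool-to-tristate conversion usually involves on the driver side (the identifiers below are generic placeholders, not the actual rpmpd/rpmhpd code, which keeps its core_initcall registration):

```c
/* Generic sketch of module boilerplate; names are placeholders. */
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int example_pd_probe(struct platform_device *pdev)
{
	/* A real driver would register its power domains here. */
	return 0;
}

static const struct of_device_id example_pd_match[] = {
	{ .compatible = "vendor,example-pd" },	/* placeholder compatible */
	{ }
};
/* Lets userspace autoload the driver when it is built as a module. */
MODULE_DEVICE_TABLE(of, example_pd_match);

static struct platform_driver example_pd_driver = {
	.probe = example_pd_probe,
	.driver = {
		.name = "example_pd",
		.of_match_table = example_pd_match,
	},
};
module_platform_driver(example_pd_driver);

/* Module metadata required once the Kconfig symbol becomes tristate. */
MODULE_DESCRIPTION("Example power domain driver");
MODULE_LICENSE("GPL v2");
```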
RPMHPD gained power-domains for the SM8250 voltage corners.
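To illustrate what these power domains are used for, a hedged consumer-side sketch follows: a hypothetical device attached to one RPMh power domain through a "power-domains" DT reference votes for a performance state (voltage corner). The device, its binding, and the corner value are made up for illustration.

```c
/* Hypothetical consumer; domain binding and corner value are illustrative. */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

static int example_consumer_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int ret;

	/* Power on the domain this device is attached to (e.g. SM8250 CX). */
	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		pm_runtime_disable(dev);
		return ret;
	}

	/* Vote for a voltage corner; 64 is an arbitrary example level. */
	return dev_pm_genpd_set_performance_state(dev, 64);
}
```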
The SCM driver gained fixes for two build warnings, and the SMP2P driver
had an unnecessary error print removed.
* tag 'qcom-drivers-for-5.8' of git://git.kernel.org/pub/scm/linux/kernel/git/qcom/linux: (42 commits)
Revert "soc: qcom: rpmh: Allow RPMH driver to be loaded as a module"
soc: qcom: rpmh-rsc: Remove the pm_lock
soc: qcom: rpmh-rsc: Simplify locking by eliminating the per-TCS lock
kernel/cpu_pm: Fix uninitted local in cpu_pm
soc: qcom: rpmh-rsc: We aren't notified of our own failure w/ NOTIFY_BAD
soc: qcom: rpmh-rsc: Correctly ignore CPU_CLUSTER_PM notifications
firmware: qcom_scm-legacy: Replace zero-length array with flexible-array
soc: qcom: rpmh-rsc: Timeout after 1 second in write_tcs_reg_sync()
soc: qcom: rpmh-rsc: Factor "tcs_reg_addr" and "tcs_cmd_addr" calculation
soc: qcom: socinfo: add msm8936/39 and apq8036/39 soc ids
soc: qcom: aoss: Add SM8250 compatible
soc: qcom: pdr: Remove impossible error condition
soc: qcom: rpmh: Dirt can only make you dirtier, not cleaner
soc: qcom: rpmhpd: Add SM8250 power domains
firmware: qcom_scm: fix bogous abuse of dma-direct internals
dt-bindings: soc: qcom: apr: Use generic node names for APR services
firmware: qcom_scm: Remove unneeded conversion to bool
soc: qcom: cmd-db: Properly endian swap the slv_id for debugfs
soc: qcom: cmd-db: Use 5 digits for printing address
soc: qcom: cmd-db: Cast sizeof() to int to silence field width warning
...
Link: https://lore.kernel.org/r/20200519052533.1250024-1-bjorn.andersson@linaro.org
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'drivers/soc')
-rw-r--r-- | drivers/soc/qcom/Kconfig | 6
-rw-r--r-- | drivers/soc/qcom/cmd-db.c | 78
-rw-r--r-- | drivers/soc/qcom/pdr_interface.c | 4
-rw-r--r-- | drivers/soc/qcom/qcom_aoss.c | 1
-rw-r--r-- | drivers/soc/qcom/rpmh-internal.h | 59
-rw-r--r-- | drivers/soc/qcom/rpmh-rsc.c | 746
-rw-r--r-- | drivers/soc/qcom/rpmh.c | 97
-rw-r--r-- | drivers/soc/qcom/rpmhpd.c | 24
-rw-r--r-- | drivers/soc/qcom/rpmpd.c | 5
-rw-r--r-- | drivers/soc/qcom/smp2p.c | 4
-rw-r--r-- | drivers/soc/qcom/socinfo.c | 6
11 files changed, 743 insertions, 287 deletions
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index bf42a17a45de..19332ea40234 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -117,7 +117,7 @@ config QCOM_RPMH help apply the aggregated state on the resource. config QCOM_RPMHPD - bool "Qualcomm RPMh Power domain driver" + tristate "Qualcomm RPMh Power domain driver" depends on QCOM_RPMH && QCOM_COMMAND_DB help QCOM RPMh Power domain driver to support power-domains with @@ -126,8 +126,8 @@ config QCOM_RPMHPD for the voltage rail. config QCOM_RPMPD - bool "Qualcomm RPM Power domain driver" - depends on QCOM_SMD_RPM=y + tristate "Qualcomm RPM Power domain driver" + depends on QCOM_SMD_RPM help QCOM RPM Power domain driver to support power-domains with performance states. The driver communicates a performance state diff --git a/drivers/soc/qcom/cmd-db.c b/drivers/soc/qcom/cmd-db.c index f6c3d17b05c7..fc5610603b17 100644 --- a/drivers/soc/qcom/cmd-db.c +++ b/drivers/soc/qcom/cmd-db.c @@ -1,12 +1,13 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. */ +#include <linux/debugfs.h> #include <linux/kernel.h> #include <linux/of.h> #include <linux/of_address.h> -#include <linux/of_platform.h> #include <linux/of_reserved_mem.h> #include <linux/platform_device.h> +#include <linux/seq_file.h> #include <linux/types.h> #include <soc/qcom/cmd-db.h> @@ -236,6 +237,77 @@ enum cmd_db_hw_type cmd_db_read_slave_id(const char *id) } EXPORT_SYMBOL(cmd_db_read_slave_id); +#ifdef CONFIG_DEBUG_FS +static int cmd_db_debugfs_dump(struct seq_file *seq, void *p) +{ + int i, j; + const struct rsc_hdr *rsc; + const struct entry_header *ent; + const char *name; + u16 len, version; + u8 major, minor; + + seq_puts(seq, "Command DB DUMP\n"); + + for (i = 0; i < MAX_SLV_ID; i++) { + rsc = &cmd_db_header->header[i]; + if (!rsc->slv_id) + break; + + switch (le16_to_cpu(rsc->slv_id)) { + case CMD_DB_HW_ARC: + name = "ARC"; + break; + case CMD_DB_HW_VRM: + name = "VRM"; + break; + case CMD_DB_HW_BCM: + name = "BCM"; + break; + default: + name = "Unknown"; + break; + } + + version = le16_to_cpu(rsc->version); + major = version >> 8; + minor = version; + + seq_printf(seq, "Slave %s (v%u.%u)\n", name, major, minor); + seq_puts(seq, "-------------------------\n"); + + ent = rsc_to_entry_header(rsc); + for (j = 0; j < le16_to_cpu(rsc->cnt); j++, ent++) { + seq_printf(seq, "0x%05x: %*pEp", le32_to_cpu(ent->addr), + (int)sizeof(ent->id), ent->id); + + len = le16_to_cpu(ent->len); + if (len) { + seq_printf(seq, " [%*ph]", + len, rsc_offset(rsc, ent)); + } + seq_putc(seq, '\n'); + } + } + + return 0; +} + +static int open_cmd_db_debugfs(struct inode *inode, struct file *file) +{ + return single_open(file, cmd_db_debugfs_dump, inode->i_private); +} +#endif + +static const struct file_operations cmd_db_debugfs_ops = { +#ifdef CONFIG_DEBUG_FS + .open = open_cmd_db_debugfs, +#endif + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + static int cmd_db_dev_probe(struct platform_device *pdev) { struct reserved_mem *rmem; @@ -259,12 +331,14 @@ static int cmd_db_dev_probe(struct platform_device *pdev) return -EINVAL; } + debugfs_create_file("cmd-db", 0400, NULL, NULL, &cmd_db_debugfs_ops); + return 0; } static const struct of_device_id cmd_db_match_table[] = { { .compatible = "qcom,cmd-db" }, - { }, + { } }; static struct platform_driver cmd_db_dev_driver = { diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c index 
17ad3b8698e1..bdcf16f88a97 100644 --- a/drivers/soc/qcom/pdr_interface.c +++ b/drivers/soc/qcom/pdr_interface.c @@ -155,10 +155,6 @@ static int pdr_register_listener(struct pdr_handle *pdr, return ret; } - if ((int)resp.curr_state < INT_MIN || (int)resp.curr_state > INT_MAX) - pr_err("PDR: %s notification state invalid: 0x%x\n", - pds->service_path, resp.curr_state); - pds->state = resp.curr_state; return 0; diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c index f43a2e07ee83..ed2c687c16b3 100644 --- a/drivers/soc/qcom/qcom_aoss.c +++ b/drivers/soc/qcom/qcom_aoss.c @@ -599,6 +599,7 @@ static const struct of_device_id qmp_dt_match[] = { { .compatible = "qcom,sc7180-aoss-qmp", }, { .compatible = "qcom,sdm845-aoss-qmp", }, { .compatible = "qcom,sm8150-aoss-qmp", }, + { .compatible = "qcom,sm8250-aoss-qmp", }, {} }; MODULE_DEVICE_TABLE(of, qmp_dt_match); diff --git a/drivers/soc/qcom/rpmh-internal.h b/drivers/soc/qcom/rpmh-internal.h index 6eec32b97f83..ef60e790a750 100644 --- a/drivers/soc/qcom/rpmh-internal.h +++ b/drivers/soc/qcom/rpmh-internal.h @@ -22,16 +22,23 @@ struct rsc_drv; * struct tcs_group: group of Trigger Command Sets (TCS) to send state requests * to the controller * - * @drv: the controller - * @type: type of the TCS in this group - active, sleep, wake - * @mask: mask of the TCSes relative to all the TCSes in the RSC - * @offset: start of the TCS group relative to the TCSes in the RSC - * @num_tcs: number of TCSes in this type - * @ncpt: number of commands in each TCS - * @lock: lock for synchronizing this TCS writes - * @req: requests that are sent from the TCS - * @cmd_cache: flattened cache of cmds in sleep/wake TCS - * @slots: indicates which of @cmd_addr are occupied + * @drv: The controller. + * @type: Type of the TCS in this group - active, sleep, wake. + * @mask: Mask of the TCSes relative to all the TCSes in the RSC. + * @offset: Start of the TCS group relative to the TCSes in the RSC. + * @num_tcs: Number of TCSes in this type. + * @ncpt: Number of commands in each TCS. + * @req: Requests that are sent from the TCS; only used for ACTIVE_ONLY + * transfers (could be on a wake/sleep TCS if we are borrowing for + * an ACTIVE_ONLY transfer). + * Start: grab drv->lock, set req, set tcs_in_use, drop drv->lock, + * trigger + * End: get irq, access req, + * grab drv->lock, clear tcs_in_use, drop drv->lock + * @slots: Indicates which of @cmd_addr are occupied; only used for + * SLEEP / WAKE TCSs. Things are tightly packed in the + * case that (ncpt < MAX_CMDS_PER_TCS). That is if ncpt = 2 and + * MAX_CMDS_PER_TCS = 16 then bit[2] = the first bit in 2nd TCS. */ struct tcs_group { struct rsc_drv *drv; @@ -40,9 +47,7 @@ struct tcs_group { u32 offset; int num_tcs; int ncpt; - spinlock_t lock; const struct tcs_request *req[MAX_TCS_PER_TYPE]; - u32 *cmd_cache; DECLARE_BITMAP(slots, MAX_TCS_SLOTS); }; @@ -84,20 +89,32 @@ struct rpmh_ctrlr { * struct rsc_drv: the Direct Resource Voter (DRV) of the * Resource State Coordinator controller (RSC) * - * @name: controller identifier - * @tcs_base: start address of the TCS registers in this controller - * @id: instance id in the controller (Direct Resource Voter) - * @num_tcs: number of TCSes in this DRV - * @tcs: TCS groups - * @tcs_in_use: s/w state of the TCS - * @lock: synchronize state of the controller - * @client: handle to the DRV's client. + * @name: Controller identifier. + * @tcs_base: Start address of the TCS registers in this controller. + * @id: Instance id in the controller (Direct Resource Voter). 
+ * @num_tcs: Number of TCSes in this DRV. + * @rsc_pm: CPU PM notifier for controller. + * Used when solver mode is not present. + * @cpus_in_pm: Number of CPUs not in idle power collapse. + * Used when solver mode is not present. + * @tcs: TCS groups. + * @tcs_in_use: S/W state of the TCS; only set for ACTIVE_ONLY + * transfers, but might show a sleep/wake TCS in use if + * it was borrowed for an active_only transfer. You + * must hold the lock in this struct (AKA drv->lock) in + * order to update this. + * @lock: Synchronize state of the controller. If RPMH's cache + * lock will also be held, the order is: drv->lock then + * cache_lock. + * @client: Handle to the DRV's client. */ struct rsc_drv { const char *name; void __iomem *tcs_base; int id; int num_tcs; + struct notifier_block rsc_pm; + atomic_t cpus_in_pm; struct tcs_group tcs[TCS_TYPE_NR]; DECLARE_BITMAP(tcs_in_use, MAX_TCS_NR); spinlock_t lock; @@ -107,7 +124,7 @@ struct rsc_drv { int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg); int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv, const struct tcs_request *msg); -int rpmh_rsc_invalidate(struct rsc_drv *drv); +void rpmh_rsc_invalidate(struct rsc_drv *drv); void rpmh_tx_done(const struct tcs_request *msg, int r); int rpmh_flush(struct rpmh_ctrlr *ctrlr); diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c index b71822131f59..076fd27f3081 100644 --- a/drivers/soc/qcom/rpmh-rsc.c +++ b/drivers/soc/qcom/rpmh-rsc.c @@ -6,9 +6,11 @@ #define pr_fmt(fmt) "%s " fmt, KBUILD_MODNAME #include <linux/atomic.h> +#include <linux/cpu_pm.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/io.h> +#include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/of.h> @@ -30,21 +32,41 @@ #define RSC_DRV_TCS_OFFSET 672 #define RSC_DRV_CMD_OFFSET 20 -/* DRV Configuration Information Register */ +/* DRV HW Solver Configuration Information Register */ +#define DRV_SOLVER_CONFIG 0x04 +#define DRV_HW_SOLVER_MASK 1 +#define DRV_HW_SOLVER_SHIFT 24 + +/* DRV TCS Configuration Information Register */ #define DRV_PRNT_CHLD_CONFIG 0x0C #define DRV_NUM_TCS_MASK 0x3F #define DRV_NUM_TCS_SHIFT 6 #define DRV_NCPT_MASK 0x1F #define DRV_NCPT_SHIFT 27 -/* Register offsets */ +/* Offsets for common TCS Registers, one bit per TCS */ #define RSC_DRV_IRQ_ENABLE 0x00 #define RSC_DRV_IRQ_STATUS 0x04 -#define RSC_DRV_IRQ_CLEAR 0x08 -#define RSC_DRV_CMD_WAIT_FOR_CMPL 0x10 +#define RSC_DRV_IRQ_CLEAR 0x08 /* w/o; write 1 to clear */ + +/* + * Offsets for per TCS Registers. + * + * TCSes start at 0x10 from tcs_base and are stored one after another. + * Multiply tcs_id by RSC_DRV_TCS_OFFSET to find a given TCS and add one + * of the below to find a register. + */ +#define RSC_DRV_CMD_WAIT_FOR_CMPL 0x10 /* 1 bit per command */ #define RSC_DRV_CONTROL 0x14 -#define RSC_DRV_STATUS 0x18 -#define RSC_DRV_CMD_ENABLE 0x1C +#define RSC_DRV_STATUS 0x18 /* zero if tcs is busy */ +#define RSC_DRV_CMD_ENABLE 0x1C /* 1 bit per command */ + +/* + * Offsets for per command in a TCS. + * + * Commands (up to 16) start at 0x30 in a TCS; multiply command index + * by RSC_DRV_CMD_OFFSET and add one of the below to find a register. 
+ */ #define RSC_DRV_CMD_MSGID 0x30 #define RSC_DRV_CMD_ADDR 0x34 #define RSC_DRV_CMD_DATA 0x38 @@ -61,94 +83,179 @@ #define CMD_STATUS_ISSUED BIT(8) #define CMD_STATUS_COMPL BIT(16) -static u32 read_tcs_reg(struct rsc_drv *drv, int reg, int tcs_id, int cmd_id) +/* + * Here's a high level overview of how all the registers in RPMH work + * together: + * + * - The main rpmh-rsc address is the base of a register space that can + * be used to find overall configuration of the hardware + * (DRV_PRNT_CHLD_CONFIG). Also found within the rpmh-rsc register + * space are all the TCS blocks. The offset of the TCS blocks is + * specified in the device tree by "qcom,tcs-offset" and used to + * compute tcs_base. + * - TCS blocks come one after another. Type, count, and order are + * specified by the device tree as "qcom,tcs-config". + * - Each TCS block has some registers, then space for up to 16 commands. + * Note that though address space is reserved for 16 commands, fewer + * might be present. See ncpt (num cmds per TCS). + * + * Here's a picture: + * + * +---------------------------------------------------+ + * |RSC | + * | ctrl | + * | | + * | Drvs: | + * | +-----------------------------------------------+ | + * | |DRV0 | | + * | | ctrl/config | | + * | | IRQ | | + * | | | | + * | | TCSes: | | + * | | +------------------------------------------+ | | + * | | |TCS0 | | | | | | | | | | | | | | | + * | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15| | | + * | | | | | | | | | | | | | | | | | | + * | | +------------------------------------------+ | | + * | | +------------------------------------------+ | | + * | | |TCS1 | | | | | | | | | | | | | | | + * | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15| | | + * | | | | | | | | | | | | | | | | | | + * | | +------------------------------------------+ | | + * | | +------------------------------------------+ | | + * | | |TCS2 | | | | | | | | | | | | | | | + * | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15| | | + * | | | | | | | | | | | | | | | | | | + * | | +------------------------------------------+ | | + * | | ...... | | + * | +-----------------------------------------------+ | + * | +-----------------------------------------------+ | + * | |DRV1 | | + * | | (same as DRV0) | | + * | +-----------------------------------------------+ | + * | ...... 
| + * +---------------------------------------------------+ + */ + +static inline void __iomem * +tcs_reg_addr(const struct rsc_drv *drv, int reg, int tcs_id) { - return readl_relaxed(drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id + - RSC_DRV_CMD_OFFSET * cmd_id); + return drv->tcs_base + RSC_DRV_TCS_OFFSET * tcs_id + reg; } -static void write_tcs_cmd(struct rsc_drv *drv, int reg, int tcs_id, int cmd_id, - u32 data) +static inline void __iomem * +tcs_cmd_addr(const struct rsc_drv *drv, int reg, int tcs_id, int cmd_id) { - writel_relaxed(data, drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id + - RSC_DRV_CMD_OFFSET * cmd_id); + return tcs_reg_addr(drv, reg, tcs_id) + RSC_DRV_CMD_OFFSET * cmd_id; } -static void write_tcs_reg(struct rsc_drv *drv, int reg, int tcs_id, u32 data) +static u32 read_tcs_cmd(const struct rsc_drv *drv, int reg, int tcs_id, + int cmd_id) { - writel_relaxed(data, drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id); + return readl_relaxed(tcs_cmd_addr(drv, reg, tcs_id, cmd_id)); } -static void write_tcs_reg_sync(struct rsc_drv *drv, int reg, int tcs_id, - u32 data) +static u32 read_tcs_reg(const struct rsc_drv *drv, int reg, int tcs_id) { - writel(data, drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id); - for (;;) { - if (data == readl(drv->tcs_base + reg + - RSC_DRV_TCS_OFFSET * tcs_id)) - break; - udelay(1); - } + return readl_relaxed(tcs_reg_addr(drv, reg, tcs_id)); } -static bool tcs_is_free(struct rsc_drv *drv, int tcs_id) +static void write_tcs_cmd(const struct rsc_drv *drv, int reg, int tcs_id, + int cmd_id, u32 data) { - return !test_bit(tcs_id, drv->tcs_in_use) && - read_tcs_reg(drv, RSC_DRV_STATUS, tcs_id, 0); + writel_relaxed(data, tcs_cmd_addr(drv, reg, tcs_id, cmd_id)); } -static struct tcs_group *get_tcs_of_type(struct rsc_drv *drv, int type) +static void write_tcs_reg(const struct rsc_drv *drv, int reg, int tcs_id, + u32 data) { - return &drv->tcs[type]; + writel_relaxed(data, tcs_reg_addr(drv, reg, tcs_id)); } -static int tcs_invalidate(struct rsc_drv *drv, int type) +static void write_tcs_reg_sync(const struct rsc_drv *drv, int reg, int tcs_id, + u32 data) { - int m; - struct tcs_group *tcs; + u32 new_data; - tcs = get_tcs_of_type(drv, type); + writel(data, tcs_reg_addr(drv, reg, tcs_id)); + if (readl_poll_timeout_atomic(tcs_reg_addr(drv, reg, tcs_id), new_data, + new_data == data, 1, USEC_PER_SEC)) + pr_err("%s: error writing %#x to %d:%#x\n", drv->name, + data, tcs_id, reg); +} - spin_lock(&tcs->lock); - if (bitmap_empty(tcs->slots, MAX_TCS_SLOTS)) { - spin_unlock(&tcs->lock); - return 0; - } +/** + * tcs_is_free() - Return if a TCS is totally free. + * @drv: The RSC controller. + * @tcs_id: The global ID of this TCS. + * + * Returns true if nobody has claimed this TCS (by setting tcs_in_use). + * + * Context: Must be called with the drv->lock held. + * + * Return: true if the given TCS is free. + */ +static bool tcs_is_free(struct rsc_drv *drv, int tcs_id) +{ + return !test_bit(tcs_id, drv->tcs_in_use); +} + +/** + * tcs_invalidate() - Invalidate all TCSes of the given type (sleep or wake). + * @drv: The RSC controller. + * @type: SLEEP_TCS or WAKE_TCS + * + * This will clear the "slots" variable of the given tcs_group and also + * tell the hardware to forget about all entries. + * + * The caller must ensure that no other RPMH actions are happening when this + * function is called, since otherwise the device may immediately become + * used again even before this function exits. 
+ */ +static void tcs_invalidate(struct rsc_drv *drv, int type) +{ + int m; + struct tcs_group *tcs = &drv->tcs[type]; + + /* Caller ensures nobody else is running so no lock */ + if (bitmap_empty(tcs->slots, MAX_TCS_SLOTS)) + return; for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++) { - if (!tcs_is_free(drv, m)) { - spin_unlock(&tcs->lock); - return -EAGAIN; - } write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, m, 0); write_tcs_reg_sync(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, m, 0); } bitmap_zero(tcs->slots, MAX_TCS_SLOTS); - spin_unlock(&tcs->lock); - - return 0; } /** - * rpmh_rsc_invalidate - Invalidate sleep and wake TCSes + * rpmh_rsc_invalidate() - Invalidate sleep and wake TCSes. + * @drv: The RSC controller. * - * @drv: the RSC controller + * The caller must ensure that no other RPMH actions are happening when this + * function is called, since otherwise the device may immediately become + * used again even before this function exits. */ -int rpmh_rsc_invalidate(struct rsc_drv *drv) +void rpmh_rsc_invalidate(struct rsc_drv *drv) { - int ret; - - ret = tcs_invalidate(drv, SLEEP_TCS); - if (!ret) - ret = tcs_invalidate(drv, WAKE_TCS); - - return ret; + tcs_invalidate(drv, SLEEP_TCS); + tcs_invalidate(drv, WAKE_TCS); } +/** + * get_tcs_for_msg() - Get the tcs_group used to send the given message. + * @drv: The RSC controller. + * @msg: The message we want to send. + * + * This is normally pretty straightforward except if we are trying to send + * an ACTIVE_ONLY message but don't have any active_only TCSes. + * + * Return: A pointer to a tcs_group or an ERR_PTR. + */ static struct tcs_group *get_tcs_for_msg(struct rsc_drv *drv, const struct tcs_request *msg) { - int type, ret; + int type; struct tcs_group *tcs; switch (msg->state) { @@ -168,24 +275,33 @@ static struct tcs_group *get_tcs_for_msg(struct rsc_drv *drv, /* * If we are making an active request on a RSC that does not have a * dedicated TCS for active state use, then re-purpose a wake TCS to - * send active votes. - * NOTE: The driver must be aware that this RSC does not have a - * dedicated AMC, and therefore would invalidate the sleep and wake - * TCSes before making an active state request. + * send active votes. This is safe because we ensure any active-only + * transfers have finished before we use it (maybe by running from + * the last CPU in PM code). */ - tcs = get_tcs_of_type(drv, type); - if (msg->state == RPMH_ACTIVE_ONLY_STATE && !tcs->num_tcs) { - tcs = get_tcs_of_type(drv, WAKE_TCS); - if (tcs->num_tcs) { - ret = rpmh_rsc_invalidate(drv); - if (ret) - return ERR_PTR(ret); - } - } + tcs = &drv->tcs[type]; + if (msg->state == RPMH_ACTIVE_ONLY_STATE && !tcs->num_tcs) + tcs = &drv->tcs[WAKE_TCS]; return tcs; } +/** + * get_req_from_tcs() - Get a stashed request that was xfering on the given TCS. + * @drv: The RSC controller. + * @tcs_id: The global ID of this TCS. + * + * For ACTIVE_ONLY transfers we want to call back into the client when the + * transfer finishes. To do this we need the "request" that the client + * originally provided us. This function grabs the request that we stashed + * when we started the transfer. + * + * This only makes sense for ACTIVE_ONLY transfers since those are the only + * ones we track sending (the only ones we enable interrupts for and the only + * ones we call back to the client for). + * + * Return: The stashed request. 
+ */ static const struct tcs_request *get_req_from_tcs(struct rsc_drv *drv, int tcs_id) { @@ -202,7 +318,76 @@ static const struct tcs_request *get_req_from_tcs(struct rsc_drv *drv, } /** - * tcs_tx_done: TX Done interrupt handler + * __tcs_set_trigger() - Start xfer on a TCS or unset trigger on a borrowed TCS + * @drv: The controller. + * @tcs_id: The global ID of this TCS. + * @trigger: If true then untrigger/retrigger. If false then just untrigger. + * + * In the normal case we only ever call with "trigger=true" to start a + * transfer. That will un-trigger/disable the TCS from the last transfer + * then trigger/enable for this transfer. + * + * If we borrowed a wake TCS for an active-only transfer we'll also call + * this function with "trigger=false" to just do the un-trigger/disable + * before using the TCS for wake purposes again. + * + * Note that the AP is only in charge of triggering active-only transfers. + * The AP never triggers sleep/wake values using this function. + */ +static void __tcs_set_trigger(struct rsc_drv *drv, int tcs_id, bool trigger) +{ + u32 enable; + + /* + * HW req: Clear the DRV_CONTROL and enable TCS again + * While clearing ensure that the AMC mode trigger is cleared + * and then the mode enable is cleared. + */ + enable = read_tcs_reg(drv, RSC_DRV_CONTROL, tcs_id); + enable &= ~TCS_AMC_MODE_TRIGGER; + write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable); + enable &= ~TCS_AMC_MODE_ENABLE; + write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable); + + if (trigger) { + /* Enable the AMC mode on the TCS and then trigger the TCS */ + enable = TCS_AMC_MODE_ENABLE; + write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable); + enable |= TCS_AMC_MODE_TRIGGER; + write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable); + } +} + +/** + * enable_tcs_irq() - Enable or disable interrupts on the given TCS. + * @drv: The controller. + * @tcs_id: The global ID of this TCS. + * @enable: If true then enable; if false then disable + * + * We only ever call this when we borrow a wake TCS for an active-only + * transfer. For active-only TCSes interrupts are always left enabled. + */ +static void enable_tcs_irq(struct rsc_drv *drv, int tcs_id, bool enable) +{ + u32 data; + + data = readl_relaxed(drv->tcs_base + RSC_DRV_IRQ_ENABLE); + if (enable) + data |= BIT(tcs_id); + else + data &= ~BIT(tcs_id); + writel_relaxed(data, drv->tcs_base + RSC_DRV_IRQ_ENABLE); +} + +/** + * tcs_tx_done() - TX Done interrupt handler. + * @irq: The IRQ number (ignored). + * @p: Pointer to "struct rsc_drv". + * + * Called for ACTIVE_ONLY transfers (those are the only ones we enable the + * IRQ for) when a transfer is done. 
+ * + * Return: IRQ_HANDLED */ static irqreturn_t tcs_tx_done(int irq, void *p) { @@ -212,7 +397,7 @@ static irqreturn_t tcs_tx_done(int irq, void *p) const struct tcs_request *req; struct tcs_cmd *cmd; - irq_status = read_tcs_reg(drv, RSC_DRV_IRQ_STATUS, 0, 0); + irq_status = readl_relaxed(drv->tcs_base + RSC_DRV_IRQ_STATUS); for_each_set_bit(i, &irq_status, BITS_PER_LONG) { req = get_req_from_tcs(drv, i); @@ -226,7 +411,7 @@ static irqreturn_t tcs_tx_done(int irq, void *p) u32 sts; cmd = &req->cmds[j]; - sts = read_tcs_reg(drv, RSC_DRV_CMD_STATUS, i, j); + sts = read_tcs_cmd(drv, RSC_DRV_CMD_STATUS, i, j); if (!(sts & CMD_STATUS_ISSUED) || ((req->wait_for_compl || cmd->wait) && !(sts & CMD_STATUS_COMPL))) { @@ -237,13 +422,28 @@ static irqreturn_t tcs_tx_done(int irq, void *p) } trace_rpmh_tx_done(drv, i, req, err); + + /* + * If wake tcs was re-purposed for sending active + * votes, clear AMC trigger & enable modes and + * disable interrupt for this TCS + */ + if (!drv->tcs[ACTIVE_TCS].num_tcs) + __tcs_set_trigger(drv, i, false); skip: /* Reclaim the TCS */ write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, i, 0); write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, i, 0); - write_tcs_reg(drv, RSC_DRV_IRQ_CLEAR, 0, BIT(i)); + writel_relaxed(BIT(i), drv->tcs_base + RSC_DRV_IRQ_CLEAR); spin_lock(&drv->lock); clear_bit(i, drv->tcs_in_use); + /* + * Disable interrupt for WAKE TCS to avoid being + * spammed with interrupts coming when the solver + * sends its wake votes. + */ + if (!drv->tcs[ACTIVE_TCS].num_tcs) + enable_tcs_irq(drv, i, false); spin_unlock(&drv->lock); if (req) rpmh_tx_done(req, err); @@ -252,6 +452,16 @@ skip: return IRQ_HANDLED; } +/** + * __tcs_buffer_write() - Write to TCS hardware from a request; don't trigger. + * @drv: The controller. + * @tcs_id: The global ID of this TCS. + * @cmd_id: The index within the TCS to start writing. + * @msg: The message we want to send, which will contain several addr/data + * pairs to program (but few enough that they all fit in one TCS). + * + * This is used for all types of transfers (active, sleep, and wake). + */ static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id, const struct tcs_request *msg) { @@ -265,7 +475,7 @@ static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id, cmd_msgid |= msg->wait_for_compl ? CMD_MSGID_RESP_REQ : 0; cmd_msgid |= CMD_MSGID_WRITE; - cmd_complete = read_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, 0); + cmd_complete = read_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id); for (i = 0, j = cmd_id; i < msg->num_cmds; i++, j++) { cmd = &msg->cmds[i]; @@ -281,32 +491,30 @@ static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id, } write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, cmd_complete); - cmd_enable |= read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0); + cmd_enable |= read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id); write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, cmd_enable); } -static void __tcs_trigger(struct rsc_drv *drv, int tcs_id) -{ - u32 enable; - - /* - * HW req: Clear the DRV_CONTROL and enable TCS again - * While clearing ensure that the AMC mode trigger is cleared - * and then the mode enable is cleared. 
- */ - enable = read_tcs_reg(drv, RSC_DRV_CONTROL, tcs_id, 0); - enable &= ~TCS_AMC_MODE_TRIGGER; - write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable); - enable &= ~TCS_AMC_MODE_ENABLE; - write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable); - - /* Enable the AMC mode on the TCS and then trigger the TCS */ - enable = TCS_AMC_MODE_ENABLE; - write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable); - enable |= TCS_AMC_MODE_TRIGGER; - write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable); -} - +/** + * check_for_req_inflight() - Look to see if conflicting cmds are in flight. + * @drv: The controller. + * @tcs: A pointer to the tcs_group used for ACTIVE_ONLY transfers. + * @msg: The message we want to send, which will contain several addr/data + * pairs to program (but few enough that they all fit in one TCS). + * + * This will walk through the TCSes in the group and check if any of them + * appear to be sending to addresses referenced in the message. If it finds + * one it'll return -EBUSY. + * + * Only for use for active-only transfers. + * + * Must be called with the drv->lock held since that protects tcs_in_use. + * + * Return: 0 if nothing in flight or -EBUSY if we should try again later. + * The caller must re-enable interrupts between tries since that's + * the only way tcs_is_free() will ever return true and the only way + * RSC_DRV_CMD_ENABLE will ever be cleared. + */ static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs, const struct tcs_request *msg) { @@ -319,10 +527,10 @@ static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs, if (tcs_is_free(drv, tcs_id)) continue; - curr_enabled = read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0); + curr_enabled = read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id); for_each_set_bit(j, &curr_enabled, MAX_CMDS_PER_TCS) { - addr = read_tcs_reg(drv, RSC_DRV_CMD_ADDR, tcs_id, j); + addr = read_tcs_cmd(drv, RSC_DRV_CMD_ADDR, tcs_id, j); for (k = 0; k < msg->num_cmds; k++) { if (addr == msg->cmds[k].addr) return -EBUSY; @@ -333,6 +541,15 @@ static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs, return 0; } +/** + * find_free_tcs() - Find free tcs in the given tcs_group; only for active. + * @tcs: A pointer to the active-only tcs_group (or the wake tcs_group if + * we borrowed it because there are zero active-only ones). + * + * Must be called with the drv->lock held since that protects tcs_in_use. + * + * Return: The first tcs that's free. + */ static int find_free_tcs(struct tcs_group *tcs) { int i; @@ -345,6 +562,20 @@ static int find_free_tcs(struct tcs_group *tcs) return -EBUSY; } +/** + * tcs_write() - Store messages into a TCS right now, or return -EBUSY. + * @drv: The controller. + * @msg: The data to be sent. + * + * Grabs a TCS for ACTIVE_ONLY transfers and writes the messages to it. + * + * If there are no free TCSes for ACTIVE_ONLY transfers or if a command for + * the same address is already transferring returns -EBUSY which means the + * client should retry shortly. + * + * Return: 0 on success, -EBUSY if client should retry, or an error. + * Client should have interrupts enabled for a bit before retrying. 
+ */ static int tcs_write(struct rsc_drv *drv, const struct tcs_request *msg) { struct tcs_group *tcs; @@ -356,57 +587,77 @@ static int tcs_write(struct rsc_drv *drv, const struct tcs_request *msg) if (IS_ERR(tcs)) return PTR_ERR(tcs); - spin_lock_irqsave(&tcs->lock, flags); - spin_lock(&drv->lock); + spin_lock_irqsave(&drv->lock, flags); /* * The h/w does not like if we send a request to the same address, * when one is already in-flight or being processed. */ ret = check_for_req_inflight(drv, tcs, msg); - if (ret) { - spin_unlock(&drv->lock); - goto done_write; - } + if (ret) + goto unlock; - tcs_id = find_free_tcs(tcs); - if (tcs_id < 0) { - ret = tcs_id; - spin_unlock(&drv->lock); - goto done_write; - } + ret = find_free_tcs(tcs); + if (ret < 0) + goto unlock; + tcs_id = ret; tcs->req[tcs_id - tcs->offset] = msg; set_bit(tcs_id, drv->tcs_in_use); - spin_unlock(&drv->lock); + if (msg->state == RPMH_ACTIVE_ONLY_STATE && tcs->type != ACTIVE_TCS) { + /* + * Clear previously programmed WAKE commands in selected + * repurposed TCS to avoid triggering them. tcs->slots will be + * cleaned from rpmh_flush() by invoking rpmh_rsc_invalidate() + */ + write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0); + write_tcs_reg_sync(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, 0); + enable_tcs_irq(drv, tcs_id, true); + } + spin_unlock_irqrestore(&drv->lock, flags); + /* + * These two can be done after the lock is released because: + * - We marked "tcs_in_use" under lock. + * - Once "tcs_in_use" has been marked nobody else could be writing + * to these registers until the interrupt goes off. + * - The interrupt can't go off until we trigger w/ the last line + * of __tcs_set_trigger() below. + */ __tcs_buffer_write(drv, tcs_id, 0, msg); - __tcs_trigger(drv, tcs_id); + __tcs_set_trigger(drv, tcs_id, true); -done_write: - spin_unlock_irqrestore(&tcs->lock, flags); + return 0; +unlock: + spin_unlock_irqrestore(&drv->lock, flags); return ret; } /** - * rpmh_rsc_send_data: Validate the incoming message and write to the - * appropriate TCS block. + * rpmh_rsc_send_data() - Write / trigger active-only message. + * @drv: The controller. + * @msg: The data to be sent. * - * @drv: the controller - * @msg: the data to be sent + * NOTES: + * - This is only used for "ACTIVE_ONLY" since the limitations of this + * function don't make sense for sleep/wake cases. + * - To do the transfer, we will grab a whole TCS for ourselves--we don't + * try to share. If there are none available we'll wait indefinitely + * for a free one. + * - This function will not wait for the commands to be finished, only for + * data to be programmed into the RPMh. See rpmh_tx_done() which will + * be called when the transfer is fully complete. + * - This function must be called with interrupts enabled. If the hardware + * is busy doing someone else's transfer we need that transfer to fully + * finish so that we can have the hardware, and to fully finish it needs + * the interrupt handler to run. If the interrupts is set to run on the + * active CPU this can never happen if interrupts are disabled. * * Return: 0 on success, -EINVAL on error. - * Note: This call blocks until a valid data is written to the TCS. 
*/ int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg) { int ret; - if (!msg || !msg->cmds || !msg->num_cmds || - msg->num_cmds > MAX_RPMH_PAYLOAD) { - WARN_ON(1); - return -EINVAL; - } - do { ret = tcs_write(drv, msg); if (ret == -EBUSY) { @@ -419,43 +670,28 @@ int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg) return ret; } -static int find_match(const struct tcs_group *tcs, const struct tcs_cmd *cmd, - int len) -{ - int i, j; - - /* Check for already cached commands */ - for_each_set_bit(i, tcs->slots, MAX_TCS_SLOTS) { - if (tcs->cmd_cache[i] != cmd[0].addr) - continue; - if (i + len >= tcs->num_tcs * tcs->ncpt) - goto seq_err; - for (j = 0; j < len; j++) { - if (tcs->cmd_cache[i + j] != cmd[j].addr) - goto seq_err; - } - return i; - } - - return -ENODATA; - -seq_err: - WARN(1, "Message does not match previous sequence.\n"); - return -EINVAL; -} - +/** + * find_slots() - Find a place to write the given message. + * @tcs: The tcs group to search. + * @msg: The message we want to find room for. + * @tcs_id: If we return 0 from the function, we return the global ID of the + * TCS to write to here. + * @cmd_id: If we return 0 from the function, we return the index of + * the command array of the returned TCS where the client should + * start writing the message. + * + * Only for use on sleep/wake TCSes since those are the only ones we maintain + * tcs->slots for. + * + * Return: -ENOMEM if there was no room, else 0. + */ static int find_slots(struct tcs_group *tcs, const struct tcs_request *msg, int *tcs_id, int *cmd_id) { int slot, offset; int i = 0; - /* Find if we already have the msg in our TCS */ - slot = find_match(tcs, msg->cmds, msg->num_cmds); - if (slot >= 0) - goto copy_data; - - /* Do over, until we can fit the full payload in a TCS */ + /* Do over, until we can fit the full payload in a single TCS */ do { slot = bitmap_find_next_zero_area(tcs->slots, MAX_TCS_SLOTS, i, msg->num_cmds, 0); @@ -464,11 +700,7 @@ static int find_slots(struct tcs_group *tcs, const struct tcs_request *msg, i += tcs->ncpt; } while (slot + msg->num_cmds - 1 >= i); -copy_data: bitmap_set(tcs->slots, slot, msg->num_cmds); - /* Copy the addresses of the resources over to the slots */ - for (i = 0; i < msg->num_cmds; i++) - tcs->cmd_cache[slot + i] = msg->cmds[i].addr; offset = slot / tcs->ncpt; *tcs_id = offset + tcs->offset; @@ -477,52 +709,157 @@ copy_data: return 0; } -static int tcs_ctrl_write(struct rsc_drv *drv, const struct tcs_request *msg) +/** + * rpmh_rsc_write_ctrl_data() - Write request to controller but don't trigger. + * @drv: The controller. + * @msg: The data to be written to the controller. + * + * This should only be called for for sleep/wake state, never active-only + * state. + * + * The caller must ensure that no other RPMH actions are happening and the + * controller is idle when this function is called since it runs lockless. + * + * Return: 0 if no error; else -error. 
+ */ +int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv, const struct tcs_request *msg) { struct tcs_group *tcs; int tcs_id = 0, cmd_id = 0; - unsigned long flags; int ret; tcs = get_tcs_for_msg(drv, msg); if (IS_ERR(tcs)) return PTR_ERR(tcs); - spin_lock_irqsave(&tcs->lock, flags); /* find the TCS id and the command in the TCS to write to */ ret = find_slots(tcs, msg, &tcs_id, &cmd_id); if (!ret) __tcs_buffer_write(drv, tcs_id, cmd_id, msg); - spin_unlock_irqrestore(&tcs->lock, flags); return ret; } /** - * rpmh_rsc_write_ctrl_data: Write request to the controller + * rpmh_rsc_ctrlr_is_busy() - Check if any of the AMCs are busy. + * @drv: The controller + * + * Checks if any of the AMCs are busy in handling ACTIVE sets. + * This is called from the last cpu powering down before flushing + * SLEEP and WAKE sets. If AMCs are busy, controller can not enter + * power collapse, so deny from the last cpu's pm notification. * - * @drv: the controller - * @msg: the data to be written to the controller + * Context: Must be called with the drv->lock held. * - * There is no response returned for writing the request to the controller. + * Return: + * * False - AMCs are idle + * * True - AMCs are busy */ -int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv, const struct tcs_request *msg) +static bool rpmh_rsc_ctrlr_is_busy(struct rsc_drv *drv) { - if (!msg || !msg->cmds || !msg->num_cmds || - msg->num_cmds > MAX_RPMH_PAYLOAD) { - pr_err("Payload error\n"); - return -EINVAL; + int m; + struct tcs_group *tcs = &drv->tcs[ACTIVE_TCS]; + + /* + * If we made an active request on a RSC that does not have a + * dedicated TCS for active state use, then re-purposed wake TCSes + * should be checked for not busy, because we used wake TCSes for + * active requests in this case. + */ + if (!tcs->num_tcs) + tcs = &drv->tcs[WAKE_TCS]; + + for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++) { + if (!tcs_is_free(drv, m)) + return true; } - /* Data sent to this API will not be sent immediately */ - if (msg->state == RPMH_ACTIVE_ONLY_STATE) - return -EINVAL; + return false; +} + +/** + * rpmh_rsc_cpu_pm_callback() - Check if any of the AMCs are busy. + * @nfb: Pointer to the notifier block in struct rsc_drv. + * @action: CPU_PM_ENTER, CPU_PM_ENTER_FAILED, or CPU_PM_EXIT. + * @v: Unused + * + * This function is given to cpu_pm_register_notifier so we can be informed + * about when CPUs go down. When all CPUs go down we know no more active + * transfers will be started so we write sleep/wake sets. This function gets + * called from cpuidle code paths and also at system suspend time. + * + * If its last CPU going down and AMCs are not busy then writes cached sleep + * and wake messages to TCSes. The firmware then takes care of triggering + * them when entering deepest low power modes. + * + * Return: See cpu_pm_register_notifier() + */ +static int rpmh_rsc_cpu_pm_callback(struct notifier_block *nfb, + unsigned long action, void *v) +{ + struct rsc_drv *drv = container_of(nfb, struct rsc_drv, rsc_pm); + int ret = NOTIFY_OK; + int cpus_in_pm; - return tcs_ctrl_write(drv, msg); + switch (action) { + case CPU_PM_ENTER: + cpus_in_pm = atomic_inc_return(&drv->cpus_in_pm); + /* + * NOTE: comments for num_online_cpus() point out that it's + * only a snapshot so we need to be careful. It should be OK + * for us to use, though. 
It's important for us not to miss + * if we're the last CPU going down so it would only be a + * problem if a CPU went offline right after we did the check + * AND that CPU was not idle AND that CPU was the last non-idle + * CPU. That can't happen. CPUs would have to come out of idle + * before the CPU could go offline. + */ + if (cpus_in_pm < num_online_cpus()) + return NOTIFY_OK; + break; + case CPU_PM_ENTER_FAILED: + case CPU_PM_EXIT: + atomic_dec(&drv->cpus_in_pm); + return NOTIFY_OK; + default: + return NOTIFY_DONE; + } + + /* + * It's likely we're on the last CPU. Grab the drv->lock and write + * out the sleep/wake commands to RPMH hardware. Grabbing the lock + * means that if we race with another CPU coming up we are still + * guaranteed to be safe. If another CPU came up just after we checked + * and has grabbed the lock or started an active transfer then we'll + * notice we're busy and abort. If another CPU comes up after we start + * flushing it will be blocked from starting an active transfer until + * we're done flushing. If another CPU starts an active transfer after + * we release the lock we're still OK because we're no longer the last + * CPU. + */ + if (spin_trylock(&drv->lock)) { + if (rpmh_rsc_ctrlr_is_busy(drv) || rpmh_flush(&drv->client)) + ret = NOTIFY_BAD; + spin_unlock(&drv->lock); + } else { + /* Another CPU must be up */ + return NOTIFY_OK; + } + + if (ret == NOTIFY_BAD) { + /* Double-check if we're here because someone else is up */ + if (cpus_in_pm < num_online_cpus()) + ret = NOTIFY_OK; + else + /* We won't be called w/ CPU_PM_ENTER_FAILED */ + atomic_dec(&drv->cpus_in_pm); + } + + return ret; } static int rpmh_probe_tcs_config(struct platform_device *pdev, - struct rsc_drv *drv) + struct rsc_drv *drv, void __iomem *base) { struct tcs_type_config { u32 type; @@ -532,15 +869,6 @@ static int rpmh_probe_tcs_config(struct platform_device *pdev, u32 config, max_tcs, ncpt, offset; int i, ret, n, st = 0; struct tcs_group *tcs; - struct resource *res; - void __iomem *base; - char drv_id[10] = {0}; - - snprintf(drv_id, ARRAY_SIZE(drv_id), "drv-%d", drv->id); - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, drv_id); - base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(base)) - return PTR_ERR(base); ret = of_property_read_u32(dn, "qcom,tcs-offset", &offset); if (ret) @@ -584,7 +912,6 @@ static int rpmh_probe_tcs_config(struct platform_device *pdev, tcs->type = tcs_cfg[i].type; tcs->num_tcs = tcs_cfg[i].n; tcs->ncpt = ncpt; - spin_lock_init(&tcs->lock); if (!tcs->num_tcs || tcs->type == CONTROL_TCS) continue; @@ -596,19 +923,6 @@ static int rpmh_probe_tcs_config(struct platform_device *pdev, tcs->mask = ((1 << tcs->num_tcs) - 1) << st; tcs->offset = st; st += tcs->num_tcs; - - /* - * Allocate memory to cache sleep and wake requests to - * avoid reading TCS register memory. 
- */ - if (tcs->type == ACTIVE_TCS) - continue; - - tcs->cmd_cache = devm_kcalloc(&pdev->dev, - tcs->num_tcs * ncpt, sizeof(u32), - GFP_KERNEL); - if (!tcs->cmd_cache) - return -ENOMEM; } drv->num_tcs = st; @@ -620,7 +934,11 @@ static int rpmh_rsc_probe(struct platform_device *pdev) { struct device_node *dn = pdev->dev.of_node; struct rsc_drv *drv; + struct resource *res; + char drv_id[10] = {0}; int ret, irq; + u32 solver_config; + void __iomem *base; /* * Even though RPMh doesn't directly use cmd-db, all of its children @@ -646,7 +964,13 @@ static int rpmh_rsc_probe(struct platform_device *pdev) if (!drv->name) drv->name = dev_name(&pdev->dev); - ret = rpmh_probe_tcs_config(pdev, drv); + snprintf(drv_id, ARRAY_SIZE(drv_id), "drv-%d", drv->id); + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, drv_id); + base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + ret = rpmh_probe_tcs_config(pdev, drv, base); if (ret) return ret; @@ -663,8 +987,22 @@ static int rpmh_rsc_probe(struct platform_device *pdev) if (ret) return ret; + /* + * CPU PM notification are not required for controllers that support + * 'HW solver' mode where they can be in autonomous mode executing low + * power mode to power down. + */ + solver_config = readl_relaxed(base + DRV_SOLVER_CONFIG); + solver_config &= DRV_HW_SOLVER_MASK << DRV_HW_SOLVER_SHIFT; + solver_config = solver_config >> DRV_HW_SOLVER_SHIFT; + if (!solver_config) { + drv->rsc_pm.notifier_call = rpmh_rsc_cpu_pm_callback; + cpu_pm_register_notifier(&drv->rsc_pm); + } + /* Enable the active TCS to send requests immediately */ - write_tcs_reg(drv, RSC_DRV_IRQ_ENABLE, 0, drv->tcs[ACTIVE_TCS].mask); + writel_relaxed(drv->tcs[ACTIVE_TCS].mask, + drv->tcs_base + RSC_DRV_IRQ_ENABLE); spin_lock_init(&drv->client.cache_lock); INIT_LIST_HEAD(&drv->client.cache); diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c index eb0ded059d2e..f2b5b46ccd1f 100644 --- a/drivers/soc/qcom/rpmh.c +++ b/drivers/soc/qcom/rpmh.c @@ -9,6 +9,7 @@ #include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/list.h> +#include <linux/lockdep.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> @@ -119,6 +120,7 @@ static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr, { struct cache_req *req; unsigned long flags; + u32 old_sleep_val, old_wake_val; spin_lock_irqsave(&ctrlr->cache_lock, flags); req = __find_req(ctrlr, cmd->addr); @@ -133,26 +135,27 @@ static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr, req->addr = cmd->addr; req->sleep_val = req->wake_val = UINT_MAX; - INIT_LIST_HEAD(&req->list); list_add_tail(&req->list, &ctrlr->cache); existing: + old_sleep_val = req->sleep_val; + old_wake_val = req->wake_val; + switch (state) { case RPMH_ACTIVE_ONLY_STATE: - if (req->sleep_val != UINT_MAX) - req->wake_val = cmd->data; - break; case RPMH_WAKE_ONLY_STATE: req->wake_val = cmd->data; break; case RPMH_SLEEP_STATE: req->sleep_val = cmd->data; break; - default: - break; } - ctrlr->dirty = true; + ctrlr->dirty |= (req->sleep_val != old_sleep_val || + req->wake_val != old_wake_val) && + req->sleep_val != UINT_MAX && + req->wake_val != UINT_MAX; + unlock: spin_unlock_irqrestore(&ctrlr->cache_lock, flags); @@ -287,6 +290,7 @@ static void cache_batch(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req) spin_lock_irqsave(&ctrlr->cache_lock, flags); list_add_tail(&req->list, &ctrlr->batch_cache); + ctrlr->dirty = true; spin_unlock_irqrestore(&ctrlr->cache_lock, flags); } @@ 
-294,12 +298,10 @@ static int flush_batch(struct rpmh_ctrlr *ctrlr) { struct batch_cache_req *req; const struct rpmh_request *rpm_msg; - unsigned long flags; int ret = 0; int i; /* Send Sleep/Wake requests to the controller, expect no response */ - spin_lock_irqsave(&ctrlr->cache_lock, flags); list_for_each_entry(req, &ctrlr->batch_cache, list) { for (i = 0; i < req->count; i++) { rpm_msg = req->rpm_msgs + i; @@ -309,23 +311,10 @@ static int flush_batch(struct rpmh_ctrlr *ctrlr) break; } } - spin_unlock_irqrestore(&ctrlr->cache_lock, flags); return ret; } -static void invalidate_batch(struct rpmh_ctrlr *ctrlr) -{ - struct batch_cache_req *req, *tmp; - unsigned long flags; - - spin_lock_irqsave(&ctrlr->cache_lock, flags); - list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list) - kfree(req); - INIT_LIST_HEAD(&ctrlr->batch_cache); - spin_unlock_irqrestore(&ctrlr->cache_lock, flags); -} - /** * rpmh_write_batch: Write multiple sets of RPMH commands and wait for the * batch to finish. @@ -442,36 +431,42 @@ static int send_single(struct rpmh_ctrlr *ctrlr, enum rpmh_state state, } /** - * rpmh_flush: Flushes the buffered active and sleep sets to TCS - * - * @ctrlr: controller making request to flush cached data + * rpmh_flush() - Flushes the buffered sleep and wake sets to TCSes * - * Return: -EBUSY if the controller is busy, probably waiting on a response - * to a RPMH request sent earlier. + * @ctrlr: Controller making request to flush cached data * - * This function is always called from the sleep code from the last CPU - * that is powering down the entire system. Since no other RPMH API would be - * executing at this time, it is safe to run lockless. + * Return: + * * 0 - Success + * * Error code - Otherwise */ int rpmh_flush(struct rpmh_ctrlr *ctrlr) { struct cache_req *p; - int ret; + int ret = 0; + + lockdep_assert_irqs_disabled(); + + /* + * Currently rpmh_flush() is only called when we think we're running + * on the last processor. If the lock is busy it means another + * processor is up and it's better to abort than spin. + */ + if (!spin_trylock(&ctrlr->cache_lock)) + return -EBUSY; if (!ctrlr->dirty) { pr_debug("Skipping flush, TCS has latest data.\n"); - return 0; + goto exit; } + /* Invalidate the TCSes first to avoid stale data */ + rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr)); + /* First flush the cached batch requests */ ret = flush_batch(ctrlr); if (ret) - return ret; + goto exit; - /* - * Nobody else should be calling this function other than system PM, - * hence we can run without locks. - */ list_for_each_entry(p, &ctrlr->cache, list) { if (!is_req_valid(p)) { pr_debug("%s: skipping RPMH req: a:%#x s:%#x w:%#x", @@ -481,38 +476,40 @@ int rpmh_flush(struct rpmh_ctrlr *ctrlr) ret = send_single(ctrlr, RPMH_SLEEP_STATE, p->addr, p->sleep_val); if (ret) - return ret; + goto exit; ret = send_single(ctrlr, RPMH_WAKE_ONLY_STATE, p->addr, p->wake_val); if (ret) - return ret; + goto exit; } ctrlr->dirty = false; - return 0; +exit: + spin_unlock(&ctrlr->cache_lock); + return ret; } /** - * rpmh_invalidate: Invalidate all sleep and active sets - * sets. + * rpmh_invalidate: Invalidate sleep and wake sets in batch_cache * * @dev: The device making the request * - * Invalidate the sleep and active values in the TCS blocks. + * Invalidate the sleep and wake values in batch_cache. 
*/ int rpmh_invalidate(const struct device *dev) { struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev); - int ret; + struct batch_cache_req *req, *tmp; + unsigned long flags; - invalidate_batch(ctrlr); + spin_lock_irqsave(&ctrlr->cache_lock, flags); + list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list) + kfree(req); + INIT_LIST_HEAD(&ctrlr->batch_cache); ctrlr->dirty = true; + spin_unlock_irqrestore(&ctrlr->cache_lock, flags); - do { - ret = rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr)); - } while (ret == -EAGAIN); - - return ret; + return 0; } EXPORT_SYMBOL(rpmh_invalidate); diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c index 4d264d0672c4..e72426221a69 100644 --- a/drivers/soc/qcom/rpmhpd.c +++ b/drivers/soc/qcom/rpmhpd.c @@ -4,6 +4,7 @@ #include <linux/err.h> #include <linux/init.h> #include <linux/kernel.h> +#include <linux/module.h> #include <linux/mutex.h> #include <linux/pm_domain.h> #include <linux/slab.h> @@ -166,6 +167,24 @@ static const struct rpmhpd_desc sm8150_desc = { .num_pds = ARRAY_SIZE(sm8150_rpmhpds), }; +static struct rpmhpd *sm8250_rpmhpds[] = { + [SM8250_CX] = &sdm845_cx, + [SM8250_CX_AO] = &sdm845_cx_ao, + [SM8250_EBI] = &sdm845_ebi, + [SM8250_GFX] = &sdm845_gfx, + [SM8250_LCX] = &sdm845_lcx, + [SM8250_LMX] = &sdm845_lmx, + [SM8250_MMCX] = &sm8150_mmcx, + [SM8250_MMCX_AO] = &sm8150_mmcx_ao, + [SM8250_MX] = &sdm845_mx, + [SM8250_MX_AO] = &sdm845_mx_ao, +}; + +static const struct rpmhpd_desc sm8250_desc = { + .rpmhpds = sm8250_rpmhpds, + .num_pds = ARRAY_SIZE(sm8250_rpmhpds), +}; + /* SC7180 RPMH powerdomains */ static struct rpmhpd *sc7180_rpmhpds[] = { [SC7180_CX] = &sdm845_cx, @@ -187,8 +206,10 @@ static const struct of_device_id rpmhpd_match_table[] = { { .compatible = "qcom,sc7180-rpmhpd", .data = &sc7180_desc }, { .compatible = "qcom,sdm845-rpmhpd", .data = &sdm845_desc }, { .compatible = "qcom,sm8150-rpmhpd", .data = &sm8150_desc }, + { .compatible = "qcom,sm8250-rpmhpd", .data = &sm8250_desc }, { } }; +MODULE_DEVICE_TABLE(of, rpmhpd_match_table); static int rpmhpd_send_corner(struct rpmhpd *pd, int state, unsigned int corner, bool sync) @@ -460,3 +481,6 @@ static int __init rpmhpd_init(void) return platform_driver_register(&rpmhpd_driver); } core_initcall(rpmhpd_init); + +MODULE_DESCRIPTION("Qualcomm Technologies, Inc. RPMh Power Domain Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/qcom/rpmpd.c b/drivers/soc/qcom/rpmpd.c index 2b1834c5609a..f2168e4259b2 100644 --- a/drivers/soc/qcom/rpmpd.c +++ b/drivers/soc/qcom/rpmpd.c @@ -4,6 +4,7 @@ #include <linux/err.h> #include <linux/init.h> #include <linux/kernel.h> +#include <linux/module.h> #include <linux/mutex.h> #include <linux/pm_domain.h> #include <linux/of.h> @@ -226,6 +227,7 @@ static const struct of_device_id rpmpd_match_table[] = { { .compatible = "qcom,qcs404-rpmpd", .data = &qcs404_desc }, { } }; +MODULE_DEVICE_TABLE(of, rpmpd_match_table); static int rpmpd_send_enable(struct rpmpd *pd, bool enable) { @@ -422,3 +424,6 @@ static int __init rpmpd_init(void) return platform_driver_register(&rpmpd_driver); } core_initcall(rpmpd_init); + +MODULE_DESCRIPTION("Qualcomm Technologies, Inc. 
RPM Power Domain Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c index c7300d54e444..07183d731d74 100644 --- a/drivers/soc/qcom/smp2p.c +++ b/drivers/soc/qcom/smp2p.c @@ -474,10 +474,8 @@ static int qcom_smp2p_probe(struct platform_device *pdev) goto report_read_failure; irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(&pdev->dev, "unable to acquire smp2p interrupt\n"); + if (irq < 0) return irq; - } smp2p->mbox_client.dev = &pdev->dev; smp2p->mbox_client.knows_txdone = true; diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c index ebb49aee179b..5983c6ffb078 100644 --- a/drivers/soc/qcom/socinfo.c +++ b/drivers/soc/qcom/socinfo.c @@ -188,6 +188,10 @@ static const struct soc_id soc_id[] = { { 216, "MSM8674PRO" }, { 217, "MSM8974-AA" }, { 218, "MSM8974-AB" }, + { 233, "MSM8936" }, + { 239, "MSM8939" }, + { 240, "APQ8036" }, + { 241, "APQ8039" }, { 246, "MSM8996" }, { 247, "APQ8016" }, { 248, "MSM8216" }, @@ -430,6 +434,8 @@ static int qcom_socinfo_probe(struct platform_device *pdev) qs->attr.family = "Snapdragon"; qs->attr.machine = socinfo_machine(&pdev->dev, le32_to_cpu(info->id)); + qs->attr.soc_id = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%u", + le32_to_cpu(info->id)); qs->attr.revision = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%u.%u", SOCINFO_MAJOR(le32_to_cpu(info->ver)), SOCINFO_MINOR(le32_to_cpu(info->ver))); |