Diffstat (limited to 'drivers/cpuidle')
-rw-r--r--   drivers/cpuidle/Kconfig.arm              12
-rw-r--r--   drivers/cpuidle/Makefile                  4
-rw-r--r--   drivers/cpuidle/coupled.c                 9
-rw-r--r--   drivers/cpuidle/cpuidle-clps711x.c        5
-rw-r--r--   drivers/cpuidle/cpuidle-kirkwood.c        5
-rw-r--r--   drivers/cpuidle/cpuidle-psci-domain.c   308
-rw-r--r--   drivers/cpuidle/cpuidle-psci.c          161
-rw-r--r--   drivers/cpuidle/cpuidle-psci.h           17
-rw-r--r--   drivers/cpuidle/cpuidle.c                 9
-rw-r--r--   drivers/cpuidle/driver.c                 46
-rw-r--r--   drivers/cpuidle/dt_idle_states.c          5
-rw-r--r--   drivers/cpuidle/governors/teo.c           2
-rw-r--r--   drivers/cpuidle/sysfs.c                  16
13 files changed, 502 insertions, 97 deletions
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm index a224d33dda7f..62272ecfa771 100644 --- a/drivers/cpuidle/Kconfig.arm +++ b/drivers/cpuidle/Kconfig.arm @@ -25,7 +25,7 @@ config ARM_PSCI_CPUIDLE config ARM_BIG_LITTLE_CPUIDLE bool "Support for ARM big.LITTLE processors" - depends on ARCH_VEXPRESS_TC2_PM || ARCH_EXYNOS + depends on ARCH_VEXPRESS_TC2_PM || ARCH_EXYNOS || COMPILE_TEST depends on MCPM && !ARM64 select ARM_CPU_SUSPEND select CPU_IDLE_MULTIPLE_DRIVERS @@ -51,13 +51,13 @@ config ARM_HIGHBANK_CPUIDLE config ARM_KIRKWOOD_CPUIDLE bool "CPU Idle Driver for Marvell Kirkwood SoCs" - depends on MACH_KIRKWOOD && !ARM64 + depends on (MACH_KIRKWOOD || COMPILE_TEST) && !ARM64 help This adds the CPU Idle driver for Marvell Kirkwood SoCs. config ARM_ZYNQ_CPUIDLE bool "CPU Idle Driver for Xilinx Zynq processors" - depends on ARCH_ZYNQ && !ARM64 + depends on (ARCH_ZYNQ || COMPILE_TEST) && !ARM64 help Select this to enable cpuidle on Xilinx Zynq processors. @@ -70,19 +70,19 @@ config ARM_U8500_CPUIDLE config ARM_AT91_CPUIDLE bool "Cpu Idle Driver for the AT91 processors" default y - depends on ARCH_AT91 && !ARM64 + depends on (ARCH_AT91 || COMPILE_TEST) && !ARM64 help Select this to enable cpuidle for AT91 processors. config ARM_EXYNOS_CPUIDLE bool "Cpu Idle Driver for the Exynos processors" - depends on ARCH_EXYNOS && !ARM64 + depends on (ARCH_EXYNOS || COMPILE_TEST) && !ARM64 select ARCH_NEEDS_CPU_IDLE_COUPLED if SMP help Select this to enable cpuidle for Exynos processors. config ARM_MVEBU_V7_CPUIDLE bool "CPU Idle Driver for mvebu v7 family processors" - depends on ARCH_MVEBU && !ARM64 + depends on (ARCH_MVEBU || COMPILE_TEST) && !ARM64 help Select this to enable cpuidle on Armada 370, 38x and XP processors. diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile index ee70d5cc5b99..cc8c769d7fa9 100644 --- a/drivers/cpuidle/Makefile +++ b/drivers/cpuidle/Makefile @@ -21,7 +21,9 @@ obj-$(CONFIG_ARM_U8500_CPUIDLE) += cpuidle-ux500.o obj-$(CONFIG_ARM_AT91_CPUIDLE) += cpuidle-at91.o obj-$(CONFIG_ARM_EXYNOS_CPUIDLE) += cpuidle-exynos.o obj-$(CONFIG_ARM_CPUIDLE) += cpuidle-arm.o -obj-$(CONFIG_ARM_PSCI_CPUIDLE) += cpuidle-psci.o +obj-$(CONFIG_ARM_PSCI_CPUIDLE) += cpuidle_psci.o +cpuidle_psci-y := cpuidle-psci.o +cpuidle_psci-$(CONFIG_PM_GENERIC_DOMAINS_OF) += cpuidle-psci-domain.o ############################################################################### # MIPS drivers diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c index b607278df25b..04003b90dc49 100644 --- a/drivers/cpuidle/coupled.c +++ b/drivers/cpuidle/coupled.c @@ -89,6 +89,7 @@ * @coupled_cpus: mask of cpus that are part of the coupled set * @requested_state: array of requested states for cpus in the coupled set * @ready_waiting_counts: combined count of cpus in ready or waiting loops + * @abort_barrier: synchronisation point for abort cases * @online_count: count of cpus that are online * @refcnt: reference count of cpuidle devices that are using this struct * @prevent: flag to prevent coupled idle while a cpu is hotplugging @@ -338,7 +339,7 @@ static void cpuidle_coupled_poke(int cpu) /** * cpuidle_coupled_poke_others - wake up all other cpus that may be waiting - * @dev: struct cpuidle_device for this cpu + * @this_cpu: target cpu * @coupled: the struct coupled that contains the current cpu * * Calls cpuidle_coupled_poke on all other online cpus. 
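[Editor's note] The coupled.c hunks in this patch only touch kernel-doc: each @tag is renamed so it matches the real parameter of the documented function. A minimal sketch of the convention the fixes move toward (the function and parameters below are hypothetical, not from the patch):

/**
 * example_poke_cpu - ask another CPU to leave the waiting loop
 * @this_cpu: the CPU sending the poke
 * @target: the CPU being poked
 *
 * Every @tag must name an actual parameter; a stale tag such as "@dev"
 * on a function that only takes CPU numbers produces kernel-doc
 * warnings, which is what these hunks clean up.
 */
static void example_poke_cpu(int this_cpu, int target)
{
	/* body omitted - the documentation layout is the point here */
}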
@@ -355,7 +356,7 @@ static void cpuidle_coupled_poke_others(int this_cpu, /** * cpuidle_coupled_set_waiting - mark this cpu as in the wait loop - * @dev: struct cpuidle_device for this cpu + * @cpu: target cpu * @coupled: the struct coupled that contains the current cpu * @next_state: the index in drv->states of the requested state for this cpu * @@ -376,7 +377,7 @@ static int cpuidle_coupled_set_waiting(int cpu, /** * cpuidle_coupled_set_not_waiting - mark this cpu as leaving the wait loop - * @dev: struct cpuidle_device for this cpu + * @cpu: target cpu * @coupled: the struct coupled that contains the current cpu * * Removes the requested idle state for the specified cpuidle device. @@ -412,7 +413,7 @@ static void cpuidle_coupled_set_done(int cpu, struct cpuidle_coupled *coupled) /** * cpuidle_coupled_clear_pokes - spin until the poke interrupt is processed - * @cpu - this cpu + * @cpu: this cpu * * Turns on interrupts and spins until any outstanding poke interrupts have * been processed and the poke bit has been cleared. diff --git a/drivers/cpuidle/cpuidle-clps711x.c b/drivers/cpuidle/cpuidle-clps711x.c index 6e36740f5719..fc22c59b6c73 100644 --- a/drivers/cpuidle/cpuidle-clps711x.c +++ b/drivers/cpuidle/cpuidle-clps711x.c @@ -37,10 +37,7 @@ static struct cpuidle_driver clps711x_idle_driver = { static int __init clps711x_cpuidle_probe(struct platform_device *pdev) { - struct resource *res; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - clps711x_halt = devm_ioremap_resource(&pdev->dev, res); + clps711x_halt = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(clps711x_halt)) return PTR_ERR(clps711x_halt); diff --git a/drivers/cpuidle/cpuidle-kirkwood.c b/drivers/cpuidle/cpuidle-kirkwood.c index d23d8f468c12..511c4f46027a 100644 --- a/drivers/cpuidle/cpuidle-kirkwood.c +++ b/drivers/cpuidle/cpuidle-kirkwood.c @@ -55,10 +55,7 @@ static struct cpuidle_driver kirkwood_idle_driver = { /* Initialize CPU idle by registering the idle states */ static int kirkwood_cpuidle_probe(struct platform_device *pdev) { - struct resource *res; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ddr_operation_base = devm_ioremap_resource(&pdev->dev, res); + ddr_operation_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ddr_operation_base)) return PTR_ERR(ddr_operation_base); diff --git a/drivers/cpuidle/cpuidle-psci-domain.c b/drivers/cpuidle/cpuidle-psci-domain.c new file mode 100644 index 000000000000..423f03bbeb74 --- /dev/null +++ b/drivers/cpuidle/cpuidle-psci-domain.c @@ -0,0 +1,308 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PM domains for CPUs via genpd - managed by cpuidle-psci. + * + * Copyright (C) 2019 Linaro Ltd. + * Author: Ulf Hansson <ulf.hansson@linaro.org> + * + */ + +#define pr_fmt(fmt) "CPUidle PSCI: " fmt + +#include <linux/cpu.h> +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/pm_domain.h> +#include <linux/pm_runtime.h> +#include <linux/psci.h> +#include <linux/slab.h> +#include <linux/string.h> + +#include "cpuidle-psci.h" + +struct psci_pd_provider { + struct list_head link; + struct device_node *node; +}; + +static LIST_HEAD(psci_pd_providers); +static bool osi_mode_enabled __initdata; + +static int psci_pd_power_off(struct generic_pm_domain *pd) +{ + struct genpd_power_state *state = &pd->states[pd->state_idx]; + u32 *pd_state; + + if (!state->data) + return 0; + + /* OSI mode is enabled, set the corresponding domain state. 
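[Editor's note] The clps711x and kirkwood conversions above rely on devm_platform_ioremap_resource(), which folds the platform_get_resource()/devm_ioremap_resource() pair into a single call. A minimal probe sketch, with hypothetical driver and register names:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static void __iomem *example_base;

static int example_probe(struct platform_device *pdev)
{
	/* Look up MEM resource 0 and ioremap it, device-managed. */
	example_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(example_base))
		return PTR_ERR(example_base);

	/* ... program the hardware through example_base ... */
	return 0;
}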
*/ + pd_state = state->data; + psci_set_domain_state(*pd_state); + + return 0; +} + +static int __init psci_pd_parse_state_nodes(struct genpd_power_state *states, + int state_count) +{ + int i, ret; + u32 psci_state, *psci_state_buf; + + for (i = 0; i < state_count; i++) { + ret = psci_dt_parse_state_node(to_of_node(states[i].fwnode), + &psci_state); + if (ret) + goto free_state; + + psci_state_buf = kmalloc(sizeof(u32), GFP_KERNEL); + if (!psci_state_buf) { + ret = -ENOMEM; + goto free_state; + } + *psci_state_buf = psci_state; + states[i].data = psci_state_buf; + } + + return 0; + +free_state: + i--; + for (; i >= 0; i--) + kfree(states[i].data); + return ret; +} + +static int __init psci_pd_parse_states(struct device_node *np, + struct genpd_power_state **states, int *state_count) +{ + int ret; + + /* Parse the domain idle states. */ + ret = of_genpd_parse_idle_states(np, states, state_count); + if (ret) + return ret; + + /* Fill out the PSCI specifics for each found state. */ + ret = psci_pd_parse_state_nodes(*states, *state_count); + if (ret) + kfree(*states); + + return ret; +} + +static void psci_pd_free_states(struct genpd_power_state *states, + unsigned int state_count) +{ + int i; + + for (i = 0; i < state_count; i++) + kfree(states[i].data); + kfree(states); +} + +static int __init psci_pd_init(struct device_node *np) +{ + struct generic_pm_domain *pd; + struct psci_pd_provider *pd_provider; + struct dev_power_governor *pd_gov; + struct genpd_power_state *states = NULL; + int ret = -ENOMEM, state_count = 0; + + pd = kzalloc(sizeof(*pd), GFP_KERNEL); + if (!pd) + goto out; + + pd_provider = kzalloc(sizeof(*pd_provider), GFP_KERNEL); + if (!pd_provider) + goto free_pd; + + pd->name = kasprintf(GFP_KERNEL, "%pOF", np); + if (!pd->name) + goto free_pd_prov; + + /* + * Parse the domain idle states and let genpd manage the state selection + * for those being compatible with "domain-idle-state". + */ + ret = psci_pd_parse_states(np, &states, &state_count); + if (ret) + goto free_name; + + pd->free_states = psci_pd_free_states; + pd->name = kbasename(pd->name); + pd->power_off = psci_pd_power_off; + pd->states = states; + pd->state_count = state_count; + pd->flags |= GENPD_FLAG_IRQ_SAFE | GENPD_FLAG_CPU_DOMAIN; + + /* Use governor for CPU PM domains if it has some states to manage. */ + pd_gov = state_count > 0 ? 
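[Editor's note] psci_pd_parse_state_nodes() above stores one "arm,psci-suspend-param" value per domain idle state. As a rough illustration of what such a value encodes under the PSCI 1.0 default power_state format (the macro names and the concrete numbers below are made up, not taken from this patch):

#include <linux/types.h>

/* Default (non-extended) PSCI power_state layout:
 *   bits [15:0]  StateID, platform defined
 *   bit  [16]    StateType: 0 = standby/retention, 1 = powerdown
 *   bits [25:24] PowerLevel: 0 = core, 1 = cluster, 2 = system
 */
#define EXAMPLE_STATE_ID(id)	((id) & 0xffff)
#define EXAMPLE_TYPE_POWERDOWN	(1U << 16)
#define EXAMPLE_LEVEL(l)	(((u32)(l) & 0x3) << 24)

/* e.g. a cluster-level powerdown state with platform StateID 0x41 */
static const u32 example_cluster_state =
	EXAMPLE_STATE_ID(0x41) | EXAMPLE_TYPE_POWERDOWN | EXAMPLE_LEVEL(1);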
&pm_domain_cpu_gov : NULL; + + ret = pm_genpd_init(pd, pd_gov, false); + if (ret) { + psci_pd_free_states(states, state_count); + goto free_name; + } + + ret = of_genpd_add_provider_simple(np, pd); + if (ret) + goto remove_pd; + + pd_provider->node = of_node_get(np); + list_add(&pd_provider->link, &psci_pd_providers); + + pr_debug("init PM domain %s\n", pd->name); + return 0; + +remove_pd: + pm_genpd_remove(pd); +free_name: + kfree(pd->name); +free_pd_prov: + kfree(pd_provider); +free_pd: + kfree(pd); +out: + pr_err("failed to init PM domain ret=%d %pOF\n", ret, np); + return ret; +} + +static void __init psci_pd_remove(void) +{ + struct psci_pd_provider *pd_provider, *it; + struct generic_pm_domain *genpd; + + list_for_each_entry_safe(pd_provider, it, &psci_pd_providers, link) { + of_genpd_del_provider(pd_provider->node); + + genpd = of_genpd_remove_last(pd_provider->node); + if (!IS_ERR(genpd)) + kfree(genpd); + + of_node_put(pd_provider->node); + list_del(&pd_provider->link); + kfree(pd_provider); + } +} + +static int __init psci_pd_init_topology(struct device_node *np, bool add) +{ + struct device_node *node; + struct of_phandle_args child, parent; + int ret; + + for_each_child_of_node(np, node) { + if (of_parse_phandle_with_args(node, "power-domains", + "#power-domain-cells", 0, &parent)) + continue; + + child.np = node; + child.args_count = 0; + + ret = add ? of_genpd_add_subdomain(&parent, &child) : + of_genpd_remove_subdomain(&parent, &child); + of_node_put(parent.np); + if (ret) { + of_node_put(node); + return ret; + } + } + + return 0; +} + +static int __init psci_pd_add_topology(struct device_node *np) +{ + return psci_pd_init_topology(np, true); +} + +static void __init psci_pd_remove_topology(struct device_node *np) +{ + psci_pd_init_topology(np, false); +} + +static const struct of_device_id psci_of_match[] __initconst = { + { .compatible = "arm,psci-1.0" }, + {} +}; + +static int __init psci_idle_init_domains(void) +{ + struct device_node *np = of_find_matching_node(NULL, psci_of_match); + struct device_node *node; + int ret = 0, pd_count = 0; + + if (!np) + return -ENODEV; + + /* Currently limit the hierarchical topology to be used in OSI mode. */ + if (!psci_has_osi_support()) + goto out; + + /* + * Parse child nodes for the "#power-domain-cells" property and + * initialize a genpd/genpd-of-provider pair when it's found. + */ + for_each_child_of_node(np, node) { + if (!of_find_property(node, "#power-domain-cells", NULL)) + continue; + + ret = psci_pd_init(node); + if (ret) + goto put_node; + + pd_count++; + } + + /* Bail out if not using the hierarchical CPU topology. */ + if (!pd_count) + goto out; + + /* Link genpd masters/subdomains to model the CPU topology. */ + ret = psci_pd_add_topology(np); + if (ret) + goto remove_pd; + + /* Try to enable OSI mode. 
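[Editor's note] In psci_pd_init_topology() above, the roles are easy to misread: the of_phandle_args filled in by of_parse_phandle_with_args() describe the parent provider referenced by the node's "power-domains" property, while the child arguments are just the node itself, passed with no provider cells. A condensed sketch of that pairing (the function and node variable are hypothetical):

#include <linux/of.h>
#include <linux/pm_domain.h>

static int example_link_subdomain(struct device_node *child_pd_node)
{
	struct of_phandle_args parent, child;
	int ret;

	/* Parent = whatever the node's power-domains phandle points at. */
	ret = of_parse_phandle_with_args(child_pd_node, "power-domains",
					 "#power-domain-cells", 0, &parent);
	if (ret)
		return 0;	/* no parent domain: nothing to link */

	/* Child = the node itself, with no cells. */
	child.np = child_pd_node;
	child.args_count = 0;

	ret = of_genpd_add_subdomain(&parent, &child);
	of_node_put(parent.np);	/* drop the reference taken while parsing */
	return ret;
}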
*/ + ret = psci_set_osi_mode(); + if (ret) { + pr_warn("failed to enable OSI mode: %d\n", ret); + psci_pd_remove_topology(np); + goto remove_pd; + } + + osi_mode_enabled = true; + of_node_put(np); + pr_info("Initialized CPU PM domain topology\n"); + return pd_count; + +put_node: + of_node_put(node); +remove_pd: + if (pd_count) + psci_pd_remove(); + pr_err("failed to create CPU PM domains ret=%d\n", ret); +out: + of_node_put(np); + return ret; +} +subsys_initcall(psci_idle_init_domains); + +struct device __init *psci_dt_attach_cpu(int cpu) +{ + struct device *dev; + + if (!osi_mode_enabled) + return NULL; + + dev = dev_pm_domain_attach_by_name(get_cpu_device(cpu), "psci"); + if (IS_ERR_OR_NULL(dev)) + return dev; + + pm_runtime_irq_safe(dev); + if (cpu_online(cpu)) + pm_runtime_get_sync(dev); + + return dev; +} diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c index f3c1a2396f98..edd7a54ef0d3 100644 --- a/drivers/cpuidle/cpuidle-psci.c +++ b/drivers/cpuidle/cpuidle-psci.c @@ -8,6 +8,7 @@ #define pr_fmt(fmt) "CPUidle PSCI: " fmt +#include <linux/cpuhotplug.h> #include <linux/cpuidle.h> #include <linux/cpumask.h> #include <linux/cpu_pm.h> @@ -16,21 +17,107 @@ #include <linux/of.h> #include <linux/of_device.h> #include <linux/psci.h> +#include <linux/pm_runtime.h> #include <linux/slab.h> #include <asm/cpuidle.h> +#include "cpuidle-psci.h" #include "dt_idle_states.h" -static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state); +struct psci_cpuidle_data { + u32 *psci_states; + struct device *dev; +}; + +static DEFINE_PER_CPU_READ_MOSTLY(struct psci_cpuidle_data, psci_cpuidle_data); +static DEFINE_PER_CPU(u32, domain_state); +static bool psci_cpuidle_use_cpuhp __initdata; + +void psci_set_domain_state(u32 state) +{ + __this_cpu_write(domain_state, state); +} + +static inline u32 psci_get_domain_state(void) +{ + return __this_cpu_read(domain_state); +} + +static inline int psci_enter_state(int idx, u32 state) +{ + return CPU_PM_CPU_IDLE_ENTER_PARAM(psci_cpu_suspend_enter, idx, state); +} + +static int psci_enter_domain_idle_state(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int idx) +{ + struct psci_cpuidle_data *data = this_cpu_ptr(&psci_cpuidle_data); + u32 *states = data->psci_states; + struct device *pd_dev = data->dev; + u32 state; + int ret; + + /* Do runtime PM to manage a hierarchical CPU toplogy. */ + pm_runtime_put_sync_suspend(pd_dev); + + state = psci_get_domain_state(); + if (!state) + state = states[idx]; + + ret = psci_enter_state(idx, state); + + pm_runtime_get_sync(pd_dev); + + /* Clear the domain state to start fresh when back from idle. */ + psci_set_domain_state(0); + return ret; +} + +static int psci_idle_cpuhp_up(unsigned int cpu) +{ + struct device *pd_dev = __this_cpu_read(psci_cpuidle_data.dev); + + if (pd_dev) + pm_runtime_get_sync(pd_dev); + + return 0; +} + +static int psci_idle_cpuhp_down(unsigned int cpu) +{ + struct device *pd_dev = __this_cpu_read(psci_cpuidle_data.dev); + + if (pd_dev) { + pm_runtime_put_sync(pd_dev); + /* Clear domain state to start fresh at next online. 
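[Editor's note] The per-CPU domain_state introduced above needs no locking because, in OSI mode, the genpd ->power_off callback runs on the last CPU entering idle, which is the same CPU that then issues the PSCI suspend; the value only ever moves between two code paths on one CPU. A reduced sketch of that handoff, with hypothetical names:

#include <linux/percpu.h>
#include <linux/types.h>

static DEFINE_PER_CPU(u32, example_domain_state);

/* Called from the genpd power-off path on the last CPU going idle. */
static void example_set_domain_state(u32 state)
{
	__this_cpu_write(example_domain_state, state);
}

/* Called right afterwards from the idle-enter path on the same CPU. */
static u32 example_get_domain_state(void)
{
	return __this_cpu_read(example_domain_state);
}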
*/ + psci_set_domain_state(0); + } + + return 0; +} + +static void __init psci_idle_init_cpuhp(void) +{ + int err; + + if (!psci_cpuidle_use_cpuhp) + return; + + err = cpuhp_setup_state_nocalls(CPUHP_AP_CPU_PM_STARTING, + "cpuidle/psci:online", + psci_idle_cpuhp_up, + psci_idle_cpuhp_down); + if (err) + pr_warn("Failed %d while setup cpuhp state\n", err); +} static int psci_enter_idle_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, int idx) { - u32 *state = __this_cpu_read(psci_power_state); + u32 *state = __this_cpu_read(psci_cpuidle_data.psci_states); - return CPU_PM_CPU_IDLE_ENTER_PARAM(psci_cpu_suspend_enter, - idx, state[idx - 1]); + return psci_enter_state(idx, state[idx]); } static struct cpuidle_driver psci_idle_driver __initdata = { @@ -56,7 +143,7 @@ static const struct of_device_id psci_idle_state_match[] __initconst = { { }, }; -static int __init psci_dt_parse_state_node(struct device_node *np, u32 *state) +int __init psci_dt_parse_state_node(struct device_node *np, u32 *state) { int err = of_property_read_u32(np, "arm,psci-suspend-param", state); @@ -73,28 +160,25 @@ static int __init psci_dt_parse_state_node(struct device_node *np, u32 *state) return 0; } -static int __init psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu) +static int __init psci_dt_cpu_init_idle(struct cpuidle_driver *drv, + struct device_node *cpu_node, + unsigned int state_count, int cpu) { - int i, ret = 0, count = 0; + int i, ret = 0; u32 *psci_states; struct device_node *state_node; + struct psci_cpuidle_data *data = per_cpu_ptr(&psci_cpuidle_data, cpu); - /* Count idle states */ - while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states", - count))) { - count++; - of_node_put(state_node); - } - - if (!count) - return -ENODEV; - - psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL); + state_count++; /* Add WFI state too */ + psci_states = kcalloc(state_count, sizeof(*psci_states), GFP_KERNEL); if (!psci_states) return -ENOMEM; - for (i = 0; i < count; i++) { - state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i); + for (i = 1; i < state_count; i++) { + state_node = of_get_cpu_state_node(cpu_node, i - 1); + if (!state_node) + break; + ret = psci_dt_parse_state_node(state_node, &psci_states[i]); of_node_put(state_node); @@ -104,8 +188,33 @@ static int __init psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu) pr_debug("psci-power-state %#x index %d\n", psci_states[i], i); } - /* Idle states parsed correctly, initialize per-cpu pointer */ - per_cpu(psci_power_state, cpu) = psci_states; + if (i != state_count) { + ret = -ENODEV; + goto free_mem; + } + + /* Currently limit the hierarchical topology to be used in OSI mode. */ + if (psci_has_osi_support()) { + data->dev = psci_dt_attach_cpu(cpu); + if (IS_ERR(data->dev)) { + ret = PTR_ERR(data->dev); + goto free_mem; + } + + /* + * Using the deepest state for the CPU to trigger a potential + * selection of a shared state for the domain, assumes the + * domain states are all deeper states. + */ + if (data->dev) { + drv->states[state_count - 1].enter = + psci_enter_domain_idle_state; + psci_cpuidle_use_cpuhp = true; + } + } + + /* Idle states parsed correctly, store them in the per-cpu struct. 
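[Editor's note] The psci_enter_idle_state() change above (states[idx] instead of state[idx - 1]) follows from the reworked table layout: index 0 is now reserved for the architectural WFI state and the DT-provided states start at 1, so driver state indices and PSCI parameters line up. Roughly, with illustrative values only:

#include <linux/types.h>

static const u32 example_psci_states[] = {
	[0] = 0,		/* WFI: entered without a PSCI parameter */
	[1] = 0x00000001,	/* first "cpu-idle-states" entry from the DT */
	[2] = 0x01000041,	/* second DT entry, e.g. a powerdown state */
};
/* drv->states[idx] and example_psci_states[idx] now share the same index. */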
*/ + data->psci_states = psci_states; return 0; free_mem: @@ -113,7 +222,8 @@ free_mem: return ret; } -static __init int psci_cpu_init_idle(unsigned int cpu) +static __init int psci_cpu_init_idle(struct cpuidle_driver *drv, + unsigned int cpu, unsigned int state_count) { struct device_node *cpu_node; int ret; @@ -129,7 +239,7 @@ static __init int psci_cpu_init_idle(unsigned int cpu) if (!cpu_node) return -ENODEV; - ret = psci_dt_cpu_init_idle(cpu_node, cpu); + ret = psci_dt_cpu_init_idle(drv, cpu_node, state_count, cpu); of_node_put(cpu_node); @@ -185,7 +295,7 @@ static int __init psci_idle_init_cpu(int cpu) /* * Initialize PSCI idle states. */ - ret = psci_cpu_init_idle(cpu); + ret = psci_cpu_init_idle(drv, cpu, ret); if (ret) { pr_err("CPU %d failed to PSCI idle\n", cpu); goto out_kfree_drv; @@ -221,6 +331,7 @@ static int __init psci_idle_init(void) goto out_fail; } + psci_idle_init_cpuhp(); return 0; out_fail: diff --git a/drivers/cpuidle/cpuidle-psci.h b/drivers/cpuidle/cpuidle-psci.h new file mode 100644 index 000000000000..7299a04dd467 --- /dev/null +++ b/drivers/cpuidle/cpuidle-psci.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __CPUIDLE_PSCI_H +#define __CPUIDLE_PSCI_H + +struct device_node; + +void psci_set_domain_state(u32 state); +int __init psci_dt_parse_state_node(struct device_node *np, u32 *state); + +#ifdef CONFIG_PM_GENERIC_DOMAINS_OF +struct device __init *psci_dt_attach_cpu(int cpu); +#else +static inline struct device __init *psci_dt_attach_cpu(int cpu) { return NULL; } +#endif + +#endif /* __CPUIDLE_PSCI_H */ diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 33d19c8eb027..de81298051b3 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -121,6 +121,9 @@ void cpuidle_use_deepest_state(u64 latency_limit_ns) * cpuidle_find_deepest_state - Find the deepest available idle state. * @drv: cpuidle driver for the given CPU. * @dev: cpuidle device for the given CPU. + * @latency_limit_ns: Idle state exit latency limit + * + * Return: the index of the deepest available idle state. 
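[Editor's note] The new cpuidle-psci.h above uses the common stub-header idiom: a real prototype when CONFIG_PM_GENERIC_DOMAINS_OF is set, and a static inline returning NULL otherwise, so cpuidle-psci.c needs no #ifdefs at the call site. The same idiom in isolation (the config symbol and function are hypothetical):

#include <linux/device.h>

#ifdef CONFIG_EXAMPLE_DOMAINS
struct device *example_attach_cpu(int cpu);
#else
static inline struct device *example_attach_cpu(int cpu) { return NULL; }
#endif

/* Callers simply check the result; NULL means "no PM domain", whether
 * the feature is compiled out or the DT does not describe one. */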
*/ int cpuidle_find_deepest_state(struct cpuidle_driver *drv, struct cpuidle_device *dev, @@ -572,10 +575,14 @@ static int __cpuidle_register_device(struct cpuidle_device *dev) if (!try_module_get(drv->owner)) return -EINVAL; - for (i = 0; i < drv->state_count; i++) + for (i = 0; i < drv->state_count; i++) { if (drv->states[i].flags & CPUIDLE_FLAG_UNUSABLE) dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_DRIVER; + if (drv->states[i].flags & CPUIDLE_FLAG_OFF) + dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_USER; + } + per_cpu(cpuidle_devices, dev->cpu) = dev; list_add(&dev->device_list, &cpuidle_detected_devices); diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c index ce6a5f80fb83..4070e573bf43 100644 --- a/drivers/cpuidle/driver.c +++ b/drivers/cpuidle/driver.c @@ -155,8 +155,6 @@ static void __cpuidle_driver_init(struct cpuidle_driver *drv) { int i; - drv->refcnt = 0; - /* * Use all possible CPUs as the default, because if the kernel boots * with some CPUs offline and then we online one of them, the CPU @@ -240,9 +238,6 @@ static int __cpuidle_register_driver(struct cpuidle_driver *drv) */ static void __cpuidle_unregister_driver(struct cpuidle_driver *drv) { - if (WARN_ON(drv->refcnt > 0)) - return; - if (drv->bctimer) { drv->bctimer = 0; on_each_cpu_mask(drv->cpumask, cpuidle_setup_broadcast_timer, @@ -350,47 +345,6 @@ struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev) EXPORT_SYMBOL_GPL(cpuidle_get_cpu_driver); /** - * cpuidle_driver_ref - get a reference to the driver. - * - * Increment the reference counter of the cpuidle driver associated with - * the current CPU. - * - * Returns a pointer to the driver, or NULL if the current CPU has no driver. - */ -struct cpuidle_driver *cpuidle_driver_ref(void) -{ - struct cpuidle_driver *drv; - - spin_lock(&cpuidle_driver_lock); - - drv = cpuidle_get_driver(); - if (drv) - drv->refcnt++; - - spin_unlock(&cpuidle_driver_lock); - return drv; -} - -/** - * cpuidle_driver_unref - puts down the refcount for the driver - * - * Decrement the reference counter of the cpuidle driver associated with - * the current CPU. 
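[Editor's note] The cpuidle.c hunk above makes CPUIDLE_FLAG_OFF mark a state as disabled by default; it is recorded as CPUIDLE_STATE_DISABLED_BY_USER, so it can still be re-enabled from sysfs. A partial driver sketch showing how a state might opt in (names and latency numbers are invented, and a real driver would also fill in the .enter callbacks):

#include <linux/cpuidle.h>

static struct cpuidle_driver example_idle_driver = {
	.name = "example_idle",
	.states = {
		{
			.name = "WFI",
			.exit_latency = 1,
			.target_residency = 1,
		},
		{
			.name = "deep",
			.exit_latency = 500,
			.target_residency = 2000,
			/* shipped disabled; users may re-enable it through
			 * the per-state "disable" attribute in sysfs */
			.flags = CPUIDLE_FLAG_OFF,
		},
	},
	.state_count = 2,
};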
- */ -void cpuidle_driver_unref(void) -{ - struct cpuidle_driver *drv; - - spin_lock(&cpuidle_driver_lock); - - drv = cpuidle_get_driver(); - if (drv && !WARN_ON(drv->refcnt <= 0)) - drv->refcnt--; - - spin_unlock(&cpuidle_driver_lock); -} - -/** * cpuidle_driver_state_disabled - Disable or enable an idle state * @drv: cpuidle driver owning the state * @idx: State index diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c index d06d21a9525d..252f2a9686a6 100644 --- a/drivers/cpuidle/dt_idle_states.c +++ b/drivers/cpuidle/dt_idle_states.c @@ -111,8 +111,7 @@ static bool idle_state_valid(struct device_node *state_node, unsigned int idx, for (cpu = cpumask_next(cpumask_first(cpumask), cpumask); cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpumask)) { cpu_node = of_cpu_device_node_get(cpu); - curr_state_node = of_parse_phandle(cpu_node, "cpu-idle-states", - idx); + curr_state_node = of_get_cpu_state_node(cpu_node, idx); if (state_node != curr_state_node) valid = false; @@ -170,7 +169,7 @@ int dt_init_idle_driver(struct cpuidle_driver *drv, cpu_node = of_cpu_device_node_get(cpumask_first(cpumask)); for (i = 0; ; i++) { - state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i); + state_node = of_get_cpu_state_node(cpu_node, i); if (!state_node) break; diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c index de7e706efd46..6deaaf5f05b5 100644 --- a/drivers/cpuidle/governors/teo.c +++ b/drivers/cpuidle/governors/teo.c @@ -198,7 +198,7 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev) * pattern detection. */ cpu_data->intervals[cpu_data->interval_idx++] = measured_ns; - if (cpu_data->interval_idx > INTERVALS) + if (cpu_data->interval_idx >= INTERVALS) cpu_data->interval_idx = 0; } diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c index 38ef770be90d..cdeedbf02646 100644 --- a/drivers/cpuidle/sysfs.c +++ b/drivers/cpuidle/sysfs.c @@ -142,6 +142,7 @@ static struct attribute_group cpuidle_attr_group = { /** * cpuidle_add_interface - add CPU global sysfs attributes + * @dev: the target device */ int cpuidle_add_interface(struct device *dev) { @@ -153,6 +154,7 @@ int cpuidle_add_interface(struct device *dev) /** * cpuidle_remove_interface - remove CPU global sysfs attributes + * @dev: the target device */ void cpuidle_remove_interface(struct device *dev) { @@ -327,6 +329,14 @@ static ssize_t store_state_disable(struct cpuidle_state *state, return size; } +static ssize_t show_state_default_status(struct cpuidle_state *state, + struct cpuidle_state_usage *state_usage, + char *buf) +{ + return sprintf(buf, "%s\n", + state->flags & CPUIDLE_FLAG_OFF ? 
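[Editor's note] The one-character teo.c fix above closes an off-by-one: interval_idx is incremented after the store, so it must wrap as soon as it reaches INTERVALS, not only once it exceeds it. The pattern in miniature, with hypothetical names:

#define EXAMPLE_INTERVALS 8

static unsigned int example_idx;
static unsigned long long example_buf[EXAMPLE_INTERVALS];

static void example_record(unsigned long long v)
{
	example_buf[example_idx++] = v;
	if (example_idx >= EXAMPLE_INTERVALS)	/* with '>' the next call would
						   write one past the array end */
		example_idx = 0;
}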
"disabled" : "enabled"); +} + define_one_state_ro(name, show_state_name); define_one_state_ro(desc, show_state_desc); define_one_state_ro(latency, show_state_exit_latency); @@ -337,6 +347,7 @@ define_one_state_ro(time, show_state_time); define_one_state_rw(disable, show_state_disable, store_state_disable); define_one_state_ro(above, show_state_above); define_one_state_ro(below, show_state_below); +define_one_state_ro(default_status, show_state_default_status); static struct attribute *cpuidle_state_default_attrs[] = { &attr_name.attr, @@ -349,6 +360,7 @@ static struct attribute *cpuidle_state_default_attrs[] = { &attr_disable.attr, &attr_above.attr, &attr_below.attr, + &attr_default_status.attr, NULL }; @@ -615,7 +627,7 @@ static struct kobj_type ktype_driver_cpuidle = { /** * cpuidle_add_driver_sysfs - adds the driver name sysfs attribute - * @device: the target device + * @dev: the target device */ static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev) { @@ -646,7 +658,7 @@ static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev) /** * cpuidle_remove_driver_sysfs - removes the driver name sysfs attribute - * @device: the target device + * @dev: the target device */ static void cpuidle_remove_driver_sysfs(struct cpuidle_device *dev) { |