From e720a6c8fbdb86dcbb493d432e632dfafe6381cc Mon Sep 17 00:00:00 2001
From: Ulf Hansson
Date: Wed, 10 Apr 2019 10:20:21 +0200
Subject: drivers: firmware: psci: Move psci to separate directory

Some subsequent changes extend the PSCI driver with additional files.
To avoid further cluttering the top-level firmware directory, first
move the PSCI files into a PSCI sub-directory.

Suggested-by: Mark Rutland
Signed-off-by: Ulf Hansson
Acked-by: Daniel Lezcano
Acked-by: Mark Rutland
Acked-by: Lorenzo Pieralisi
Signed-off-by: Rafael J. Wysocki
---
 drivers/firmware/Kconfig             |  15 +-
 drivers/firmware/Makefile            |   3 +-
 drivers/firmware/psci.c              | 708 -----------------------------------
 drivers/firmware/psci/Kconfig        |  13 +
 drivers/firmware/psci/Makefile       |   4 +
 drivers/firmware/psci/psci.c         | 708 +++++++++++++++++++++++++++++++++++
 drivers/firmware/psci/psci_checker.c | 505 +++++++++++++++++++++++++
 drivers/firmware/psci_checker.c      | 505 -------------------------
 8 files changed, 1232 insertions(+), 1229 deletions(-)
 delete mode 100644 drivers/firmware/psci.c
 create mode 100644 drivers/firmware/psci/Kconfig
 create mode 100644 drivers/firmware/psci/Makefile
 create mode 100644 drivers/firmware/psci/psci.c
 create mode 100644 drivers/firmware/psci/psci_checker.c
 delete mode 100644 drivers/firmware/psci_checker.c

diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index cac16c4b0df3..7b655f6156fb 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -5,20 +5,6 @@
 
 menu "Firmware Drivers"
 
-config ARM_PSCI_FW
-	bool
-
-config ARM_PSCI_CHECKER
-	bool "ARM PSCI checker"
-	depends on ARM_PSCI_FW && HOTPLUG_CPU && CPU_IDLE && !TORTURE_TEST
-	help
-	  Run the PSCI checker during startup. This checks that hotplug and
-	  suspend operations work correctly when using PSCI.
-
-	  The torture tests may interfere with the PSCI checker by turning CPUs
-	  on and off through hotplug, so for now torture tests and PSCI checker
-	  are mutually exclusive.
-
 config ARM_SCMI_PROTOCOL
 	bool "ARM System Control and Management Interface (SCMI) Message Protocol"
 	depends on ARM || ARM64 || COMPILE_TEST
@@ -270,6 +256,7 @@ config TI_SCI_PROTOCOL
 config HAVE_ARM_SMCCC
 	bool
 
+source "drivers/firmware/psci/Kconfig"
 source "drivers/firmware/broadcom/Kconfig"
 source "drivers/firmware/google/Kconfig"
 source "drivers/firmware/efi/Kconfig"
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 80feb635120f..9a3909a22682 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -2,8 +2,6 @@
 #
 # Makefile for the linux kernel.
 #
-obj-$(CONFIG_ARM_PSCI_FW)	+= psci.o
-obj-$(CONFIG_ARM_PSCI_CHECKER)	+= psci_checker.o
 obj-$(CONFIG_ARM_SCPI_PROTOCOL)	+= arm_scpi.o
 obj-$(CONFIG_ARM_SCPI_POWER_DOMAIN) += scpi_pm_domain.o
 obj-$(CONFIG_ARM_SDE_INTERFACE)	+= arm_sdei.o
@@ -25,6 +23,7 @@ CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch armv7-a\n.arch_extension sec,-DREQUIRES_SEC=1) -march=armv7-a
 obj-$(CONFIG_TI_SCI_PROTOCOL)	+= ti_sci.o
 
 obj-$(CONFIG_ARM_SCMI_PROTOCOL)	+= arm_scmi/
+obj-y				+= psci/
 obj-y				+= broadcom/
 obj-y				+= meson/
 obj-$(CONFIG_GOOGLE_FIRMWARE)	+= google/
diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
deleted file mode 100644
index c80ec1d03274..000000000000
--- a/drivers/firmware/psci.c
+++ /dev/null
@@ -1,708 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * Copyright (C) 2015 ARM Limited - */ - -#define pr_fmt(fmt) "psci: " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include -#include -#include -#include -#include - -/* - * While a 64-bit OS can make calls with SMC32 calling conventions, for some - * calls it is necessary to use SMC64 to pass or return 64-bit values. - * For such calls PSCI_FN_NATIVE(version, name) will choose the appropriate - * (native-width) function ID. - */ -#ifdef CONFIG_64BIT -#define PSCI_FN_NATIVE(version, name) PSCI_##version##_FN64_##name -#else -#define PSCI_FN_NATIVE(version, name) PSCI_##version##_FN_##name -#endif - -/* - * The CPU any Trusted OS is resident on. The trusted OS may reject CPU_OFF - * calls to its resident CPU, so we must avoid issuing those. We never migrate - * a Trusted OS even if it claims to be capable of migration -- doing so will - * require cooperation with a Trusted OS driver. - */ -static int resident_cpu = -1; - -bool psci_tos_resident_on(int cpu) -{ - return cpu == resident_cpu; -} - -struct psci_operations psci_ops = { - .conduit = PSCI_CONDUIT_NONE, - .smccc_version = SMCCC_VERSION_1_0, -}; - -typedef unsigned long (psci_fn)(unsigned long, unsigned long, - unsigned long, unsigned long); -static psci_fn *invoke_psci_fn; - -enum psci_function { - PSCI_FN_CPU_SUSPEND, - PSCI_FN_CPU_ON, - PSCI_FN_CPU_OFF, - PSCI_FN_MIGRATE, - PSCI_FN_MAX, -}; - -static u32 psci_function_id[PSCI_FN_MAX]; - -#define PSCI_0_2_POWER_STATE_MASK \ - (PSCI_0_2_POWER_STATE_ID_MASK | \ - PSCI_0_2_POWER_STATE_TYPE_MASK | \ - PSCI_0_2_POWER_STATE_AFFL_MASK) - -#define PSCI_1_0_EXT_POWER_STATE_MASK \ - (PSCI_1_0_EXT_POWER_STATE_ID_MASK | \ - PSCI_1_0_EXT_POWER_STATE_TYPE_MASK) - -static u32 psci_cpu_suspend_feature; - -static inline bool psci_has_ext_power_state(void) -{ - return psci_cpu_suspend_feature & - PSCI_1_0_FEATURES_CPU_SUSPEND_PF_MASK; -} - -static inline bool psci_power_state_loses_context(u32 state) -{ - const u32 mask = psci_has_ext_power_state() ? - PSCI_1_0_EXT_POWER_STATE_TYPE_MASK : - PSCI_0_2_POWER_STATE_TYPE_MASK; - - return state & mask; -} - -static inline bool psci_power_state_is_valid(u32 state) -{ - const u32 valid_mask = psci_has_ext_power_state() ? 
- PSCI_1_0_EXT_POWER_STATE_MASK : - PSCI_0_2_POWER_STATE_MASK; - - return !(state & ~valid_mask); -} - -static unsigned long __invoke_psci_fn_hvc(unsigned long function_id, - unsigned long arg0, unsigned long arg1, - unsigned long arg2) -{ - struct arm_smccc_res res; - - arm_smccc_hvc(function_id, arg0, arg1, arg2, 0, 0, 0, 0, &res); - return res.a0; -} - -static unsigned long __invoke_psci_fn_smc(unsigned long function_id, - unsigned long arg0, unsigned long arg1, - unsigned long arg2) -{ - struct arm_smccc_res res; - - arm_smccc_smc(function_id, arg0, arg1, arg2, 0, 0, 0, 0, &res); - return res.a0; -} - -static int psci_to_linux_errno(int errno) -{ - switch (errno) { - case PSCI_RET_SUCCESS: - return 0; - case PSCI_RET_NOT_SUPPORTED: - return -EOPNOTSUPP; - case PSCI_RET_INVALID_PARAMS: - case PSCI_RET_INVALID_ADDRESS: - return -EINVAL; - case PSCI_RET_DENIED: - return -EPERM; - }; - - return -EINVAL; -} - -static u32 psci_get_version(void) -{ - return invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0); -} - -static int psci_cpu_suspend(u32 state, unsigned long entry_point) -{ - int err; - u32 fn; - - fn = psci_function_id[PSCI_FN_CPU_SUSPEND]; - err = invoke_psci_fn(fn, state, entry_point, 0); - return psci_to_linux_errno(err); -} - -static int psci_cpu_off(u32 state) -{ - int err; - u32 fn; - - fn = psci_function_id[PSCI_FN_CPU_OFF]; - err = invoke_psci_fn(fn, state, 0, 0); - return psci_to_linux_errno(err); -} - -static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point) -{ - int err; - u32 fn; - - fn = psci_function_id[PSCI_FN_CPU_ON]; - err = invoke_psci_fn(fn, cpuid, entry_point, 0); - return psci_to_linux_errno(err); -} - -static int psci_migrate(unsigned long cpuid) -{ - int err; - u32 fn; - - fn = psci_function_id[PSCI_FN_MIGRATE]; - err = invoke_psci_fn(fn, cpuid, 0, 0); - return psci_to_linux_errno(err); -} - -static int psci_affinity_info(unsigned long target_affinity, - unsigned long lowest_affinity_level) -{ - return invoke_psci_fn(PSCI_FN_NATIVE(0_2, AFFINITY_INFO), - target_affinity, lowest_affinity_level, 0); -} - -static int psci_migrate_info_type(void) -{ - return invoke_psci_fn(PSCI_0_2_FN_MIGRATE_INFO_TYPE, 0, 0, 0); -} - -static unsigned long psci_migrate_info_up_cpu(void) -{ - return invoke_psci_fn(PSCI_FN_NATIVE(0_2, MIGRATE_INFO_UP_CPU), - 0, 0, 0); -} - -static void set_conduit(enum psci_conduit conduit) -{ - switch (conduit) { - case PSCI_CONDUIT_HVC: - invoke_psci_fn = __invoke_psci_fn_hvc; - break; - case PSCI_CONDUIT_SMC: - invoke_psci_fn = __invoke_psci_fn_smc; - break; - default: - WARN(1, "Unexpected PSCI conduit %d\n", conduit); - } - - psci_ops.conduit = conduit; -} - -static int get_set_conduit_method(struct device_node *np) -{ - const char *method; - - pr_info("probing for conduit method from DT.\n"); - - if (of_property_read_string(np, "method", &method)) { - pr_warn("missing \"method\" property\n"); - return -ENXIO; - } - - if (!strcmp("hvc", method)) { - set_conduit(PSCI_CONDUIT_HVC); - } else if (!strcmp("smc", method)) { - set_conduit(PSCI_CONDUIT_SMC); - } else { - pr_warn("invalid \"method\" property: %s\n", method); - return -EINVAL; - } - return 0; -} - -static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd) -{ - invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0); -} - -static void psci_sys_poweroff(void) -{ - invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0); -} - -static int __init psci_features(u32 psci_func_id) -{ - return invoke_psci_fn(PSCI_1_0_FN_PSCI_FEATURES, - psci_func_id, 0, 0); -} - -#ifdef 
CONFIG_CPU_IDLE -static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state); - -static int psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu) -{ - int i, ret, count = 0; - u32 *psci_states; - struct device_node *state_node; - - /* Count idle states */ - while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states", - count))) { - count++; - of_node_put(state_node); - } - - if (!count) - return -ENODEV; - - psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL); - if (!psci_states) - return -ENOMEM; - - for (i = 0; i < count; i++) { - u32 state; - - state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i); - - ret = of_property_read_u32(state_node, - "arm,psci-suspend-param", - &state); - if (ret) { - pr_warn(" * %pOF missing arm,psci-suspend-param property\n", - state_node); - of_node_put(state_node); - goto free_mem; - } - - of_node_put(state_node); - pr_debug("psci-power-state %#x index %d\n", state, i); - if (!psci_power_state_is_valid(state)) { - pr_warn("Invalid PSCI power state %#x\n", state); - ret = -EINVAL; - goto free_mem; - } - psci_states[i] = state; - } - /* Idle states parsed correctly, initialize per-cpu pointer */ - per_cpu(psci_power_state, cpu) = psci_states; - return 0; - -free_mem: - kfree(psci_states); - return ret; -} - -#ifdef CONFIG_ACPI -#include - -static int __maybe_unused psci_acpi_cpu_init_idle(unsigned int cpu) -{ - int i, count; - u32 *psci_states; - struct acpi_lpi_state *lpi; - struct acpi_processor *pr = per_cpu(processors, cpu); - - if (unlikely(!pr || !pr->flags.has_lpi)) - return -EINVAL; - - count = pr->power.count - 1; - if (count <= 0) - return -ENODEV; - - psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL); - if (!psci_states) - return -ENOMEM; - - for (i = 0; i < count; i++) { - u32 state; - - lpi = &pr->power.lpi_states[i + 1]; - /* - * Only bits[31:0] represent a PSCI power_state while - * bits[63:32] must be 0x0 as per ARM ACPI FFH Specification - */ - state = lpi->address; - if (!psci_power_state_is_valid(state)) { - pr_warn("Invalid PSCI power state %#x\n", state); - kfree(psci_states); - return -EINVAL; - } - psci_states[i] = state; - } - /* Idle states parsed correctly, initialize per-cpu pointer */ - per_cpu(psci_power_state, cpu) = psci_states; - return 0; -} -#else -static int __maybe_unused psci_acpi_cpu_init_idle(unsigned int cpu) -{ - return -EINVAL; -} -#endif - -int psci_cpu_init_idle(unsigned int cpu) -{ - struct device_node *cpu_node; - int ret; - - /* - * If the PSCI cpu_suspend function hook has not been initialized - * idle states must not be enabled, so bail out - */ - if (!psci_ops.cpu_suspend) - return -EOPNOTSUPP; - - if (!acpi_disabled) - return psci_acpi_cpu_init_idle(cpu); - - cpu_node = of_get_cpu_node(cpu, NULL); - if (!cpu_node) - return -ENODEV; - - ret = psci_dt_cpu_init_idle(cpu_node, cpu); - - of_node_put(cpu_node); - - return ret; -} - -static int psci_suspend_finisher(unsigned long index) -{ - u32 *state = __this_cpu_read(psci_power_state); - - return psci_ops.cpu_suspend(state[index - 1], - __pa_symbol(cpu_resume)); -} - -int psci_cpu_suspend_enter(unsigned long index) -{ - int ret; - u32 *state = __this_cpu_read(psci_power_state); - /* - * idle state index 0 corresponds to wfi, should never be called - * from the cpu_suspend operations - */ - if (WARN_ON_ONCE(!index)) - return -EINVAL; - - if (!psci_power_state_loses_context(state[index - 1])) - ret = psci_ops.cpu_suspend(state[index - 1], 0); - else - ret = cpu_suspend(index, psci_suspend_finisher); - - return ret; -} - 
-/* ARM specific CPU idle operations */ -#ifdef CONFIG_ARM -static const struct cpuidle_ops psci_cpuidle_ops __initconst = { - .suspend = psci_cpu_suspend_enter, - .init = psci_dt_cpu_init_idle, -}; - -CPUIDLE_METHOD_OF_DECLARE(psci, "psci", &psci_cpuidle_ops); -#endif -#endif - -static int psci_system_suspend(unsigned long unused) -{ - return invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND), - __pa_symbol(cpu_resume), 0, 0); -} - -static int psci_system_suspend_enter(suspend_state_t state) -{ - return cpu_suspend(0, psci_system_suspend); -} - -static const struct platform_suspend_ops psci_suspend_ops = { - .valid = suspend_valid_only_mem, - .enter = psci_system_suspend_enter, -}; - -static void __init psci_init_system_suspend(void) -{ - int ret; - - if (!IS_ENABLED(CONFIG_SUSPEND)) - return; - - ret = psci_features(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND)); - - if (ret != PSCI_RET_NOT_SUPPORTED) - suspend_set_ops(&psci_suspend_ops); -} - -static void __init psci_init_cpu_suspend(void) -{ - int feature = psci_features(psci_function_id[PSCI_FN_CPU_SUSPEND]); - - if (feature != PSCI_RET_NOT_SUPPORTED) - psci_cpu_suspend_feature = feature; -} - -/* - * Detect the presence of a resident Trusted OS which may cause CPU_OFF to - * return DENIED (which would be fatal). - */ -static void __init psci_init_migrate(void) -{ - unsigned long cpuid; - int type, cpu = -1; - - type = psci_ops.migrate_info_type(); - - if (type == PSCI_0_2_TOS_MP) { - pr_info("Trusted OS migration not required\n"); - return; - } - - if (type == PSCI_RET_NOT_SUPPORTED) { - pr_info("MIGRATE_INFO_TYPE not supported.\n"); - return; - } - - if (type != PSCI_0_2_TOS_UP_MIGRATE && - type != PSCI_0_2_TOS_UP_NO_MIGRATE) { - pr_err("MIGRATE_INFO_TYPE returned unknown type (%d)\n", type); - return; - } - - cpuid = psci_migrate_info_up_cpu(); - if (cpuid & ~MPIDR_HWID_BITMASK) { - pr_warn("MIGRATE_INFO_UP_CPU reported invalid physical ID (0x%lx)\n", - cpuid); - return; - } - - cpu = get_logical_index(cpuid); - resident_cpu = cpu >= 0 ? cpu : -1; - - pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid); -} - -static void __init psci_init_smccc(void) -{ - u32 ver = ARM_SMCCC_VERSION_1_0; - int feature; - - feature = psci_features(ARM_SMCCC_VERSION_FUNC_ID); - - if (feature != PSCI_RET_NOT_SUPPORTED) { - u32 ret; - ret = invoke_psci_fn(ARM_SMCCC_VERSION_FUNC_ID, 0, 0, 0); - if (ret == ARM_SMCCC_VERSION_1_1) { - psci_ops.smccc_version = SMCCC_VERSION_1_1; - ver = ret; - } - } - - /* - * Conveniently, the SMCCC and PSCI versions are encoded the - * same way. No, this isn't accidental. 
- */ - pr_info("SMC Calling Convention v%d.%d\n", - PSCI_VERSION_MAJOR(ver), PSCI_VERSION_MINOR(ver)); - -} - -static void __init psci_0_2_set_functions(void) -{ - pr_info("Using standard PSCI v0.2 function IDs\n"); - psci_ops.get_version = psci_get_version; - - psci_function_id[PSCI_FN_CPU_SUSPEND] = - PSCI_FN_NATIVE(0_2, CPU_SUSPEND); - psci_ops.cpu_suspend = psci_cpu_suspend; - - psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF; - psci_ops.cpu_off = psci_cpu_off; - - psci_function_id[PSCI_FN_CPU_ON] = PSCI_FN_NATIVE(0_2, CPU_ON); - psci_ops.cpu_on = psci_cpu_on; - - psci_function_id[PSCI_FN_MIGRATE] = PSCI_FN_NATIVE(0_2, MIGRATE); - psci_ops.migrate = psci_migrate; - - psci_ops.affinity_info = psci_affinity_info; - - psci_ops.migrate_info_type = psci_migrate_info_type; - - arm_pm_restart = psci_sys_reset; - - pm_power_off = psci_sys_poweroff; -} - -/* - * Probe function for PSCI firmware versions >= 0.2 - */ -static int __init psci_probe(void) -{ - u32 ver = psci_get_version(); - - pr_info("PSCIv%d.%d detected in firmware.\n", - PSCI_VERSION_MAJOR(ver), - PSCI_VERSION_MINOR(ver)); - - if (PSCI_VERSION_MAJOR(ver) == 0 && PSCI_VERSION_MINOR(ver) < 2) { - pr_err("Conflicting PSCI version detected.\n"); - return -EINVAL; - } - - psci_0_2_set_functions(); - - psci_init_migrate(); - - if (PSCI_VERSION_MAJOR(ver) >= 1) { - psci_init_smccc(); - psci_init_cpu_suspend(); - psci_init_system_suspend(); - } - - return 0; -} - -typedef int (*psci_initcall_t)(const struct device_node *); - -/* - * PSCI init function for PSCI versions >=0.2 - * - * Probe based on PSCI PSCI_VERSION function - */ -static int __init psci_0_2_init(struct device_node *np) -{ - int err; - - err = get_set_conduit_method(np); - - if (err) - goto out_put_node; - /* - * Starting with v0.2, the PSCI specification introduced a call - * (PSCI_VERSION) that allows probing the firmware version, so - * that PSCI function IDs and version specific initialization - * can be carried out according to the specific version reported - * by firmware - */ - err = psci_probe(); - -out_put_node: - of_node_put(np); - return err; -} - -/* - * PSCI < v0.2 get PSCI Function IDs via DT. 
- */ -static int __init psci_0_1_init(struct device_node *np) -{ - u32 id; - int err; - - err = get_set_conduit_method(np); - - if (err) - goto out_put_node; - - pr_info("Using PSCI v0.1 Function IDs from DT\n"); - - if (!of_property_read_u32(np, "cpu_suspend", &id)) { - psci_function_id[PSCI_FN_CPU_SUSPEND] = id; - psci_ops.cpu_suspend = psci_cpu_suspend; - } - - if (!of_property_read_u32(np, "cpu_off", &id)) { - psci_function_id[PSCI_FN_CPU_OFF] = id; - psci_ops.cpu_off = psci_cpu_off; - } - - if (!of_property_read_u32(np, "cpu_on", &id)) { - psci_function_id[PSCI_FN_CPU_ON] = id; - psci_ops.cpu_on = psci_cpu_on; - } - - if (!of_property_read_u32(np, "migrate", &id)) { - psci_function_id[PSCI_FN_MIGRATE] = id; - psci_ops.migrate = psci_migrate; - } - -out_put_node: - of_node_put(np); - return err; -} - -static const struct of_device_id psci_of_match[] __initconst = { - { .compatible = "arm,psci", .data = psci_0_1_init}, - { .compatible = "arm,psci-0.2", .data = psci_0_2_init}, - { .compatible = "arm,psci-1.0", .data = psci_0_2_init}, - {}, -}; - -int __init psci_dt_init(void) -{ - struct device_node *np; - const struct of_device_id *matched_np; - psci_initcall_t init_fn; - - np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np); - - if (!np || !of_device_is_available(np)) - return -ENODEV; - - init_fn = (psci_initcall_t)matched_np->data; - return init_fn(np); -} - -#ifdef CONFIG_ACPI -/* - * We use PSCI 0.2+ when ACPI is deployed on ARM64 and it's - * explicitly clarified in SBBR - */ -int __init psci_acpi_init(void) -{ - if (!acpi_psci_present()) { - pr_info("is not implemented in ACPI.\n"); - return -EOPNOTSUPP; - } - - pr_info("probing for conduit method from ACPI.\n"); - - if (acpi_psci_use_hvc()) - set_conduit(PSCI_CONDUIT_HVC); - else - set_conduit(PSCI_CONDUIT_SMC); - - return psci_probe(); -} -#endif diff --git a/drivers/firmware/psci/Kconfig b/drivers/firmware/psci/Kconfig new file mode 100644 index 000000000000..26a3b32bf7ab --- /dev/null +++ b/drivers/firmware/psci/Kconfig @@ -0,0 +1,13 @@ +config ARM_PSCI_FW + bool + +config ARM_PSCI_CHECKER + bool "ARM PSCI checker" + depends on ARM_PSCI_FW && HOTPLUG_CPU && CPU_IDLE && !TORTURE_TEST + help + Run the PSCI checker during startup. This checks that hotplug and + suspend operations work correctly when using PSCI. + + The torture tests may interfere with the PSCI checker by turning CPUs + on and off through hotplug, so for now torture tests and PSCI checker + are mutually exclusive. diff --git a/drivers/firmware/psci/Makefile b/drivers/firmware/psci/Makefile new file mode 100644 index 000000000000..1956b882470f --- /dev/null +++ b/drivers/firmware/psci/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 +# +obj-$(CONFIG_ARM_PSCI_FW) += psci.o +obj-$(CONFIG_ARM_PSCI_CHECKER) += psci_checker.o diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c new file mode 100644 index 000000000000..c80ec1d03274 --- /dev/null +++ b/drivers/firmware/psci/psci.c @@ -0,0 +1,708 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Copyright (C) 2015 ARM Limited + */ + +#define pr_fmt(fmt) "psci: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include + +/* + * While a 64-bit OS can make calls with SMC32 calling conventions, for some + * calls it is necessary to use SMC64 to pass or return 64-bit values. + * For such calls PSCI_FN_NATIVE(version, name) will choose the appropriate + * (native-width) function ID. + */ +#ifdef CONFIG_64BIT +#define PSCI_FN_NATIVE(version, name) PSCI_##version##_FN64_##name +#else +#define PSCI_FN_NATIVE(version, name) PSCI_##version##_FN_##name +#endif + +/* + * The CPU any Trusted OS is resident on. The trusted OS may reject CPU_OFF + * calls to its resident CPU, so we must avoid issuing those. We never migrate + * a Trusted OS even if it claims to be capable of migration -- doing so will + * require cooperation with a Trusted OS driver. + */ +static int resident_cpu = -1; + +bool psci_tos_resident_on(int cpu) +{ + return cpu == resident_cpu; +} + +struct psci_operations psci_ops = { + .conduit = PSCI_CONDUIT_NONE, + .smccc_version = SMCCC_VERSION_1_0, +}; + +typedef unsigned long (psci_fn)(unsigned long, unsigned long, + unsigned long, unsigned long); +static psci_fn *invoke_psci_fn; + +enum psci_function { + PSCI_FN_CPU_SUSPEND, + PSCI_FN_CPU_ON, + PSCI_FN_CPU_OFF, + PSCI_FN_MIGRATE, + PSCI_FN_MAX, +}; + +static u32 psci_function_id[PSCI_FN_MAX]; + +#define PSCI_0_2_POWER_STATE_MASK \ + (PSCI_0_2_POWER_STATE_ID_MASK | \ + PSCI_0_2_POWER_STATE_TYPE_MASK | \ + PSCI_0_2_POWER_STATE_AFFL_MASK) + +#define PSCI_1_0_EXT_POWER_STATE_MASK \ + (PSCI_1_0_EXT_POWER_STATE_ID_MASK | \ + PSCI_1_0_EXT_POWER_STATE_TYPE_MASK) + +static u32 psci_cpu_suspend_feature; + +static inline bool psci_has_ext_power_state(void) +{ + return psci_cpu_suspend_feature & + PSCI_1_0_FEATURES_CPU_SUSPEND_PF_MASK; +} + +static inline bool psci_power_state_loses_context(u32 state) +{ + const u32 mask = psci_has_ext_power_state() ? + PSCI_1_0_EXT_POWER_STATE_TYPE_MASK : + PSCI_0_2_POWER_STATE_TYPE_MASK; + + return state & mask; +} + +static inline bool psci_power_state_is_valid(u32 state) +{ + const u32 valid_mask = psci_has_ext_power_state() ? 
+ PSCI_1_0_EXT_POWER_STATE_MASK : + PSCI_0_2_POWER_STATE_MASK; + + return !(state & ~valid_mask); +} + +static unsigned long __invoke_psci_fn_hvc(unsigned long function_id, + unsigned long arg0, unsigned long arg1, + unsigned long arg2) +{ + struct arm_smccc_res res; + + arm_smccc_hvc(function_id, arg0, arg1, arg2, 0, 0, 0, 0, &res); + return res.a0; +} + +static unsigned long __invoke_psci_fn_smc(unsigned long function_id, + unsigned long arg0, unsigned long arg1, + unsigned long arg2) +{ + struct arm_smccc_res res; + + arm_smccc_smc(function_id, arg0, arg1, arg2, 0, 0, 0, 0, &res); + return res.a0; +} + +static int psci_to_linux_errno(int errno) +{ + switch (errno) { + case PSCI_RET_SUCCESS: + return 0; + case PSCI_RET_NOT_SUPPORTED: + return -EOPNOTSUPP; + case PSCI_RET_INVALID_PARAMS: + case PSCI_RET_INVALID_ADDRESS: + return -EINVAL; + case PSCI_RET_DENIED: + return -EPERM; + }; + + return -EINVAL; +} + +static u32 psci_get_version(void) +{ + return invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0); +} + +static int psci_cpu_suspend(u32 state, unsigned long entry_point) +{ + int err; + u32 fn; + + fn = psci_function_id[PSCI_FN_CPU_SUSPEND]; + err = invoke_psci_fn(fn, state, entry_point, 0); + return psci_to_linux_errno(err); +} + +static int psci_cpu_off(u32 state) +{ + int err; + u32 fn; + + fn = psci_function_id[PSCI_FN_CPU_OFF]; + err = invoke_psci_fn(fn, state, 0, 0); + return psci_to_linux_errno(err); +} + +static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point) +{ + int err; + u32 fn; + + fn = psci_function_id[PSCI_FN_CPU_ON]; + err = invoke_psci_fn(fn, cpuid, entry_point, 0); + return psci_to_linux_errno(err); +} + +static int psci_migrate(unsigned long cpuid) +{ + int err; + u32 fn; + + fn = psci_function_id[PSCI_FN_MIGRATE]; + err = invoke_psci_fn(fn, cpuid, 0, 0); + return psci_to_linux_errno(err); +} + +static int psci_affinity_info(unsigned long target_affinity, + unsigned long lowest_affinity_level) +{ + return invoke_psci_fn(PSCI_FN_NATIVE(0_2, AFFINITY_INFO), + target_affinity, lowest_affinity_level, 0); +} + +static int psci_migrate_info_type(void) +{ + return invoke_psci_fn(PSCI_0_2_FN_MIGRATE_INFO_TYPE, 0, 0, 0); +} + +static unsigned long psci_migrate_info_up_cpu(void) +{ + return invoke_psci_fn(PSCI_FN_NATIVE(0_2, MIGRATE_INFO_UP_CPU), + 0, 0, 0); +} + +static void set_conduit(enum psci_conduit conduit) +{ + switch (conduit) { + case PSCI_CONDUIT_HVC: + invoke_psci_fn = __invoke_psci_fn_hvc; + break; + case PSCI_CONDUIT_SMC: + invoke_psci_fn = __invoke_psci_fn_smc; + break; + default: + WARN(1, "Unexpected PSCI conduit %d\n", conduit); + } + + psci_ops.conduit = conduit; +} + +static int get_set_conduit_method(struct device_node *np) +{ + const char *method; + + pr_info("probing for conduit method from DT.\n"); + + if (of_property_read_string(np, "method", &method)) { + pr_warn("missing \"method\" property\n"); + return -ENXIO; + } + + if (!strcmp("hvc", method)) { + set_conduit(PSCI_CONDUIT_HVC); + } else if (!strcmp("smc", method)) { + set_conduit(PSCI_CONDUIT_SMC); + } else { + pr_warn("invalid \"method\" property: %s\n", method); + return -EINVAL; + } + return 0; +} + +static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd) +{ + invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0); +} + +static void psci_sys_poweroff(void) +{ + invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0); +} + +static int __init psci_features(u32 psci_func_id) +{ + return invoke_psci_fn(PSCI_1_0_FN_PSCI_FEATURES, + psci_func_id, 0, 0); +} + +#ifdef 
CONFIG_CPU_IDLE +static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state); + +static int psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu) +{ + int i, ret, count = 0; + u32 *psci_states; + struct device_node *state_node; + + /* Count idle states */ + while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states", + count))) { + count++; + of_node_put(state_node); + } + + if (!count) + return -ENODEV; + + psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL); + if (!psci_states) + return -ENOMEM; + + for (i = 0; i < count; i++) { + u32 state; + + state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i); + + ret = of_property_read_u32(state_node, + "arm,psci-suspend-param", + &state); + if (ret) { + pr_warn(" * %pOF missing arm,psci-suspend-param property\n", + state_node); + of_node_put(state_node); + goto free_mem; + } + + of_node_put(state_node); + pr_debug("psci-power-state %#x index %d\n", state, i); + if (!psci_power_state_is_valid(state)) { + pr_warn("Invalid PSCI power state %#x\n", state); + ret = -EINVAL; + goto free_mem; + } + psci_states[i] = state; + } + /* Idle states parsed correctly, initialize per-cpu pointer */ + per_cpu(psci_power_state, cpu) = psci_states; + return 0; + +free_mem: + kfree(psci_states); + return ret; +} + +#ifdef CONFIG_ACPI +#include + +static int __maybe_unused psci_acpi_cpu_init_idle(unsigned int cpu) +{ + int i, count; + u32 *psci_states; + struct acpi_lpi_state *lpi; + struct acpi_processor *pr = per_cpu(processors, cpu); + + if (unlikely(!pr || !pr->flags.has_lpi)) + return -EINVAL; + + count = pr->power.count - 1; + if (count <= 0) + return -ENODEV; + + psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL); + if (!psci_states) + return -ENOMEM; + + for (i = 0; i < count; i++) { + u32 state; + + lpi = &pr->power.lpi_states[i + 1]; + /* + * Only bits[31:0] represent a PSCI power_state while + * bits[63:32] must be 0x0 as per ARM ACPI FFH Specification + */ + state = lpi->address; + if (!psci_power_state_is_valid(state)) { + pr_warn("Invalid PSCI power state %#x\n", state); + kfree(psci_states); + return -EINVAL; + } + psci_states[i] = state; + } + /* Idle states parsed correctly, initialize per-cpu pointer */ + per_cpu(psci_power_state, cpu) = psci_states; + return 0; +} +#else +static int __maybe_unused psci_acpi_cpu_init_idle(unsigned int cpu) +{ + return -EINVAL; +} +#endif + +int psci_cpu_init_idle(unsigned int cpu) +{ + struct device_node *cpu_node; + int ret; + + /* + * If the PSCI cpu_suspend function hook has not been initialized + * idle states must not be enabled, so bail out + */ + if (!psci_ops.cpu_suspend) + return -EOPNOTSUPP; + + if (!acpi_disabled) + return psci_acpi_cpu_init_idle(cpu); + + cpu_node = of_get_cpu_node(cpu, NULL); + if (!cpu_node) + return -ENODEV; + + ret = psci_dt_cpu_init_idle(cpu_node, cpu); + + of_node_put(cpu_node); + + return ret; +} + +static int psci_suspend_finisher(unsigned long index) +{ + u32 *state = __this_cpu_read(psci_power_state); + + return psci_ops.cpu_suspend(state[index - 1], + __pa_symbol(cpu_resume)); +} + +int psci_cpu_suspend_enter(unsigned long index) +{ + int ret; + u32 *state = __this_cpu_read(psci_power_state); + /* + * idle state index 0 corresponds to wfi, should never be called + * from the cpu_suspend operations + */ + if (WARN_ON_ONCE(!index)) + return -EINVAL; + + if (!psci_power_state_loses_context(state[index - 1])) + ret = psci_ops.cpu_suspend(state[index - 1], 0); + else + ret = cpu_suspend(index, psci_suspend_finisher); + + return ret; +} + 
+/* ARM specific CPU idle operations */ +#ifdef CONFIG_ARM +static const struct cpuidle_ops psci_cpuidle_ops __initconst = { + .suspend = psci_cpu_suspend_enter, + .init = psci_dt_cpu_init_idle, +}; + +CPUIDLE_METHOD_OF_DECLARE(psci, "psci", &psci_cpuidle_ops); +#endif +#endif + +static int psci_system_suspend(unsigned long unused) +{ + return invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND), + __pa_symbol(cpu_resume), 0, 0); +} + +static int psci_system_suspend_enter(suspend_state_t state) +{ + return cpu_suspend(0, psci_system_suspend); +} + +static const struct platform_suspend_ops psci_suspend_ops = { + .valid = suspend_valid_only_mem, + .enter = psci_system_suspend_enter, +}; + +static void __init psci_init_system_suspend(void) +{ + int ret; + + if (!IS_ENABLED(CONFIG_SUSPEND)) + return; + + ret = psci_features(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND)); + + if (ret != PSCI_RET_NOT_SUPPORTED) + suspend_set_ops(&psci_suspend_ops); +} + +static void __init psci_init_cpu_suspend(void) +{ + int feature = psci_features(psci_function_id[PSCI_FN_CPU_SUSPEND]); + + if (feature != PSCI_RET_NOT_SUPPORTED) + psci_cpu_suspend_feature = feature; +} + +/* + * Detect the presence of a resident Trusted OS which may cause CPU_OFF to + * return DENIED (which would be fatal). + */ +static void __init psci_init_migrate(void) +{ + unsigned long cpuid; + int type, cpu = -1; + + type = psci_ops.migrate_info_type(); + + if (type == PSCI_0_2_TOS_MP) { + pr_info("Trusted OS migration not required\n"); + return; + } + + if (type == PSCI_RET_NOT_SUPPORTED) { + pr_info("MIGRATE_INFO_TYPE not supported.\n"); + return; + } + + if (type != PSCI_0_2_TOS_UP_MIGRATE && + type != PSCI_0_2_TOS_UP_NO_MIGRATE) { + pr_err("MIGRATE_INFO_TYPE returned unknown type (%d)\n", type); + return; + } + + cpuid = psci_migrate_info_up_cpu(); + if (cpuid & ~MPIDR_HWID_BITMASK) { + pr_warn("MIGRATE_INFO_UP_CPU reported invalid physical ID (0x%lx)\n", + cpuid); + return; + } + + cpu = get_logical_index(cpuid); + resident_cpu = cpu >= 0 ? cpu : -1; + + pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid); +} + +static void __init psci_init_smccc(void) +{ + u32 ver = ARM_SMCCC_VERSION_1_0; + int feature; + + feature = psci_features(ARM_SMCCC_VERSION_FUNC_ID); + + if (feature != PSCI_RET_NOT_SUPPORTED) { + u32 ret; + ret = invoke_psci_fn(ARM_SMCCC_VERSION_FUNC_ID, 0, 0, 0); + if (ret == ARM_SMCCC_VERSION_1_1) { + psci_ops.smccc_version = SMCCC_VERSION_1_1; + ver = ret; + } + } + + /* + * Conveniently, the SMCCC and PSCI versions are encoded the + * same way. No, this isn't accidental. 
+ */ + pr_info("SMC Calling Convention v%d.%d\n", + PSCI_VERSION_MAJOR(ver), PSCI_VERSION_MINOR(ver)); + +} + +static void __init psci_0_2_set_functions(void) +{ + pr_info("Using standard PSCI v0.2 function IDs\n"); + psci_ops.get_version = psci_get_version; + + psci_function_id[PSCI_FN_CPU_SUSPEND] = + PSCI_FN_NATIVE(0_2, CPU_SUSPEND); + psci_ops.cpu_suspend = psci_cpu_suspend; + + psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF; + psci_ops.cpu_off = psci_cpu_off; + + psci_function_id[PSCI_FN_CPU_ON] = PSCI_FN_NATIVE(0_2, CPU_ON); + psci_ops.cpu_on = psci_cpu_on; + + psci_function_id[PSCI_FN_MIGRATE] = PSCI_FN_NATIVE(0_2, MIGRATE); + psci_ops.migrate = psci_migrate; + + psci_ops.affinity_info = psci_affinity_info; + + psci_ops.migrate_info_type = psci_migrate_info_type; + + arm_pm_restart = psci_sys_reset; + + pm_power_off = psci_sys_poweroff; +} + +/* + * Probe function for PSCI firmware versions >= 0.2 + */ +static int __init psci_probe(void) +{ + u32 ver = psci_get_version(); + + pr_info("PSCIv%d.%d detected in firmware.\n", + PSCI_VERSION_MAJOR(ver), + PSCI_VERSION_MINOR(ver)); + + if (PSCI_VERSION_MAJOR(ver) == 0 && PSCI_VERSION_MINOR(ver) < 2) { + pr_err("Conflicting PSCI version detected.\n"); + return -EINVAL; + } + + psci_0_2_set_functions(); + + psci_init_migrate(); + + if (PSCI_VERSION_MAJOR(ver) >= 1) { + psci_init_smccc(); + psci_init_cpu_suspend(); + psci_init_system_suspend(); + } + + return 0; +} + +typedef int (*psci_initcall_t)(const struct device_node *); + +/* + * PSCI init function for PSCI versions >=0.2 + * + * Probe based on PSCI PSCI_VERSION function + */ +static int __init psci_0_2_init(struct device_node *np) +{ + int err; + + err = get_set_conduit_method(np); + + if (err) + goto out_put_node; + /* + * Starting with v0.2, the PSCI specification introduced a call + * (PSCI_VERSION) that allows probing the firmware version, so + * that PSCI function IDs and version specific initialization + * can be carried out according to the specific version reported + * by firmware + */ + err = psci_probe(); + +out_put_node: + of_node_put(np); + return err; +} + +/* + * PSCI < v0.2 get PSCI Function IDs via DT. 
+ */ +static int __init psci_0_1_init(struct device_node *np) +{ + u32 id; + int err; + + err = get_set_conduit_method(np); + + if (err) + goto out_put_node; + + pr_info("Using PSCI v0.1 Function IDs from DT\n"); + + if (!of_property_read_u32(np, "cpu_suspend", &id)) { + psci_function_id[PSCI_FN_CPU_SUSPEND] = id; + psci_ops.cpu_suspend = psci_cpu_suspend; + } + + if (!of_property_read_u32(np, "cpu_off", &id)) { + psci_function_id[PSCI_FN_CPU_OFF] = id; + psci_ops.cpu_off = psci_cpu_off; + } + + if (!of_property_read_u32(np, "cpu_on", &id)) { + psci_function_id[PSCI_FN_CPU_ON] = id; + psci_ops.cpu_on = psci_cpu_on; + } + + if (!of_property_read_u32(np, "migrate", &id)) { + psci_function_id[PSCI_FN_MIGRATE] = id; + psci_ops.migrate = psci_migrate; + } + +out_put_node: + of_node_put(np); + return err; +} + +static const struct of_device_id psci_of_match[] __initconst = { + { .compatible = "arm,psci", .data = psci_0_1_init}, + { .compatible = "arm,psci-0.2", .data = psci_0_2_init}, + { .compatible = "arm,psci-1.0", .data = psci_0_2_init}, + {}, +}; + +int __init psci_dt_init(void) +{ + struct device_node *np; + const struct of_device_id *matched_np; + psci_initcall_t init_fn; + + np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np); + + if (!np || !of_device_is_available(np)) + return -ENODEV; + + init_fn = (psci_initcall_t)matched_np->data; + return init_fn(np); +} + +#ifdef CONFIG_ACPI +/* + * We use PSCI 0.2+ when ACPI is deployed on ARM64 and it's + * explicitly clarified in SBBR + */ +int __init psci_acpi_init(void) +{ + if (!acpi_psci_present()) { + pr_info("is not implemented in ACPI.\n"); + return -EOPNOTSUPP; + } + + pr_info("probing for conduit method from ACPI.\n"); + + if (acpi_psci_use_hvc()) + set_conduit(PSCI_CONDUIT_HVC); + else + set_conduit(PSCI_CONDUIT_SMC); + + return psci_probe(); +} +#endif diff --git a/drivers/firmware/psci/psci_checker.c b/drivers/firmware/psci/psci_checker.c new file mode 100644 index 000000000000..346943657962 --- /dev/null +++ b/drivers/firmware/psci/psci_checker.c @@ -0,0 +1,505 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Copyright (C) 2016 ARM Limited + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#define NUM_SUSPEND_CYCLE (10) + +static unsigned int nb_available_cpus; +static int tos_resident_cpu = -1; + +static atomic_t nb_active_threads; +static struct completion suspend_threads_started = + COMPLETION_INITIALIZER(suspend_threads_started); +static struct completion suspend_threads_done = + COMPLETION_INITIALIZER(suspend_threads_done); + +/* + * We assume that PSCI operations are used if they are available. This is not + * necessarily true on arm64, since the decision is based on the + * "enable-method" property of each CPU in the DT, but given that there is no + * arch-specific way to check this, we assume that the DT is sensible. 
+ */ +static int psci_ops_check(void) +{ + int migrate_type = -1; + int cpu; + + if (!(psci_ops.cpu_off && psci_ops.cpu_on && psci_ops.cpu_suspend)) { + pr_warn("Missing PSCI operations, aborting tests\n"); + return -EOPNOTSUPP; + } + + if (psci_ops.migrate_info_type) + migrate_type = psci_ops.migrate_info_type(); + + if (migrate_type == PSCI_0_2_TOS_UP_MIGRATE || + migrate_type == PSCI_0_2_TOS_UP_NO_MIGRATE) { + /* There is a UP Trusted OS, find on which core it resides. */ + for_each_online_cpu(cpu) + if (psci_tos_resident_on(cpu)) { + tos_resident_cpu = cpu; + break; + } + if (tos_resident_cpu == -1) + pr_warn("UP Trusted OS resides on no online CPU\n"); + } + + return 0; +} + +/* + * offlined_cpus is a temporary array but passing it as an argument avoids + * multiple allocations. + */ +static unsigned int down_and_up_cpus(const struct cpumask *cpus, + struct cpumask *offlined_cpus) +{ + int cpu; + int err = 0; + + cpumask_clear(offlined_cpus); + + /* Try to power down all CPUs in the mask. */ + for_each_cpu(cpu, cpus) { + int ret = cpu_down(cpu); + + /* + * cpu_down() checks the number of online CPUs before the TOS + * resident CPU. + */ + if (cpumask_weight(offlined_cpus) + 1 == nb_available_cpus) { + if (ret != -EBUSY) { + pr_err("Unexpected return code %d while trying " + "to power down last online CPU %d\n", + ret, cpu); + ++err; + } + } else if (cpu == tos_resident_cpu) { + if (ret != -EPERM) { + pr_err("Unexpected return code %d while trying " + "to power down TOS resident CPU %d\n", + ret, cpu); + ++err; + } + } else if (ret != 0) { + pr_err("Error occurred (%d) while trying " + "to power down CPU %d\n", ret, cpu); + ++err; + } + + if (ret == 0) + cpumask_set_cpu(cpu, offlined_cpus); + } + + /* Try to power up all the CPUs that have been offlined. */ + for_each_cpu(cpu, offlined_cpus) { + int ret = cpu_up(cpu); + + if (ret != 0) { + pr_err("Error occurred (%d) while trying " + "to power up CPU %d\n", ret, cpu); + ++err; + } else { + cpumask_clear_cpu(cpu, offlined_cpus); + } + } + + /* + * Something went bad at some point and some CPUs could not be turned + * back on. 
+ */ + WARN_ON(!cpumask_empty(offlined_cpus) || + num_online_cpus() != nb_available_cpus); + + return err; +} + +static void free_cpu_groups(int num, cpumask_var_t **pcpu_groups) +{ + int i; + cpumask_var_t *cpu_groups = *pcpu_groups; + + for (i = 0; i < num; ++i) + free_cpumask_var(cpu_groups[i]); + kfree(cpu_groups); +} + +static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups) +{ + int num_groups = 0; + cpumask_var_t tmp, *cpu_groups; + + if (!alloc_cpumask_var(&tmp, GFP_KERNEL)) + return -ENOMEM; + + cpu_groups = kcalloc(nb_available_cpus, sizeof(cpu_groups), + GFP_KERNEL); + if (!cpu_groups) + return -ENOMEM; + + cpumask_copy(tmp, cpu_online_mask); + + while (!cpumask_empty(tmp)) { + const struct cpumask *cpu_group = + topology_core_cpumask(cpumask_any(tmp)); + + if (!alloc_cpumask_var(&cpu_groups[num_groups], GFP_KERNEL)) { + free_cpu_groups(num_groups, &cpu_groups); + return -ENOMEM; + } + cpumask_copy(cpu_groups[num_groups++], cpu_group); + cpumask_andnot(tmp, tmp, cpu_group); + } + + free_cpumask_var(tmp); + *pcpu_groups = cpu_groups; + + return num_groups; +} + +static int hotplug_tests(void) +{ + int i, nb_cpu_group, err = -ENOMEM; + cpumask_var_t offlined_cpus, *cpu_groups; + char *page_buf; + + if (!alloc_cpumask_var(&offlined_cpus, GFP_KERNEL)) + return err; + + nb_cpu_group = alloc_init_cpu_groups(&cpu_groups); + if (nb_cpu_group < 0) + goto out_free_cpus; + page_buf = (char *)__get_free_page(GFP_KERNEL); + if (!page_buf) + goto out_free_cpu_groups; + + err = 0; + /* + * Of course the last CPU cannot be powered down and cpu_down() should + * refuse doing that. + */ + pr_info("Trying to turn off and on again all CPUs\n"); + err += down_and_up_cpus(cpu_online_mask, offlined_cpus); + + /* + * Take down CPUs by cpu group this time. When the last CPU is turned + * off, the cpu group itself should shut down. + */ + for (i = 0; i < nb_cpu_group; ++i) { + ssize_t len = cpumap_print_to_pagebuf(true, page_buf, + cpu_groups[i]); + /* Remove trailing newline. */ + page_buf[len - 1] = '\0'; + pr_info("Trying to turn off and on again group %d (CPUs %s)\n", + i, page_buf); + err += down_and_up_cpus(cpu_groups[i], offlined_cpus); + } + + free_page((unsigned long)page_buf); +out_free_cpu_groups: + free_cpu_groups(nb_cpu_group, &cpu_groups); +out_free_cpus: + free_cpumask_var(offlined_cpus); + return err; +} + +static void dummy_callback(struct timer_list *unused) {} + +static int suspend_cpu(int index, bool broadcast) +{ + int ret; + + arch_cpu_idle_enter(); + + if (broadcast) { + /* + * The local timer will be shut down, we need to enter tick + * broadcast. + */ + ret = tick_broadcast_enter(); + if (ret) { + /* + * In the absence of hardware broadcast mechanism, + * this CPU might be used to broadcast wakeups, which + * may be why entering tick broadcast has failed. + * There is little the kernel can do to work around + * that, so enter WFI instead (idle state 0). + */ + cpu_do_idle(); + ret = 0; + goto out_arch_exit; + } + } + + /* + * Replicate the common ARM cpuidle enter function + * (arm_enter_idle_state). 
+ */ + ret = CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, index); + + if (broadcast) + tick_broadcast_exit(); + +out_arch_exit: + arch_cpu_idle_exit(); + + return ret; +} + +static int suspend_test_thread(void *arg) +{ + int cpu = (long)arg; + int i, nb_suspend = 0, nb_shallow_sleep = 0, nb_err = 0; + struct sched_param sched_priority = { .sched_priority = MAX_RT_PRIO-1 }; + struct cpuidle_device *dev; + struct cpuidle_driver *drv; + /* No need for an actual callback, we just want to wake up the CPU. */ + struct timer_list wakeup_timer; + + /* Wait for the main thread to give the start signal. */ + wait_for_completion(&suspend_threads_started); + + /* Set maximum priority to preempt all other threads on this CPU. */ + if (sched_setscheduler_nocheck(current, SCHED_FIFO, &sched_priority)) + pr_warn("Failed to set suspend thread scheduler on CPU %d\n", + cpu); + + dev = this_cpu_read(cpuidle_devices); + drv = cpuidle_get_cpu_driver(dev); + + pr_info("CPU %d entering suspend cycles, states 1 through %d\n", + cpu, drv->state_count - 1); + + timer_setup_on_stack(&wakeup_timer, dummy_callback, 0); + for (i = 0; i < NUM_SUSPEND_CYCLE; ++i) { + int index; + /* + * Test all possible states, except 0 (which is usually WFI and + * doesn't use PSCI). + */ + for (index = 1; index < drv->state_count; ++index) { + struct cpuidle_state *state = &drv->states[index]; + bool broadcast = state->flags & CPUIDLE_FLAG_TIMER_STOP; + int ret; + + /* + * Set the timer to wake this CPU up in some time (which + * should be largely sufficient for entering suspend). + * If the local tick is disabled when entering suspend, + * suspend_cpu() takes care of switching to a broadcast + * tick, so the timer will still wake us up. + */ + mod_timer(&wakeup_timer, jiffies + + usecs_to_jiffies(state->target_residency)); + + /* IRQs must be disabled during suspend operations. */ + local_irq_disable(); + + ret = suspend_cpu(index, broadcast); + + /* + * We have woken up. Re-enable IRQs to handle any + * pending interrupt, do not wait until the end of the + * loop. + */ + local_irq_enable(); + + if (ret == index) { + ++nb_suspend; + } else if (ret >= 0) { + /* We did not enter the expected state. */ + ++nb_shallow_sleep; + } else { + pr_err("Failed to suspend CPU %d: error %d " + "(requested state %d, cycle %d)\n", + cpu, ret, index, i); + ++nb_err; + } + } + } + + /* + * Disable the timer to make sure that the timer will not trigger + * later. + */ + del_timer(&wakeup_timer); + destroy_timer_on_stack(&wakeup_timer); + + if (atomic_dec_return_relaxed(&nb_active_threads) == 0) + complete(&suspend_threads_done); + + /* Give up on RT scheduling and wait for termination. */ + sched_priority.sched_priority = 0; + if (sched_setscheduler_nocheck(current, SCHED_NORMAL, &sched_priority)) + pr_warn("Failed to set suspend thread scheduler on CPU %d\n", + cpu); + for (;;) { + /* Needs to be set first to avoid missing a wakeup. 
*/ + set_current_state(TASK_INTERRUPTIBLE); + if (kthread_should_stop()) { + __set_current_state(TASK_RUNNING); + break; + } + schedule(); + } + + pr_info("CPU %d suspend test results: success %d, shallow states %d, errors %d\n", + cpu, nb_suspend, nb_shallow_sleep, nb_err); + + return nb_err; +} + +static int suspend_tests(void) +{ + int i, cpu, err = 0; + struct task_struct **threads; + int nb_threads = 0; + + threads = kmalloc_array(nb_available_cpus, sizeof(*threads), + GFP_KERNEL); + if (!threads) + return -ENOMEM; + + /* + * Stop cpuidle to prevent the idle tasks from entering a deep sleep + * mode, as it might interfere with the suspend threads on other CPUs. + * This does not prevent the suspend threads from using cpuidle (only + * the idle tasks check this status). Take the idle lock so that + * the cpuidle driver and device look-up can be carried out safely. + */ + cpuidle_pause_and_lock(); + + for_each_online_cpu(cpu) { + struct task_struct *thread; + /* Check that cpuidle is available on that CPU. */ + struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu); + struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); + + if (!dev || !drv) { + pr_warn("cpuidle not available on CPU %d, ignoring\n", + cpu); + continue; + } + + thread = kthread_create_on_cpu(suspend_test_thread, + (void *)(long)cpu, cpu, + "psci_suspend_test"); + if (IS_ERR(thread)) + pr_err("Failed to create kthread on CPU %d\n", cpu); + else + threads[nb_threads++] = thread; + } + + if (nb_threads < 1) { + err = -ENODEV; + goto out; + } + + atomic_set(&nb_active_threads, nb_threads); + + /* + * Wake up the suspend threads. To avoid the main thread being preempted + * before all the threads have been unparked, the suspend threads will + * wait for the completion of suspend_threads_started. + */ + for (i = 0; i < nb_threads; ++i) + wake_up_process(threads[i]); + complete_all(&suspend_threads_started); + + wait_for_completion(&suspend_threads_done); + + + /* Stop and destroy all threads, get return status. */ + for (i = 0; i < nb_threads; ++i) + err += kthread_stop(threads[i]); + out: + cpuidle_resume_and_unlock(); + kfree(threads); + return err; +} + +static int __init psci_checker(void) +{ + int ret; + + /* + * Since we're in an initcall, we assume that all the CPUs that all + * CPUs that can be onlined have been onlined. + * + * The tests assume that hotplug is enabled but nobody else is using it, + * otherwise the results will be unpredictable. However, since there + * is no userspace yet in initcalls, that should be fine, as long as + * no torture test is running at the same time (see Kconfig). + */ + nb_available_cpus = num_online_cpus(); + + /* Check PSCI operations are set up and working. 
*/ + ret = psci_ops_check(); + if (ret) + return ret; + + pr_info("PSCI checker started using %u CPUs\n", nb_available_cpus); + + pr_info("Starting hotplug tests\n"); + ret = hotplug_tests(); + if (ret == 0) + pr_info("Hotplug tests passed OK\n"); + else if (ret > 0) + pr_err("%d error(s) encountered in hotplug tests\n", ret); + else { + pr_err("Out of memory\n"); + return ret; + } + + pr_info("Starting suspend tests (%d cycles per state)\n", + NUM_SUSPEND_CYCLE); + ret = suspend_tests(); + if (ret == 0) + pr_info("Suspend tests passed OK\n"); + else if (ret > 0) + pr_err("%d error(s) encountered in suspend tests\n", ret); + else { + switch (ret) { + case -ENOMEM: + pr_err("Out of memory\n"); + break; + case -ENODEV: + pr_warn("Could not start suspend tests on any CPU\n"); + break; + } + } + + pr_info("PSCI checker completed\n"); + return ret < 0 ? ret : 0; +} +late_initcall(psci_checker); diff --git a/drivers/firmware/psci_checker.c b/drivers/firmware/psci_checker.c deleted file mode 100644 index 346943657962..000000000000 --- a/drivers/firmware/psci_checker.c +++ /dev/null @@ -1,505 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * Copyright (C) 2016 ARM Limited - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include - -#define NUM_SUSPEND_CYCLE (10) - -static unsigned int nb_available_cpus; -static int tos_resident_cpu = -1; - -static atomic_t nb_active_threads; -static struct completion suspend_threads_started = - COMPLETION_INITIALIZER(suspend_threads_started); -static struct completion suspend_threads_done = - COMPLETION_INITIALIZER(suspend_threads_done); - -/* - * We assume that PSCI operations are used if they are available. This is not - * necessarily true on arm64, since the decision is based on the - * "enable-method" property of each CPU in the DT, but given that there is no - * arch-specific way to check this, we assume that the DT is sensible. - */ -static int psci_ops_check(void) -{ - int migrate_type = -1; - int cpu; - - if (!(psci_ops.cpu_off && psci_ops.cpu_on && psci_ops.cpu_suspend)) { - pr_warn("Missing PSCI operations, aborting tests\n"); - return -EOPNOTSUPP; - } - - if (psci_ops.migrate_info_type) - migrate_type = psci_ops.migrate_info_type(); - - if (migrate_type == PSCI_0_2_TOS_UP_MIGRATE || - migrate_type == PSCI_0_2_TOS_UP_NO_MIGRATE) { - /* There is a UP Trusted OS, find on which core it resides. */ - for_each_online_cpu(cpu) - if (psci_tos_resident_on(cpu)) { - tos_resident_cpu = cpu; - break; - } - if (tos_resident_cpu == -1) - pr_warn("UP Trusted OS resides on no online CPU\n"); - } - - return 0; -} - -/* - * offlined_cpus is a temporary array but passing it as an argument avoids - * multiple allocations. - */ -static unsigned int down_and_up_cpus(const struct cpumask *cpus, - struct cpumask *offlined_cpus) -{ - int cpu; - int err = 0; - - cpumask_clear(offlined_cpus); - - /* Try to power down all CPUs in the mask. 
*/ - for_each_cpu(cpu, cpus) { - int ret = cpu_down(cpu); - - /* - * cpu_down() checks the number of online CPUs before the TOS - * resident CPU. - */ - if (cpumask_weight(offlined_cpus) + 1 == nb_available_cpus) { - if (ret != -EBUSY) { - pr_err("Unexpected return code %d while trying " - "to power down last online CPU %d\n", - ret, cpu); - ++err; - } - } else if (cpu == tos_resident_cpu) { - if (ret != -EPERM) { - pr_err("Unexpected return code %d while trying " - "to power down TOS resident CPU %d\n", - ret, cpu); - ++err; - } - } else if (ret != 0) { - pr_err("Error occurred (%d) while trying " - "to power down CPU %d\n", ret, cpu); - ++err; - } - - if (ret == 0) - cpumask_set_cpu(cpu, offlined_cpus); - } - - /* Try to power up all the CPUs that have been offlined. */ - for_each_cpu(cpu, offlined_cpus) { - int ret = cpu_up(cpu); - - if (ret != 0) { - pr_err("Error occurred (%d) while trying " - "to power up CPU %d\n", ret, cpu); - ++err; - } else { - cpumask_clear_cpu(cpu, offlined_cpus); - } - } - - /* - * Something went bad at some point and some CPUs could not be turned - * back on. - */ - WARN_ON(!cpumask_empty(offlined_cpus) || - num_online_cpus() != nb_available_cpus); - - return err; -} - -static void free_cpu_groups(int num, cpumask_var_t **pcpu_groups) -{ - int i; - cpumask_var_t *cpu_groups = *pcpu_groups; - - for (i = 0; i < num; ++i) - free_cpumask_var(cpu_groups[i]); - kfree(cpu_groups); -} - -static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups) -{ - int num_groups = 0; - cpumask_var_t tmp, *cpu_groups; - - if (!alloc_cpumask_var(&tmp, GFP_KERNEL)) - return -ENOMEM; - - cpu_groups = kcalloc(nb_available_cpus, sizeof(cpu_groups), - GFP_KERNEL); - if (!cpu_groups) - return -ENOMEM; - - cpumask_copy(tmp, cpu_online_mask); - - while (!cpumask_empty(tmp)) { - const struct cpumask *cpu_group = - topology_core_cpumask(cpumask_any(tmp)); - - if (!alloc_cpumask_var(&cpu_groups[num_groups], GFP_KERNEL)) { - free_cpu_groups(num_groups, &cpu_groups); - return -ENOMEM; - } - cpumask_copy(cpu_groups[num_groups++], cpu_group); - cpumask_andnot(tmp, tmp, cpu_group); - } - - free_cpumask_var(tmp); - *pcpu_groups = cpu_groups; - - return num_groups; -} - -static int hotplug_tests(void) -{ - int i, nb_cpu_group, err = -ENOMEM; - cpumask_var_t offlined_cpus, *cpu_groups; - char *page_buf; - - if (!alloc_cpumask_var(&offlined_cpus, GFP_KERNEL)) - return err; - - nb_cpu_group = alloc_init_cpu_groups(&cpu_groups); - if (nb_cpu_group < 0) - goto out_free_cpus; - page_buf = (char *)__get_free_page(GFP_KERNEL); - if (!page_buf) - goto out_free_cpu_groups; - - err = 0; - /* - * Of course the last CPU cannot be powered down and cpu_down() should - * refuse doing that. - */ - pr_info("Trying to turn off and on again all CPUs\n"); - err += down_and_up_cpus(cpu_online_mask, offlined_cpus); - - /* - * Take down CPUs by cpu group this time. When the last CPU is turned - * off, the cpu group itself should shut down. - */ - for (i = 0; i < nb_cpu_group; ++i) { - ssize_t len = cpumap_print_to_pagebuf(true, page_buf, - cpu_groups[i]); - /* Remove trailing newline. 
*/ - page_buf[len - 1] = '\0'; - pr_info("Trying to turn off and on again group %d (CPUs %s)\n", - i, page_buf); - err += down_and_up_cpus(cpu_groups[i], offlined_cpus); - } - - free_page((unsigned long)page_buf); -out_free_cpu_groups: - free_cpu_groups(nb_cpu_group, &cpu_groups); -out_free_cpus: - free_cpumask_var(offlined_cpus); - return err; -} - -static void dummy_callback(struct timer_list *unused) {} - -static int suspend_cpu(int index, bool broadcast) -{ - int ret; - - arch_cpu_idle_enter(); - - if (broadcast) { - /* - * The local timer will be shut down, we need to enter tick - * broadcast. - */ - ret = tick_broadcast_enter(); - if (ret) { - /* - * In the absence of hardware broadcast mechanism, - * this CPU might be used to broadcast wakeups, which - * may be why entering tick broadcast has failed. - * There is little the kernel can do to work around - * that, so enter WFI instead (idle state 0). - */ - cpu_do_idle(); - ret = 0; - goto out_arch_exit; - } - } - - /* - * Replicate the common ARM cpuidle enter function - * (arm_enter_idle_state). - */ - ret = CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, index); - - if (broadcast) - tick_broadcast_exit(); - -out_arch_exit: - arch_cpu_idle_exit(); - - return ret; -} - -static int suspend_test_thread(void *arg) -{ - int cpu = (long)arg; - int i, nb_suspend = 0, nb_shallow_sleep = 0, nb_err = 0; - struct sched_param sched_priority = { .sched_priority = MAX_RT_PRIO-1 }; - struct cpuidle_device *dev; - struct cpuidle_driver *drv; - /* No need for an actual callback, we just want to wake up the CPU. */ - struct timer_list wakeup_timer; - - /* Wait for the main thread to give the start signal. */ - wait_for_completion(&suspend_threads_started); - - /* Set maximum priority to preempt all other threads on this CPU. */ - if (sched_setscheduler_nocheck(current, SCHED_FIFO, &sched_priority)) - pr_warn("Failed to set suspend thread scheduler on CPU %d\n", - cpu); - - dev = this_cpu_read(cpuidle_devices); - drv = cpuidle_get_cpu_driver(dev); - - pr_info("CPU %d entering suspend cycles, states 1 through %d\n", - cpu, drv->state_count - 1); - - timer_setup_on_stack(&wakeup_timer, dummy_callback, 0); - for (i = 0; i < NUM_SUSPEND_CYCLE; ++i) { - int index; - /* - * Test all possible states, except 0 (which is usually WFI and - * doesn't use PSCI). - */ - for (index = 1; index < drv->state_count; ++index) { - struct cpuidle_state *state = &drv->states[index]; - bool broadcast = state->flags & CPUIDLE_FLAG_TIMER_STOP; - int ret; - - /* - * Set the timer to wake this CPU up in some time (which - * should be largely sufficient for entering suspend). - * If the local tick is disabled when entering suspend, - * suspend_cpu() takes care of switching to a broadcast - * tick, so the timer will still wake us up. - */ - mod_timer(&wakeup_timer, jiffies + - usecs_to_jiffies(state->target_residency)); - - /* IRQs must be disabled during suspend operations. */ - local_irq_disable(); - - ret = suspend_cpu(index, broadcast); - - /* - * We have woken up. Re-enable IRQs to handle any - * pending interrupt, do not wait until the end of the - * loop. - */ - local_irq_enable(); - - if (ret == index) { - ++nb_suspend; - } else if (ret >= 0) { - /* We did not enter the expected state. */ - ++nb_shallow_sleep; - } else { - pr_err("Failed to suspend CPU %d: error %d " - "(requested state %d, cycle %d)\n", - cpu, ret, index, i); - ++nb_err; - } - } - } - - /* - * Disable the timer to make sure that the timer will not trigger - * later. 
-
-static int suspend_test_thread(void *arg)
-{
-	int cpu = (long)arg;
-	int i, nb_suspend = 0, nb_shallow_sleep = 0, nb_err = 0;
-	struct sched_param sched_priority = { .sched_priority = MAX_RT_PRIO-1 };
-	struct cpuidle_device *dev;
-	struct cpuidle_driver *drv;
-	/* No need for an actual callback; we just want to wake up the CPU. */
-	struct timer_list wakeup_timer;
-
-	/* Wait for the main thread to give the start signal. */
-	wait_for_completion(&suspend_threads_started);
-
-	/* Set maximum priority to preempt all other threads on this CPU. */
-	if (sched_setscheduler_nocheck(current, SCHED_FIFO, &sched_priority))
-		pr_warn("Failed to set suspend thread scheduler on CPU %d\n",
-			cpu);
-
-	dev = this_cpu_read(cpuidle_devices);
-	drv = cpuidle_get_cpu_driver(dev);
-
-	pr_info("CPU %d entering suspend cycles, states 1 through %d\n",
-		cpu, drv->state_count - 1);
-
-	timer_setup_on_stack(&wakeup_timer, dummy_callback, 0);
-	for (i = 0; i < NUM_SUSPEND_CYCLE; ++i) {
-		int index;
-		/*
-		 * Test all possible states, except 0 (which is usually WFI
-		 * and doesn't use PSCI).
-		 */
-		for (index = 1; index < drv->state_count; ++index) {
-			struct cpuidle_state *state = &drv->states[index];
-			bool broadcast = state->flags & CPUIDLE_FLAG_TIMER_STOP;
-			int ret;
-
-			/*
-			 * Set the timer to wake this CPU up after a delay
-			 * that should be more than sufficient for entering
-			 * suspend. If the local tick is disabled when
-			 * entering suspend, suspend_cpu() takes care of
-			 * switching to a broadcast tick, so the timer will
-			 * still wake us up.
-			 */
-			mod_timer(&wakeup_timer, jiffies +
-				  usecs_to_jiffies(state->target_residency));
-
-			/* IRQs must be disabled during suspend operations. */
-			local_irq_disable();
-
-			ret = suspend_cpu(index, broadcast);
-
-			/*
-			 * We have woken up. Re-enable IRQs to handle any
-			 * pending interrupt; do not wait until the end of
-			 * the loop.
-			 */
-			local_irq_enable();
-
-			if (ret == index) {
-				++nb_suspend;
-			} else if (ret >= 0) {
-				/* We did not enter the expected state. */
-				++nb_shallow_sleep;
-			} else {
-				pr_err("Failed to suspend CPU %d: error %d (requested state %d, cycle %d)\n",
-				       cpu, ret, index, i);
-				++nb_err;
-			}
-		}
-	}
-
-	/* Delete the timer to make sure it cannot fire later. */
-	del_timer(&wakeup_timer);
-	destroy_timer_on_stack(&wakeup_timer);
-
-	if (atomic_dec_return_relaxed(&nb_active_threads) == 0)
-		complete(&suspend_threads_done);
-
-	/* Give up on RT scheduling and wait for termination. */
-	sched_priority.sched_priority = 0;
-	if (sched_setscheduler_nocheck(current, SCHED_NORMAL, &sched_priority))
-		pr_warn("Failed to set suspend thread scheduler on CPU %d\n",
-			cpu);
-	for (;;) {
-		/* Needs to be set first to avoid missing a wakeup. */
-		set_current_state(TASK_INTERRUPTIBLE);
-		if (kthread_should_stop()) {
-			__set_current_state(TASK_RUNNING);
-			break;
-		}
-		schedule();
-	}
-
-	pr_info("CPU %d suspend test results: success %d, shallow states %d, errors %d\n",
-		cpu, nb_suspend, nb_shallow_sleep, nb_err);
-
-	return nb_err;
-}
-
-static int suspend_tests(void)
-{
-	int i, cpu, err = 0;
-	struct task_struct **threads;
-	int nb_threads = 0;
-
-	threads = kmalloc_array(nb_available_cpus, sizeof(*threads),
-				GFP_KERNEL);
-	if (!threads)
-		return -ENOMEM;
-
-	/*
-	 * Stop cpuidle to prevent the idle tasks from entering a deep sleep
-	 * mode, as it might interfere with the suspend threads on other
-	 * CPUs. This does not prevent the suspend threads from using cpuidle
-	 * (only the idle tasks check this status). Take the idle lock so
-	 * that the cpuidle driver and device look-up can be carried out
-	 * safely.
-	 */
-	cpuidle_pause_and_lock();
-
-	for_each_online_cpu(cpu) {
-		struct task_struct *thread;
-		/* Check that cpuidle is available on that CPU. */
-		struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
-		struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
-
-		if (!dev || !drv) {
-			pr_warn("cpuidle not available on CPU %d, ignoring\n",
-				cpu);
-			continue;
-		}
-
-		thread = kthread_create_on_cpu(suspend_test_thread,
-					       (void *)(long)cpu, cpu,
-					       "psci_suspend_test");
-		if (IS_ERR(thread))
-			pr_err("Failed to create kthread on CPU %d\n", cpu);
-		else
-			threads[nb_threads++] = thread;
-	}
-
-	if (nb_threads < 1) {
-		err = -ENODEV;
-		goto out;
-	}
-
-	atomic_set(&nb_active_threads, nb_threads);
-
-	/*
-	 * Wake up the suspend threads. To avoid the main thread being
-	 * preempted before all the threads have been unparked, the suspend
-	 * threads will wait for the completion of suspend_threads_started.
-	 */
-	for (i = 0; i < nb_threads; ++i)
-		wake_up_process(threads[i]);
-	complete_all(&suspend_threads_started);
-
-	wait_for_completion(&suspend_threads_done);
-
-	/* Stop and destroy all threads, get return status. */
-	for (i = 0; i < nb_threads; ++i)
-		err += kthread_stop(threads[i]);
- out:
-	cpuidle_resume_and_unlock();
-	kfree(threads);
-	return err;
-}
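suspend_tests() above coordinates its workers with two latches: a start completion, so no worker runs before all of them have been woken, and a done completion signalled by the last worker to finish (tracked via nb_active_threads). The same rendezvous can be modelled in userspace with pthreads. This is a sketch of the pattern, not kernel code; comments map each step back to the kernel primitive it mimics.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #define NB_THREADS 4

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static int started, done;
    static atomic_int nb_active = NB_THREADS;

    static void *worker(void *arg)
    {
            (void)arg;

            /* wait_for_completion(&suspend_threads_started) */
            pthread_mutex_lock(&lock);
            while (!started)
                    pthread_cond_wait(&cond, &lock);
            pthread_mutex_unlock(&lock);

            /* ... per-CPU test cycles would run here ... */

            /* Last worker out signals suspend_threads_done. */
            if (atomic_fetch_sub(&nb_active, 1) == 1) {
                    pthread_mutex_lock(&lock);
                    done = 1;
                    pthread_cond_broadcast(&cond);
                    pthread_mutex_unlock(&lock);
            }
            return NULL;
    }

    int main(void)
    {
            pthread_t threads[NB_THREADS];

            for (int i = 0; i < NB_THREADS; i++)
                    pthread_create(&threads[i], NULL, worker, NULL);

            pthread_mutex_lock(&lock);
            started = 1;                    /* complete_all() */
            pthread_cond_broadcast(&cond);
            while (!done)                   /* wait_for_completion() */
                    pthread_cond_wait(&cond, &lock);
            pthread_mutex_unlock(&lock);

            for (int i = 0; i < NB_THREADS; i++)
                    pthread_join(threads[i], NULL);
            puts("all workers finished");
            return 0;
    }

Having the last worker signal completion means the coordinator only joins threads that have already finished their test cycles, which is exactly why the kernel code decrements nb_active_threads before parking each thread.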
-
-static int __init psci_checker(void)
-{
-	int ret;
-
-	/*
-	 * Since we're in an initcall, we assume that all the CPUs that can
-	 * be onlined have been onlined.
-	 *
-	 * The tests assume that hotplug is enabled but nobody else is using
-	 * it; otherwise the results will be unpredictable. However, since
-	 * there is no userspace yet in initcalls, that should be fine, as
-	 * long as no torture test is running at the same time (see Kconfig).
-	 */
-	nb_available_cpus = num_online_cpus();
-
-	/* Check PSCI operations are set up and working. */
-	ret = psci_ops_check();
-	if (ret)
-		return ret;
-
-	pr_info("PSCI checker started using %u CPUs\n", nb_available_cpus);
-
-	pr_info("Starting hotplug tests\n");
-	ret = hotplug_tests();
-	if (ret == 0)
-		pr_info("Hotplug tests passed OK\n");
-	else if (ret > 0)
-		pr_err("%d error(s) encountered in hotplug tests\n", ret);
-	else {
-		pr_err("Out of memory\n");
-		return ret;
-	}
-
-	pr_info("Starting suspend tests (%d cycles per state)\n",
-		NUM_SUSPEND_CYCLE);
-	ret = suspend_tests();
-	if (ret == 0)
-		pr_info("Suspend tests passed OK\n");
-	else if (ret > 0)
-		pr_err("%d error(s) encountered in suspend tests\n", ret);
-	else {
-		switch (ret) {
-		case -ENOMEM:
-			pr_err("Out of memory\n");
-			break;
-		case -ENODEV:
-			pr_warn("Could not start suspend tests on any CPU\n");
-			break;
-		}
-	}
-
-	pr_info("PSCI checker completed\n");
-	return ret < 0 ? ret : 0;
-}
-late_initcall(psci_checker);
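Throughout the checker, return values follow one convention: zero is success, a positive value is a count of test failures, and a negative value is a -errno from setup. A caller folding that into a pass/fail decision might look like the sketch below, where run_tests() is a hypothetical stand-in for hotplug_tests() or suspend_tests().

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical test runner following the checker's convention. */
    static int run_tests(void)
    {
            return -ENOMEM;     /* pretend setup failed */
    }

    int main(void)
    {
            int ret = run_tests();

            if (ret == 0)
                    puts("tests passed");
            else if (ret > 0)
                    printf("%d test failure(s)\n", ret);
            else
                    printf("setup error: %s\n", strerror(-ret));

            /* Mirrors "return ret < 0 ? ret : 0" in the initcall above. */
            return ret < 0 ? 1 : 0;
    }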