author		Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2017-11-13 01:34:14 +0100
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2017-11-13 01:34:14 +0100
commit		622ade3a2ff0fc4c026f116ea6018eddaeb49925
tree		44d62d3929632a0f1a802decc6b1f1f680ce658f
parent		Merge branch 'pm-qos'
parent		intel_idle: Graceful probe failure when MWAIT is disabled
Merge branch 'pm-cpuidle'
* pm-cpuidle:
  intel_idle: Graceful probe failure when MWAIT is disabled
  cpuidle: Avoid assignment in if () argument
  cpuidle: Clean up cpuidle_enable_device() error handling a bit
  cpuidle: ladder: Add per CPU PM QoS resume latency support
  ARM: cpuidle: Refactor rollback operations if init fails
  ARM: cpuidle: Correct driver unregistration if init fails
  intel_idle: replace conditionals with static_cpu_has(X86_FEATURE_ARAT)
  cpuidle: fix broadcast control when broadcast can not be entered

Conflicts:
	drivers/idle/intel_idle.c
-rw-r--r--	drivers/cpuidle/cpuidle-arm.c		153
-rw-r--r--	drivers/cpuidle/cpuidle.c		 14
-rw-r--r--	drivers/cpuidle/governors/ladder.c	  7
-rw-r--r--	drivers/idle/intel_idle.c		 23
4 files changed, 122 insertions(+), 75 deletions(-)
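The bulk of the diff below reworks drivers/cpuidle/cpuidle-arm.c so that per-CPU driver and device setup lives in a helper, arm_idle_init_cpu(), while the caller unwinds already-registered CPUs in reverse order if any CPU fails. As a rough illustration of that register-then-roll-back pattern, here is a minimal, self-contained user-space C sketch; unit_init() and unit_exit() are hypothetical stand-ins for the per-CPU setup and teardown, not kernel APIs.

/*
 * Minimal user-space sketch of the register/roll-back pattern that
 * arm_idle_init() adopts below.  unit_init()/unit_exit() are made-up
 * stand-ins for the per-CPU driver/device setup; this is not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>

#define NR_UNITS 4

static void *units[NR_UNITS];

static int unit_init(int i)
{
	units[i] = malloc(16);		/* mirrors allocating the per-CPU device */
	return units[i] ? 0 : -1;
}

static void unit_exit(int i)
{
	free(units[i]);			/* mirrors unregister + kfree() on rollback */
	units[i] = NULL;
}

int main(void)
{
	int i, ret;

	for (i = 0; i < NR_UNITS; i++) {
		ret = unit_init(i);
		if (ret)
			goto out_fail;
	}
	puts("all units initialized");
	return 0;

out_fail:
	fprintf(stderr, "unit %d failed, rolling back\n", i);
	while (--i >= 0)		/* unwind in reverse, like while (--cpu >= 0) */
		unit_exit(i);
	return 1;
}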
diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
index 52a75053ee03..ddee1b601b89 100644
--- a/drivers/cpuidle/cpuidle-arm.c
+++ b/drivers/cpuidle/cpuidle-arm.c
@@ -72,12 +72,94 @@ static const struct of_device_id arm_idle_state_match[] __initconst = {
};
/*
- * arm_idle_init
+ * arm_idle_init_cpu
*
* Registers the arm specific cpuidle driver with the cpuidle
* framework. It relies on core code to parse the idle states
* and initialize them using driver data structures accordingly.
*/
+static int __init arm_idle_init_cpu(int cpu)
+{
+ int ret;
+ struct cpuidle_driver *drv;
+ struct cpuidle_device *dev;
+
+ drv = kmemdup(&arm_idle_driver, sizeof(*drv), GFP_KERNEL);
+ if (!drv)
+ return -ENOMEM;
+
+ drv->cpumask = (struct cpumask *)cpumask_of(cpu);
+
+ /*
+ * Initialize idle states data, starting at index 1. This
+ * driver is DT only, if no DT idle states are detected (ret
+ * == 0) let the driver initialization fail accordingly since
+ * there is no reason to initialize the idle driver if only
+ * wfi is supported.
+ */
+ ret = dt_init_idle_driver(drv, arm_idle_state_match, 1);
+ if (ret <= 0) {
+ ret = ret ? : -ENODEV;
+ goto out_kfree_drv;
+ }
+
+ ret = cpuidle_register_driver(drv);
+ if (ret) {
+ pr_err("Failed to register cpuidle driver\n");
+ goto out_kfree_drv;
+ }
+
+ /*
+ * Call arch CPU operations in order to initialize
+ * idle states suspend back-end specific data
+ */
+ ret = arm_cpuidle_init(cpu);
+
+ /*
+ * Skip the cpuidle device initialization if the reported
+ * failure is a HW misconfiguration/breakage (-ENXIO).
+ */
+ if (ret == -ENXIO)
+ return 0;
+
+ if (ret) {
+ pr_err("CPU %d failed to init idle CPU ops\n", cpu);
+ goto out_unregister_drv;
+ }
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ pr_err("Failed to allocate cpuidle device\n");
+ ret = -ENOMEM;
+ goto out_unregister_drv;
+ }
+ dev->cpu = cpu;
+
+ ret = cpuidle_register_device(dev);
+ if (ret) {
+ pr_err("Failed to register cpuidle device for CPU %d\n",
+ cpu);
+ goto out_kfree_dev;
+ }
+
+ return 0;
+
+out_kfree_dev:
+ kfree(dev);
+out_unregister_drv:
+ cpuidle_unregister_driver(drv);
+out_kfree_drv:
+ kfree(drv);
+ return ret;
+}
+
+/*
+ * arm_idle_init - Initializes arm cpuidle driver
+ *
+ * Initializes the arm cpuidle driver for all CPUs. If any CPU fails
+ * to register its cpuidle driver, roll back and cancel the
+ * registration of all CPUs.
+ */
static int __init arm_idle_init(void)
{
int cpu, ret;
@@ -85,79 +167,20 @@ static int __init arm_idle_init(void)
struct cpuidle_device *dev;
for_each_possible_cpu(cpu) {
-
- drv = kmemdup(&arm_idle_driver, sizeof(*drv), GFP_KERNEL);
- if (!drv) {
- ret = -ENOMEM;
- goto out_fail;
- }
-
- drv->cpumask = (struct cpumask *)cpumask_of(cpu);
-
- /*
- * Initialize idle states data, starting at index 1. This
- * driver is DT only, if no DT idle states are detected (ret
- * == 0) let the driver initialization fail accordingly since
- * there is no reason to initialize the idle driver if only
- * wfi is supported.
- */
- ret = dt_init_idle_driver(drv, arm_idle_state_match, 1);
- if (ret <= 0) {
- ret = ret ? : -ENODEV;
- goto init_fail;
- }
-
- ret = cpuidle_register_driver(drv);
- if (ret) {
- pr_err("Failed to register cpuidle driver\n");
- goto init_fail;
- }
-
- /*
- * Call arch CPU operations in order to initialize
- * idle states suspend back-end specific data
- */
- ret = arm_cpuidle_init(cpu);
-
- /*
- * Skip the cpuidle device initialization if the reported
- * failure is a HW misconfiguration/breakage (-ENXIO).
- */
- if (ret == -ENXIO)
- continue;
-
- if (ret) {
- pr_err("CPU %d failed to init idle CPU ops\n", cpu);
- goto out_fail;
- }
-
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- pr_err("Failed to allocate cpuidle device\n");
- ret = -ENOMEM;
+ ret = arm_idle_init_cpu(cpu);
+ if (ret)
goto out_fail;
- }
- dev->cpu = cpu;
-
- ret = cpuidle_register_device(dev);
- if (ret) {
- pr_err("Failed to register cpuidle device for CPU %d\n",
- cpu);
- kfree(dev);
- goto out_fail;
- }
}
return 0;
-init_fail:
- kfree(drv);
+
out_fail:
while (--cpu >= 0) {
dev = per_cpu(cpuidle_devices, cpu);
+ drv = cpuidle_get_cpu_driver(dev);
cpuidle_unregister_device(dev);
- kfree(dev);
- drv = cpuidle_get_driver();
cpuidle_unregister_driver(drv);
+ kfree(dev);
kfree(drv);
}
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 484cc8909d5c..68a16827f45f 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -208,6 +208,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
return -EBUSY;
}
target_state = &drv->states[index];
+ broadcast = false;
}
/* Take note of the planned idle state. */
@@ -387,9 +388,12 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
if (dev->enabled)
return 0;
+ if (!cpuidle_curr_governor)
+ return -EIO;
+
drv = cpuidle_get_cpu_driver(dev);
- if (!drv || !cpuidle_curr_governor)
+ if (!drv)
return -EIO;
if (!dev->registered)
@@ -399,9 +403,11 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
if (ret)
return ret;
- if (cpuidle_curr_governor->enable &&
- (ret = cpuidle_curr_governor->enable(drv, dev)))
- goto fail_sysfs;
+ if (cpuidle_curr_governor->enable) {
+ ret = cpuidle_curr_governor->enable(drv, dev);
+ if (ret)
+ goto fail_sysfs;
+ }
smp_wmb();
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index ce1a2ffffb2a..1ad8745fd6d6 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -17,6 +17,7 @@
#include <linux/pm_qos.h>
#include <linux/jiffies.h>
#include <linux/tick.h>
+#include <linux/cpu.h>
#include <asm/io.h>
#include <linux/uaccess.h>
@@ -67,10 +68,16 @@ static int ladder_select_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{
struct ladder_device *ldev = this_cpu_ptr(&ladder_devices);
+ struct device *device = get_cpu_device(dev->cpu);
struct ladder_device_state *last_state;
int last_residency, last_idx = ldev->last_state_idx;
int first_idx = drv->states[0].flags & CPUIDLE_FLAG_POLLING ? 1 : 0;
int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
+ int resume_latency = dev_pm_qos_raw_read_value(device);
+
+ if (resume_latency < latency_req &&
+ resume_latency != PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
+ latency_req = resume_latency;
/* Special case when user has set very strict latency requirement */
if (unlikely(latency_req == 0)) {
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index f0b06b14e782..b2ccce5fb071 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -913,10 +913,9 @@ static __cpuidle int intel_idle(struct cpuidle_device *dev,
struct cpuidle_state *state = &drv->states[index];
unsigned long eax = flg2MWAIT(state->flags);
unsigned int cstate;
+ bool uninitialized_var(tick);
int cpu = smp_processor_id();
- cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;
-
/*
* leave_mm() to avoid costly and often unnecessary wakeups
* for flushing the user TLB's associated with the active mm.
@@ -924,12 +923,19 @@ static __cpuidle int intel_idle(struct cpuidle_device *dev,
if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
leave_mm(cpu);
- if (!(lapic_timer_reliable_states & (1 << (cstate))))
- tick_broadcast_enter();
+ if (!static_cpu_has(X86_FEATURE_ARAT)) {
+ cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) &
+ MWAIT_CSTATE_MASK) + 1;
+ tick = false;
+ if (!(lapic_timer_reliable_states & (1 << (cstate)))) {
+ tick = true;
+ tick_broadcast_enter();
+ }
+ }
mwait_idle_with_hints(eax, ecx);
- if (!(lapic_timer_reliable_states & (1 << (cstate))))
+ if (!static_cpu_has(X86_FEATURE_ARAT) && tick)
tick_broadcast_exit();
return index;
@@ -1061,7 +1067,7 @@ static const struct idle_cpu idle_cpu_dnv = {
};
#define ICPU(model, cpu) \
- { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }
+ { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&cpu }
static const struct x86_cpu_id intel_idle_ids[] __initconst = {
ICPU(INTEL_FAM6_NEHALEM_EP, idle_cpu_nehalem),
@@ -1125,6 +1131,11 @@ static int __init intel_idle_probe(void)
return -ENODEV;
}
+ if (!boot_cpu_has(X86_FEATURE_MWAIT)) {
+ pr_debug("Please enable MWAIT in BIOS SETUP\n");
+ return -ENODEV;
+ }
+
if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
return -ENODEV;