path: root/kernel/sched
author     Rafael J. Wysocki <rafael.j.wysocki@intel.com>    2015-05-04 22:53:22 +0200
committer  Rafael J. Wysocki <rafael.j.wysocki@intel.com>    2015-05-04 22:53:22 +0200
commit     82f663277d0db854e8978e5f89fd88f6df75a4a4 (patch)
tree       e7164d30fbde033b1ac30a28a83f82e3382a2c36 /kernel/sched
parent     Linux 4.1-rc2 (diff)
sched / idle: Move the default idle call code to a separate function
Move the code under the "use_default" label in cpuidle_idle_call() into a separate (new) function. This just allows the subsequent changes to be more straightforward.

Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Reviewed-by: Daniel Lezcano <daniel.lezcano@linaro.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/idle.c | 43
1 file changed, 24 insertions(+), 19 deletions(-)
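For context, the change follows a common C refactoring: the code that used to live under a shared "use_default:" label is turned into a small helper, and each fallback site calls that helper and then proceeds to the common exit path. Below is a minimal standalone sketch of that pattern, not the kernel code itself; try_fancy_idle(), fallback_idle(), idle_before() and idle_after() are hypothetical stand-ins, and only default_idle_call() mirrors the name introduced by this patch.

#include <stdio.h>

/* Hypothetical stand-ins for the real idle primitives. */
static int try_fancy_idle(void) { return -1; }  /* pretend the fancy path failed */
static void fallback_idle(void) { puts("fallback idle"); }

/* Before: a failure path jumps to a shared label near the end of the function. */
static void idle_before(void)
{
	if (try_fancy_idle() < 0)
		goto use_default;

	puts("fancy idle");

exit_idle:
	puts("exit");
	return;

use_default:
	fallback_idle();
	goto exit_idle;
}

/* After: the label body becomes a helper called directly at the fallback site. */
static void default_idle_call(void)
{
	fallback_idle();
}

static void idle_after(void)
{
	if (try_fancy_idle() < 0) {
		default_idle_call();
		goto exit_idle;
	}

	puts("fancy idle");

exit_idle:
	puts("exit");
}

int main(void)
{
	idle_before();
	idle_after();
	return 0;
}

Calling the helper directly at each fallback site keeps the control flow linear and lets later changes touch individual call sites independently, which is what the commit message means by making the subsequent changes more straightforward.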
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index fefcb1fa5160..ae7c0be90d16 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -67,6 +67,18 @@ void __weak arch_cpu_idle(void)
local_irq_enable();
}
+static void default_idle_call(void)
+{
+ /*
+ * We can't use the cpuidle framework, let's use the default idle
+ * routine.
+ */
+ if (current_clr_polling_and_test())
+ local_irq_enable();
+ else
+ arch_cpu_idle();
+}
+
/**
* cpuidle_idle_call - the main idle function
*
@@ -105,8 +117,10 @@ static void cpuidle_idle_call(void)
*/
rcu_idle_enter();
- if (cpuidle_not_available(drv, dev))
- goto use_default;
+ if (cpuidle_not_available(drv, dev)) {
+ default_idle_call();
+ goto exit_idle;
+ }
/*
* Suspend-to-idle ("freeze") is a system state in which all user space
@@ -134,8 +148,10 @@ static void cpuidle_idle_call(void)
next_state = cpuidle_select(drv, dev);
}
/* Fall back to the default arch idle method on errors. */
- if (next_state < 0)
- goto use_default;
+ if (next_state < 0) {
+ default_idle_call();
+ goto exit_idle;
+ }
/*
* The idle task must be scheduled, it is pointless to
@@ -162,8 +178,10 @@ static void cpuidle_idle_call(void)
/* The cpu is no longer idle or about to enter idle. */
idle_set_state(this_rq(), NULL);
- if (entered_state == -EBUSY)
- goto use_default;
+ if (entered_state == -EBUSY) {
+ default_idle_call();
+ goto exit_idle;
+ }
/*
* Give the governor an opportunity to reflect on the outcome
@@ -182,19 +200,6 @@ exit_idle:
rcu_idle_exit();
start_critical_timings();
- return;
-
-use_default:
- /*
- * We can't use the cpuidle framework, let's use the default
- * idle routine.
- */
- if (current_clr_polling_and_test())
- local_irq_enable();
- else
- arch_cpu_idle();
-
- goto exit_idle;
}
DEFINE_PER_CPU(bool, cpu_dead_idle);