author | Peter Zijlstra <peterz@infradead.org> | 2014-04-11 13:55:48 +0200
committer | Ingo Molnar <mingo@kernel.org> | 2014-05-08 09:16:59 +0200
commit | 37352273ad48f2d177ed1b06ced32d5536b773fb (patch)
tree | 245d7410b60c5ab32f1b1103d40bcf56d9f941bd /kernel/sched/idle.c
parent | sched/idle: Delay clearing the polling bit (diff)
download | linux-37352273ad48f2d177ed1b06ced32d5536b773fb.tar.xz, linux-37352273ad48f2d177ed1b06ced32d5536b773fb.zip
sched/idle: Reflow cpuidle_idle_call()
Apply goto to reduce lines and nesting levels.
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Cc: Daniel Lezcano <daniel.lezcano@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/n/tip-cc6vb0snt3sr7op6rlbfeqfh@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched/idle.c')
-rw-r--r-- | kernel/sched/idle.c | 131
1 file changed, 58 insertions, 73 deletions
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index ed67f0cd2906..88a6bc43738b 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -73,7 +73,7 @@ static int cpuidle_idle_call(void)
 {
         struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
         struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
-        int next_state, entered_state, ret;
+        int next_state, entered_state;
         bool broadcast;
 
         /*
@@ -102,90 +102,75 @@ static int cpuidle_idle_call(void)
          * Check if the cpuidle framework is ready, otherwise fallback
          * to the default arch specific idle method
          */
-        ret = cpuidle_enabled(drv, dev);
-
-        if (!ret) {
-                /*
-                 * Ask the governor to choose an idle state it thinks
-                 * it is convenient to go to. There is *always* a
-                 * convenient idle state
-                 */
-                next_state = cpuidle_select(drv, dev);
-
+        if (cpuidle_enabled(drv, dev)) {
+use_default:
                 /*
-                 * The idle task must be scheduled, it is pointless to
-                 * go to idle, just update no idle residency and get
-                 * out of this function
+                 * We can't use the cpuidle framework, let's use the default
+                 * idle routine.
                 */
-                if (current_clr_polling_and_test()) {
-                        dev->last_residency = 0;
-                        entered_state = next_state;
+                if (current_clr_polling_and_test())
                         local_irq_enable();
-                } else {
-                        broadcast = !!(drv->states[next_state].flags &
-                                        CPUIDLE_FLAG_TIMER_STOP);
-
-                        if (broadcast) {
-                                /*
-                                 * Tell the time framework to switch
-                                 * to a broadcast timer because our
-                                 * local timer will be shutdown. If a
-                                 * local timer is used from another
-                                 * cpu as a broadcast timer, this call
-                                 * may fail if it is not available
-                                 */
-                                ret = clockevents_notify(
-                                        CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
-                                        &dev->cpu);
-                        }
-
-                        if (!ret) {
-                                trace_cpu_idle_rcuidle(next_state, dev->cpu);
-
-                                /*
-                                 * Enter the idle state previously
-                                 * returned by the governor
-                                 * decision. This function will block
-                                 * until an interrupt occurs and will
-                                 * take care of re-enabling the local
-                                 * interrupts
-                                 */
-                                entered_state = cpuidle_enter(drv, dev,
-                                                              next_state);
-
-                                trace_cpu_idle_rcuidle(PWR_EVENT_EXIT,
-                                                       dev->cpu);
-
-                                if (broadcast)
-                                        clockevents_notify(
-                                                CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
-                                                &dev->cpu);
-
-                                /*
-                                 * Give the governor an opportunity to reflect on the
-                                 * outcome
-                                 */
-                                cpuidle_reflect(dev, entered_state);
-                        }
-                }
+                else
+                        arch_cpu_idle();
+
+                goto exit_idle;
         }
 
         /*
-         * We can't use the cpuidle framework, let's use the default
-         * idle routine
+         * Ask the governor to choose an idle state it thinks
+         * it is convenient to go to. There is *always* a
+         * convenient idle state
          */
-        if (ret) {
-                if (!current_clr_polling_and_test())
-                        arch_cpu_idle();
-                else
-                        local_irq_enable();
+        next_state = cpuidle_select(drv, dev);
+
+        /*
+         * The idle task must be scheduled, it is pointless to
+         * go to idle, just update no idle residency and get
+         * out of this function
+         */
+        if (current_clr_polling_and_test()) {
+                dev->last_residency = 0;
+                entered_state = next_state;
+                local_irq_enable();
+                goto exit_idle;
         }
 
+        broadcast = !!(drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP);
+
+        /*
+         * Tell the time framework to switch to a broadcast timer
+         * because our local timer will be shutdown. If a local timer
+         * is used from another cpu as a broadcast timer, this call may
+         * fail if it is not available
+         */
+        if (broadcast &&
+            clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu))
+                goto use_default;
+
+        trace_cpu_idle_rcuidle(next_state, dev->cpu);
+
+        /*
+         * Enter the idle state previously returned by the governor decision.
+         * This function will block until an interrupt occurs and will take
+         * care of re-enabling the local interrupts
+         */
+        entered_state = cpuidle_enter(drv, dev, next_state);
+
+        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
+
+        if (broadcast)
+                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
+
+        /*
+         * Give the governor an opportunity to reflect on the outcome
+         */
+        cpuidle_reflect(dev, entered_state);
+
+exit_idle:
         __current_set_polling();
 
         /*
-         * It is up to the idle functions to enable back the local
-         * interrupt
+         * It is up to the idle functions to reenable local interrupts
         */
         if (WARN_ON_ONCE(irqs_disabled()))
                 local_irq_enable();
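The diff above is the authoritative change. As a rough illustration of the reflow it performs, here is a small self-contained C sketch (hypothetical helpers such as framework_ready(), pick_state() and enter_state(); not the kernel APIs) showing how the nested if/else structure collapses into a flat sequence with a use_default: fallback label and an exit_idle: convergence label, mirroring the patched cpuidle_idle_call().

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the cpuidle checks; not the kernel functions. */
static bool framework_ready(void)  { return true; }
static bool must_reschedule(void)  { return false; }
static int  pick_state(void)       { return 1; }
static bool enter_state(int state) { printf("enter state %d\n", state); return true; }
static void default_idle(void)     { printf("default idle\n"); }

/*
 * Mirrors the reflowed control flow: failure paths jump forward to
 * use_default, every path converges on exit_idle, and the common
 * sequence stays at a single nesting level.
 */
static void idle_once(void)
{
        int state;

        if (!framework_ready()) {
use_default:
                /* Framework unusable: fall back to the default idle routine. */
                default_idle();
                goto exit_idle;
        }

        state = pick_state();

        /* Work is pending, so entering an idle state would be pointless. */
        if (must_reschedule())
                goto exit_idle;

        /* A late failure reuses the same fallback path via its label. */
        if (!enter_state(state))
                goto use_default;

exit_idle:
        printf("idle iteration finished\n");
}

int main(void)
{
        idle_once();
        return 0;
}

The sketch keeps the kernel's convention that the "framework not usable" test guards a block containing the fallback label, so a later failure (the clockevents_notify() case in the real patch) can reuse the fallback by jumping backward into that block.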