From f533c3d340536198a4889a42a68d6c0d79a504e7 Mon Sep 17 00:00:00 2001
From: Paul Mundt <lethal@linux-sh.org>
Date: Fri, 16 Oct 2009 17:20:58 +0900
Subject: sh: Idle loop chainsawing for SMP-based light sleep.

This does a bit of chainsawing of the idle loop code to get light sleep
working on SMP. Previously this was forcing secondary CPUs into sleep
mode with them not coming back if they didn't have their own local
timers. Given that we use clockevents broadcasting by default, the CPU
managing the clockevents can't have IRQs disabled before entering its
sleep state.

This unfortunately leaves us with the age-old need_resched() race
between local_irq_enable() and cpu_sleep(), but at present this is
unavoidable. After some more experimentation it may be possible to
layer on SR.BL bit manipulation over top of this scheme to inhibit the
race condition, but given the current potential for missing wakeups,
this is left as a future exercise.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
---
 arch/sh/kernel/idle.c | 73 ++++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 57 insertions(+), 16 deletions(-)

(limited to 'arch/sh/kernel/idle.c')

diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index 27ff2dc093c7..8e61241230cb 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -21,7 +21,7 @@
 #include <asm/atomic.h>
 
 static int hlt_counter;
-void (*pm_idle)(void);
+void (*pm_idle)(void) = NULL;
 void (*pm_power_off)(void);
 EXPORT_SYMBOL(pm_power_off);
 
@@ -39,41 +39,68 @@ static int __init hlt_setup(char *__unused)
 }
 __setup("hlt", hlt_setup);
 
+static inline int hlt_works(void)
+{
+	return !hlt_counter;
+}
+
+/*
+ * On SMP it's slightly faster (but much more power-consuming!)
+ * to poll the ->work.need_resched flag instead of waiting for the
+ * cross-CPU IPI to arrive. Use this option with caution.
+ */
+static void poll_idle(void)
+{
+	local_irq_enable();
+	while (!need_resched())
+		cpu_relax();
+}
+
 void default_idle(void)
 {
-	if (!hlt_counter) {
+	if (hlt_works()) {
 		clear_thread_flag(TIF_POLLING_NRFLAG);
 		smp_mb__after_clear_bit();
-		set_bl_bit();
-		stop_critical_timings();
 
-		while (!need_resched())
+		if (!need_resched()) {
+			local_irq_enable();
 			cpu_sleep();
+		}
 
-		start_critical_timings();
-		clear_bl_bit();
 		set_thread_flag(TIF_POLLING_NRFLAG);
 	} else
-		while (!need_resched())
-			cpu_relax();
+		poll_idle();
 }
 
+/*
+ * The idle thread. There's no useful work to be done, so just try to conserve
+ * power and have a low exit latency (ie sit in a loop waiting for somebody to
+ * say that they'd like to reschedule)
+ */
 void cpu_idle(void)
 {
+	unsigned int cpu = smp_processor_id();
+
 	set_thread_flag(TIF_POLLING_NRFLAG);
 
 	/* endless idle loop with no priority at all */
 	while (1) {
-		void (*idle)(void) = pm_idle;
+		tick_nohz_stop_sched_tick(1);
 
-		if (!idle)
-			idle = default_idle;
+		while (!need_resched() && cpu_online(cpu)) {
+			local_irq_disable();
+			/* Don't trace irqs off for idle */
+			stop_critical_timings();
+			pm_idle();
+			/*
+			 * Sanity check to ensure that pm_idle() returns
+			 * with IRQs enabled
+			 */
+			WARN_ON(irqs_disabled());
+			start_critical_timings();
+		}
 
-		tick_nohz_stop_sched_tick(1);
-		while (!need_resched())
-			idle();
 		tick_nohz_restart_sched_tick();
-
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
@@ -81,6 +108,20 @@ void cpu_idle(void)
 	}
 }
 
+void __cpuinit select_idle_routine(void)
+{
+	/*
+	 * If a platform has set its own idle routine, leave it alone.
+	 */
+	if (pm_idle)
+		return;
+
+	if (hlt_works())
+		pm_idle = default_idle;
+	else
+		pm_idle = poll_idle;
+}
+
 static void do_nothing(void *unused)
 {
 }
--
cgit v1.2.3


From 0e6d4986e7940125a04ba8c3aa558f3b248cb9b4 Mon Sep 17 00:00:00 2001
From: Paul Mundt <lethal@linux-sh.org>
Date: Fri, 16 Oct 2009 17:27:58 +0900
Subject: sh: Make check_pgt_cache() more aggressive while idling.

This follows the x86 change and moves check_pgt_cache() up under the
!need_resched() tight loop, rather than simply calling into it when
exiting idle.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
---
 arch/sh/kernel/idle.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'arch/sh/kernel/idle.c')

diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index 8e61241230cb..3243eb23e842 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -88,6 +88,9 @@ void cpu_idle(void)
 		tick_nohz_stop_sched_tick(1);
 
 		while (!need_resched() && cpu_online(cpu)) {
+			check_pgt_cache();
+			rmb();
+
 			local_irq_disable();
 			/* Don't trace irqs off for idle */
 			stop_critical_timings();
@@ -104,7 +107,6 @@ void cpu_idle(void)
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
-		check_pgt_cache();
 	}
 }
 
--
cgit v1.2.3


From 9dbe00a56a60748668d2040cf4e59427060e2252 Mon Sep 17 00:00:00 2001
From: Paul Mundt <lethal@linux-sh.org>
Date: Fri, 16 Oct 2009 17:55:59 +0900
Subject: sh: Fix up IRQ re-enabling for the need_resched() case.

In the case where need_resched() is set between the cpu_idle() and
pm_idle() calls we were missing an else case for just re-enabling local
IRQs and bailing out. This was noticed by the irqs_disabled() warning,
even though IRQs were being re-enabled elsewhere.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
---
 arch/sh/kernel/idle.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'arch/sh/kernel/idle.c')

diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index 3243eb23e842..aaff0037fcd7 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -65,7 +65,8 @@ void default_idle(void)
 		if (!need_resched()) {
 			local_irq_enable();
 			cpu_sleep();
-		}
+		} else
+			local_irq_enable();
 
 		set_thread_flag(TIF_POLLING_NRFLAG);
 	} else
--
cgit v1.2.3
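For reference, below is a consolidated sketch of how the affected parts of
arch/sh/kernel/idle.c read once all three patches above are applied. It is
assembled purely from the hunks shown here and is a reader's aid, not the
verbatim file: unchanged context outside the hunks (file header, includes,
the hlt counter plumbing, do_nothing(), etc.) is elided, and the comments
marked "editorial" are mine rather than the author's.

static int hlt_counter;
void (*pm_idle)(void) = NULL;	/* filled in by select_idle_routine() */

static inline int hlt_works(void)
{
	return !hlt_counter;
}

/* Busy-wait fallback, used when sleeping is disabled via hlt_counter. */
static void poll_idle(void)
{
	local_irq_enable();
	while (!need_resched())
		cpu_relax();
}

void default_idle(void)
{
	if (hlt_works()) {
		clear_thread_flag(TIF_POLLING_NRFLAG);
		smp_mb__after_clear_bit();

		if (!need_resched()) {
			local_irq_enable();
			/* editorial: the unavoidable need_resched() race
			   from the first commit message sits between the
			   local_irq_enable() above and cpu_sleep() below */
			cpu_sleep();
		} else
			local_irq_enable();	/* third patch: never return with IRQs off */

		set_thread_flag(TIF_POLLING_NRFLAG);
	} else
		poll_idle();
}

void cpu_idle(void)
{
	unsigned int cpu = smp_processor_id();

	set_thread_flag(TIF_POLLING_NRFLAG);

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick(1);

		while (!need_resched() && cpu_online(cpu)) {
			check_pgt_cache();	/* second patch: reap inside the tight loop */
			rmb();

			local_irq_disable();
			stop_critical_timings();
			pm_idle();	/* editorial: must return with IRQs enabled */
			WARN_ON(irqs_disabled());
			start_critical_timings();
		}

		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

void __cpuinit select_idle_routine(void)
{
	/*
	 * If a platform has set its own idle routine, leave it alone.
	 */
	if (pm_idle)
		return;

	if (hlt_works())
		pm_idle = default_idle;
	else
		pm_idle = poll_idle;
}

The contract worth noting in this shape of the code: cpu_idle() invokes
pm_idle() with IRQs disabled, and every path out of default_idle() and
poll_idle() must re-enable them before returning. The WARN_ON(irqs_disabled())
in the loop enforces this, and the third patch's else branch is what closes
the one path that previously violated it.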