author		Linus Torvalds <torvalds@linux-foundation.org>	2015-02-17 23:17:51 +0100
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-02-17 23:17:51 +0100
commit		99fa0ad92c4fd8b529c89b3640b42323984be761 (patch)
tree		ba4ef84ab54c2c5636521c1b1ae4e9056d46c75c /drivers/acpi
parent		Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jmo... (diff)
parent		ACPI / idle: Implement ->enter_freeze callback routine (diff)
Merge tag 'suspend-to-idle-3.20-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull suspend-to-idle updates from Rafael Wysocki:
"Suspend-to-idle timer quiescing support for v3.20-rc1
Until now suspend-to-idle has not been able to save much more energy
than runtime PM because of timer interrupts that periodically bring
CPUs out of idle while they are waiting for a wakeup interrupt. Of
course, the timer interrupts are not wakeup ones, so the handling of
them can be deferred until a real wakeup interrupt happens, but at the
same time we don't want to mass-expire timers at that point.
The solution is to suspend the entire timekeeping when the last CPU is
entering an idle state and resume it when the first CPU goes out of
idle. That has to be done with care, though, so as to avoid accessing
suspended clocksources etc., and we need extra support from idle
drivers for that.
This series of commits adds support for quiescing timers during
suspend-to-idle and adds the requisite callbacks to intel_idle and the
ACPI cpuidle driver"
* tag 'suspend-to-idle-3.20-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
ACPI / idle: Implement ->enter_freeze callback routine
intel_idle: Add ->enter_freeze callbacks
PM / sleep: Make it possible to quiesce timers during suspend-to-idle
timekeeping: Make it safe to use the fast timekeeper while suspended
timekeeping: Pass readout base to update_fast_timekeeper()
PM / sleep: Re-implement suspend-to-idle handling
Diffstat (limited to 'drivers/acpi')
-rw-r--r--	drivers/acpi/processor_idle.c	48
1 file changed, 40 insertions, 8 deletions
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index c256bd7fbd78..c6bb9f1257c9 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -732,9 +732,8 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
 
 static bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
 {
-	return IS_ENABLED(CONFIG_HOTPLUG_CPU) && num_online_cpus() > 1 &&
-		!(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED) &&
-		!pr->flags.has_cst;
+	return IS_ENABLED(CONFIG_HOTPLUG_CPU) && !pr->flags.has_cst &&
+		!(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED);
 }
 
 static int c3_cpu_count;
@@ -744,9 +743,10 @@ static DEFINE_RAW_SPINLOCK(c3_lock);
 /**
  * acpi_idle_enter_bm - enters C3 with proper BM handling
  * @pr: Target processor
  * @cx: Target state context
+ * @timer_bc: Whether or not to change timer mode to broadcast
  */
 static void acpi_idle_enter_bm(struct acpi_processor *pr,
-			       struct acpi_processor_cx *cx)
+			       struct acpi_processor_cx *cx, bool timer_bc)
 {
 	acpi_unlazy_tlb(smp_processor_id());
@@ -754,7 +754,8 @@ static void acpi_idle_enter_bm(struct acpi_processor *pr,
 	 * Must be done before busmaster disable as we might need to
 	 * access HPET !
 	 */
-	lapic_timer_state_broadcast(pr, cx, 1);
+	if (timer_bc)
+		lapic_timer_state_broadcast(pr, cx, 1);
 
 	/*
 	 * disable bus master
@@ -784,7 +785,8 @@ static void acpi_idle_enter_bm(struct acpi_processor *pr,
 		raw_spin_unlock(&c3_lock);
 	}
 
-	lapic_timer_state_broadcast(pr, cx, 0);
+	if (timer_bc)
+		lapic_timer_state_broadcast(pr, cx, 0);
 }
 
 static int acpi_idle_enter(struct cpuidle_device *dev,
@@ -798,12 +800,12 @@ static int acpi_idle_enter(struct cpuidle_device *dev,
 		return -EINVAL;
 
 	if (cx->type != ACPI_STATE_C1) {
-		if (acpi_idle_fallback_to_c1(pr)) {
+		if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) {
 			index = CPUIDLE_DRIVER_STATE_START;
 			cx = per_cpu(acpi_cstate[index], dev->cpu);
 		} else if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) {
 			if (cx->bm_sts_skip || !acpi_idle_bm_check()) {
-				acpi_idle_enter_bm(pr, cx);
+				acpi_idle_enter_bm(pr, cx, true);
 				return index;
 			} else if (drv->safe_state_index >= 0) {
 				index = drv->safe_state_index;
@@ -827,6 +829,27 @@ static int acpi_idle_enter(struct cpuidle_device *dev,
 	return index;
 }
 
+static void acpi_idle_enter_freeze(struct cpuidle_device *dev,
+				   struct cpuidle_driver *drv, int index)
+{
+	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
+
+	if (cx->type == ACPI_STATE_C3) {
+		struct acpi_processor *pr = __this_cpu_read(processors);
+
+		if (unlikely(!pr))
+			return;
+
+		if (pr->flags.bm_check) {
+			acpi_idle_enter_bm(pr, cx, false);
+			return;
+		} else {
+			ACPI_FLUSH_CPU_CACHE();
+		}
+	}
+	acpi_idle_do_entry(cx);
+}
+
 struct cpuidle_driver acpi_idle_driver = {
 	.name =		"acpi_idle",
 	.owner =	THIS_MODULE,
@@ -925,6 +948,15 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
 			state->enter_dead = acpi_idle_play_dead;
 			drv->safe_state_index = count;
 		}
+		/*
+		 * Halt-induced C1 is not good for ->enter_freeze, because it
+		 * re-enables interrupts on exit. Moreover, C1 is generally not
+		 * particularly interesting from the suspend-to-idle angle, so
+		 * avoid C1 and the situations in which we may need to fall back
+		 * to it altogether.
+		 */
+		if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr))
+			state->enter_freeze = acpi_idle_enter_freeze;
 
 		count++;
 		if (count == CPUIDLE_STATE_MAX)
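
For context on how the new callback is invoked: the cpuidle core rework
in "PM / sleep: Re-implement suspend-to-idle handling" (outside this
diffstat) picks the deepest state that provides ->enter_freeze and calls
it with timekeeping quiesced. The following is a simplified sketch of
that call site, assuming kernel context; it illustrates the series'
design and is not verbatim upstream code.

/*
 * Sketch of the suspend-to-idle entry path in the cpuidle core
 * (simplified; kernel types assumed, not verbatim upstream code).
 */
static void cpuidle_enter_freeze_sketch(struct cpuidle_driver *drv,
					struct cpuidle_device *dev)
{
	int i, index = -1;

	/* The deepest state advertising ->enter_freeze wins. */
	for (i = 0; i < drv->state_count; i++)
		if (drv->states[i].enter_freeze)
			index = i;
	if (index < 0)
		return;	/* no suitable state; fall back to normal idle */

	tick_freeze();	/* last CPU in suspends timekeeping */
	drv->states[index].enter_freeze(dev, drv, index);
	/*
	 * The callback must keep interrupts disabled on exit: the wakeup
	 * interrupt that ends suspend-to-idle must not be handled before
	 * timekeeping is resumed.  This is why halt-induced C1, which
	 * re-enables interrupts, gets no ->enter_freeze in the hunk above.
	 */
	WARN_ON(!irqs_disabled());
	tick_unfreeze();	/* first CPU out resumes timekeeping */
}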