author | Johannes Berg <johannes.berg@intel.com> | 2020-12-02 20:58:07 +0100
---|---|---
committer | Richard Weinberger <richard@nod.at> | 2020-12-13 22:22:49 +0100
commit | a374b7cb1ea648a27ceaa2dea19aa967725e938b |
tree | 3caccdb36c1cc214f10a2f2d2e267acee1f101a6 | /arch/um/kernel/irq.c
parent | um: Allow PM with suspend-to-idle |
um: Support suspend to RAM
With all the previous bits in place, we can now also support
suspend to RAM, in the sense that everything is suspended,
not just most of it (including userspace processes) as in
s2idle.
Since um_idle_sleep() now waits forever, we can simply call
that to "suspend" the system.
As before, you can wake it up using SIGUSR1 since we're just
in a pause() call that only needs to return.
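For illustration, the pause()/SIGUSR1 mechanism can be sketched
as a plain userspace C program; this is not the UML code itself,
and the handler name is hypothetical:

	#include <signal.h>
	#include <stdio.h>
	#include <unistd.h>

	/* Empty handler: its only job is to make pause() return. */
	static void wake_handler(int sig)
	{
		(void)sig;
	}

	int main(void)
	{
		struct sigaction sa = { .sa_handler = wake_handler };

		sigaction(SIGUSR1, &sa, NULL);	/* SIGUSR1 is the wakeup signal */
		printf("suspended; kill -USR1 %d to wake\n", (int)getpid());
		pause();	/* blocks until a signal handler has run */
		printf("woken up\n");
		return 0;
	}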
In order to implement selective resume from certain devices,
and to keep arbitrary device interrupts from waking up the
system, suspend interrupts by removing SIGIO notification
(O_ASYNC) from all the FDs that are not supposed to wake up
the system. The wakeup-capable FDs keep their notification;
however, we swap out the SIGIO handler so we don't actually
handle the signal as an interrupt while suspended.
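Dropping SIGIO notification for an fd amounts to clearing
O_ASYNC via fcntl(); a minimal sketch of what a helper along
the lines of os_clear_fd_async() presumably does (the helper's
actual body is outside this diff):

	#include <fcntl.h>

	/* Sketch: stop SIGIO delivery for this fd by clearing O_ASYNC. */
	static int clear_fd_async(int fd)
	{
		int flags = fcntl(fd, F_GETFL);

		if (flags < 0)
			return -1;
		return fcntl(fd, F_SETFL, flags & ~O_ASYNC);
	}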
Since we're in pause(), the mere act of receiving SIGIO wakes
us up. Then, once things have been restored enough, we re-set
O_ASYNC on all previously suspended FDs, reinstall the proper
SIGIO handler, and send SIGIO to ourselves to process anything
that might now be pending.
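The resume side is the mirror image; again a hedged sketch
assuming fcntl()-based helpers (the in-tree names are
os_set_fd_async() and send_sigio_to_self(), whose bodies are
outside this diff):

	#include <fcntl.h>
	#include <signal.h>
	#include <unistd.h>

	/* Sketch: re-arm SIGIO delivery for a previously suspended fd. */
	static int set_fd_async(int fd)
	{
		int flags = fcntl(fd, F_GETFL);

		if (flags < 0)
			return -1;
		return fcntl(fd, F_SETFL, flags | O_ASYNC);
	}

	/* Sketch: queue a SIGIO to ourselves so anything that became
	 * pending while notification was off gets processed now. */
	static void sigio_to_self(void)
	{
		kill(getpid(), SIGIO);
	}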
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Richard Weinberger <richard@nod.at>
Diffstat (limited to 'arch/um/kernel/irq.c')
-rw-r--r-- | arch/um/kernel/irq.c | 88
1 file changed, 87 insertions(+), 1 deletion(-)
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index 482269580b79..ea43312cbfd3 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -20,6 +20,7 @@
 #include <os.h>
 #include <irq_user.h>
 #include <irq_kern.h>
+#include <as-layout.h>
 
 extern void free_irqs(void);
 
@@ -36,12 +37,14 @@ struct irq_reg {
 	int events;
 	bool active;
 	bool pending;
+	bool wakeup;
 };
 
 struct irq_entry {
 	struct list_head list;
 	int fd;
 	struct irq_reg reg[NUM_IRQ_TYPES];
+	bool suspended;
 };
 
 static DEFINE_SPINLOCK(irq_lock);
@@ -70,6 +73,11 @@ static void irq_io_loop(struct irq_reg *irq, struct uml_pt_regs *regs)
 	}
 }
 
+void sigio_handler_suspend(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
+{
+	/* nothing */
+}
+
 void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
 {
 	struct irq_entry *irq_entry;
@@ -365,9 +373,86 @@ error:
 		clear_bit(irq, irqs_allocated);
 	return err;
 }
-
 EXPORT_SYMBOL(um_request_irq);
 
+#ifdef CONFIG_PM_SLEEP
+void um_irqs_suspend(void)
+{
+	struct irq_entry *entry;
+	unsigned long flags;
+
+	sig_info[SIGIO] = sigio_handler_suspend;
+
+	spin_lock_irqsave(&irq_lock, flags);
+	list_for_each_entry(entry, &active_fds, list) {
+		enum um_irq_type t;
+		bool wake = false;
+
+		for (t = 0; t < NUM_IRQ_TYPES; t++) {
+			if (!entry->reg[t].events)
+				continue;
+
+			if (entry->reg[t].wakeup) {
+				wake = true;
+				break;
+			}
+		}
+
+		if (!wake) {
+			entry->suspended = true;
+			os_clear_fd_async(entry->fd);
+		}
+	}
+	spin_unlock_irqrestore(&irq_lock, flags);
+}
+
+void um_irqs_resume(void)
+{
+	struct irq_entry *entry;
+	unsigned long flags;
+
+	spin_lock_irqsave(&irq_lock, flags);
+	list_for_each_entry(entry, &active_fds, list) {
+		if (entry->suspended) {
+			int err = os_set_fd_async(entry->fd);
+
+			WARN(err < 0, "os_set_fd_async returned %d\n", err);
+			entry->suspended = false;
+		}
+	}
+	spin_unlock_irqrestore(&irq_lock, flags);
+
+	sig_info[SIGIO] = sigio_handler;
+	send_sigio_to_self();
+}
+
+static int normal_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+	struct irq_entry *entry;
+	unsigned long flags;
+
+	spin_lock_irqsave(&irq_lock, flags);
+	list_for_each_entry(entry, &active_fds, list) {
+		enum um_irq_type t;
+
+		for (t = 0; t < NUM_IRQ_TYPES; t++) {
+			if (!entry->reg[t].events)
+				continue;
+
+			if (entry->reg[t].irq != d->irq)
+				continue;
+			entry->reg[t].wakeup = on;
+			goto unlock;
+		}
+	}
+unlock:
+	spin_unlock_irqrestore(&irq_lock, flags);
+	return 0;
+}
+#else
+#define normal_irq_set_wake NULL
+#endif
+
 /*
  * irq_chip must define at least enable/disable and ack when
  * the edge handler is used.
@@ -384,6 +469,7 @@ static struct irq_chip normal_irq_type = {
 	.irq_ack = dummy,
 	.irq_mask = dummy,
 	.irq_unmask = dummy,
+	.irq_set_wake = normal_irq_set_wake,
 };
 
 static struct irq_chip alarm_irq_type = {
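With .irq_set_wake wired up, a driver can opt its interrupt
into waking the system through the generic IRQ core, which
forwards the request to normal_irq_set_wake(). A sketch under
the assumption of a hypothetical driver whose IRQ number is
my_irq:

	#include <linux/interrupt.h>

	static int my_irq;	/* hypothetical, e.g. from um_request_irq() */

	/* Mark the IRQ as a wakeup source: normal_irq_set_wake() then
	 * sets reg[t].wakeup, so um_irqs_suspend() leaves O_ASYNC
	 * enabled on the backing fd across suspend. */
	static int my_driver_suspend(void)
	{
		return enable_irq_wake(my_irq);
	}

	static int my_driver_resume(void)
	{
		return disable_irq_wake(my_irq);
	}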