From 62b7494209495847269a6ce0504cbefd23d42eb1 Mon Sep 17 00:00:00 2001
From: Michael Holzheu
Date: Tue, 16 Jun 2009 10:30:40 +0200
Subject: [S390] pm: power management support for SCLP drivers.

The SCLP base driver defines a new notifier callback for all upper-level
SCLP drivers, such as the SCLP console. This guarantees that on suspend
the upper-level drivers are suspended first and the SCLP base driver
afterwards. For resume it is the other way round.

The SCLP base driver itself registers a new platform device at the
platform bus and gets PM notifications via dev_pm_ops.

On suspend, the SCLP base driver switches off the receiver and sender
masks. This is done in sclp_deactivate(). After suspend all new requests
are rejected with -EIO and no more interrupts are received, because the
masks are switched off. On resume the sender and receiver masks are
restored in the sclp_reactivate() function.

When the SCLP console is suspended, all new messages are cached in the
SCLP console buffers. On resume, the cached messages are written to the
console. In addition, an early resume function removes the cached
messages from the suspend image. (An illustrative registration sketch
for upper-level drivers follows the patch.)

Signed-off-by: Michael Holzheu
Signed-off-by: Martin Schwidefsky
---
 drivers/s390/char/sclp_vt220.c | 118 +++++++++++++++++++++++++++--------------
 1 file changed, 79 insertions(+), 39 deletions(-)

(limited to 'drivers/s390/char/sclp_vt220.c')

diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index a839aa531d7c..5518e24946aa 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -1,10 +1,9 @@
 /*
- * drivers/s390/char/sclp_vt220.c
- *	SCLP VT220 terminal driver.
+ * SCLP VT220 terminal driver.
  *
- * S390 version
- *	Copyright IBM Corp. 2003,2008
- *	Author(s): Peter Oberparleiter
+ * Copyright IBM Corp. 2003, 2009
+ *
+ * Author(s): Peter Oberparleiter
  */

 #include
@@ -69,8 +68,11 @@ static struct list_head sclp_vt220_empty;

 /* List of pending requests */
 static struct list_head sclp_vt220_outqueue;
-/* Number of requests in outqueue */
-static int sclp_vt220_outqueue_count;
+/* Suspend mode flag */
+static int sclp_vt220_suspended;
+
+/* Flag that output queue is currently running */
+static int sclp_vt220_queue_running;

 /* Timer used for delaying write requests to merge subsequent messages into
  * a single buffer */
@@ -92,6 +94,8 @@ static int __initdata sclp_vt220_init_count;
 static int sclp_vt220_flush_later;

 static void sclp_vt220_receiver_fn(struct evbuf_header *evbuf);
+static void sclp_vt220_pm_event_fn(struct sclp_register *reg,
+				   enum sclp_pm_event sclp_pm_event);
 static int __sclp_vt220_emit(struct sclp_vt220_request *request);
 static void sclp_vt220_emit_current(void);

@@ -100,7 +104,8 @@ static struct sclp_register sclp_vt220_register = {
 	.send_mask = EVTYP_VT220MSG_MASK,
 	.receive_mask = EVTYP_VT220MSG_MASK,
 	.state_change_fn = NULL,
-	.receiver_fn = sclp_vt220_receiver_fn
+	.receiver_fn = sclp_vt220_receiver_fn,
+	.pm_event_fn = sclp_vt220_pm_event_fn,
 };


@@ -120,15 +125,19 @@ sclp_vt220_process_queue(struct sclp_vt220_request *request)
 		spin_lock_irqsave(&sclp_vt220_lock, flags);
 		/* Move request from outqueue to empty queue */
 		list_del(&request->list);
-		sclp_vt220_outqueue_count--;
 		list_add_tail((struct list_head *) page, &sclp_vt220_empty);
 		/* Check if there is a pending buffer on the out queue. */
 		request = NULL;
 		if (!list_empty(&sclp_vt220_outqueue))
 			request = list_entry(sclp_vt220_outqueue.next,
 					     struct sclp_vt220_request, list);
+		if (!request || sclp_vt220_suspended) {
+			sclp_vt220_queue_running = 0;
+			spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+			break;
+		}
 		spin_unlock_irqrestore(&sclp_vt220_lock, flags);
-	} while (request && __sclp_vt220_emit(request));
+	} while (__sclp_vt220_emit(request));
 	if (request == NULL && sclp_vt220_flush_later)
 		sclp_vt220_emit_current();
 	/* Check if the tty needs a wake up call */
@@ -212,26 +221,7 @@ __sclp_vt220_emit(struct sclp_vt220_request *request)
 }

 /*
- * Queue and emit given request.
- */
-static void
-sclp_vt220_emit(struct sclp_vt220_request *request)
-{
-	unsigned long flags;
-	int count;
-
-	spin_lock_irqsave(&sclp_vt220_lock, flags);
-	list_add_tail(&request->list, &sclp_vt220_outqueue);
-	count = sclp_vt220_outqueue_count++;
-	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
-	/* Emit only the first buffer immediately - callback takes care of
-	 * the rest */
-	if (count == 0 && __sclp_vt220_emit(request))
-		sclp_vt220_process_queue(request);
-}
-
-/*
- * Queue and emit current request. Return zero on success, non-zero otherwise.
+ * Queue and emit current request.
  */
 static void
 sclp_vt220_emit_current(void)
@@ -241,22 +231,33 @@ sclp_vt220_emit_current(void)
 	struct sclp_vt220_sccb *sccb;

 	spin_lock_irqsave(&sclp_vt220_lock, flags);
-	request = NULL;
-	if (sclp_vt220_current_request != NULL) {
+	if (sclp_vt220_current_request) {
 		sccb = (struct sclp_vt220_sccb *)
 				sclp_vt220_current_request->sclp_req.sccb;
 		/* Only emit buffers with content */
 		if (sccb->header.length != sizeof(struct sclp_vt220_sccb)) {
-			request = sclp_vt220_current_request;
+			list_add_tail(&sclp_vt220_current_request->list,
+				      &sclp_vt220_outqueue);
 			sclp_vt220_current_request = NULL;
 			if (timer_pending(&sclp_vt220_timer))
 				del_timer(&sclp_vt220_timer);
 		}
 		sclp_vt220_flush_later = 0;
 	}
+	if (sclp_vt220_queue_running || sclp_vt220_suspended)
+		goto out_unlock;
+	if (list_empty(&sclp_vt220_outqueue))
+		goto out_unlock;
+	request = list_first_entry(&sclp_vt220_outqueue,
+				   struct sclp_vt220_request, list);
+	sclp_vt220_queue_running = 1;
+	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+
+	if (__sclp_vt220_emit(request))
+		sclp_vt220_process_queue(request);
+	return;
+out_unlock:
 	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
-	if (request != NULL)
-		sclp_vt220_emit(request);
 }

 #define SCLP_NORMAL_WRITE	0x00
@@ -396,7 +397,7 @@ __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
 	if (sclp_vt220_current_request == NULL) {
 		while (list_empty(&sclp_vt220_empty)) {
 			spin_unlock_irqrestore(&sclp_vt220_lock, flags);
-			if (may_fail)
+			if (may_fail || sclp_vt220_suspended)
 				goto out;
 			else
 				sclp_sync_wait();
@@ -531,7 +532,7 @@ sclp_vt220_put_char(struct tty_struct *tty, unsigned char ch)
 static void
 sclp_vt220_flush_chars(struct tty_struct *tty)
 {
-	if (sclp_vt220_outqueue_count == 0)
+	if (!sclp_vt220_queue_running)
 		sclp_vt220_emit_current();
 	else
 		sclp_vt220_flush_later = 1;
@@ -635,7 +636,6 @@ static int __init __sclp_vt220_init(int num_pages)
 	init_timer(&sclp_vt220_timer);
 	sclp_vt220_current_request = NULL;
 	sclp_vt220_buffered_chars = 0;
-	sclp_vt220_outqueue_count = 0;
 	sclp_vt220_tty = NULL;
 	sclp_vt220_flush_later = 0;

@@ -736,7 +736,7 @@ static void __sclp_vt220_flush_buffer(void)
 	spin_lock_irqsave(&sclp_vt220_lock, flags);
 	if (timer_pending(&sclp_vt220_timer))
 		del_timer(&sclp_vt220_timer);
-	while (sclp_vt220_outqueue_count > 0) {
+	while (sclp_vt220_queue_running) {
 		spin_unlock_irqrestore(&sclp_vt220_lock, flags);
 		sclp_sync_wait();
 		spin_lock_irqsave(&sclp_vt220_lock, flags);
@@ -744,6 +744,46 @@ static void __sclp_vt220_flush_buffer(void)
 	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
 }

+/*
+ * Resume console: If there are cached messages, emit them.
+ */
+static void sclp_vt220_resume(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sclp_vt220_lock, flags);
+	sclp_vt220_suspended = 0;
+	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+	sclp_vt220_emit_current();
+}
+
+/*
+ * Suspend console: Set suspend flag and flush console
+ */
+static void sclp_vt220_suspend(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sclp_vt220_lock, flags);
+	sclp_vt220_suspended = 1;
+	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+	__sclp_vt220_flush_buffer();
+}
+
+static void sclp_vt220_pm_event_fn(struct sclp_register *reg,
+				   enum sclp_pm_event sclp_pm_event)
+{
+	switch (sclp_pm_event) {
+	case SCLP_PM_EVENT_FREEZE:
+		sclp_vt220_suspend();
+		break;
+	case SCLP_PM_EVENT_RESTORE:
+	case SCLP_PM_EVENT_THAW:
+		sclp_vt220_resume();
+		break;
+	}
+}
+
 static int
 sclp_vt220_notify(struct notifier_block *self,
 		  unsigned long event, void *data)
--
cgit v1.2.3
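For illustration only, and not part of the commit above: a minimal sketch of how an
upper-level SCLP driver would hook the new pm_event_fn callback that this series
introduces. The my_* names are hypothetical; struct sclp_register, enum sclp_pm_event,
the SCLP_PM_EVENT_* constants and EVTYP_VT220MSG_MASK are taken from the patch, and
registration is assumed to go through sclp_register() as the existing SCLP drivers do.

/* Sketch: upper-level SCLP driver subscribing to suspend/resume events. */
#include "sclp.h"	/* struct sclp_register, enum sclp_pm_event */

static void my_sclp_pm_event_fn(struct sclp_register *reg,
				enum sclp_pm_event sclp_pm_event)
{
	switch (sclp_pm_event) {
	case SCLP_PM_EVENT_FREEZE:
		/* Stop issuing new requests and flush queued output;
		 * the SCLP base driver is suspended after this callback. */
		break;
	case SCLP_PM_EVENT_THAW:
	case SCLP_PM_EVENT_RESTORE:
		/* Masks are active again: emit any messages that were
		 * cached while the driver was suspended. */
		break;
	}
}

static struct sclp_register my_sclp_register = {
	.send_mask	= EVTYP_VT220MSG_MASK,
	.receive_mask	= EVTYP_VT220MSG_MASK,
	.pm_event_fn	= my_sclp_pm_event_fn,
};

/* Registered once at init time, e.g. rc = sclp_register(&my_sclp_register); */

Because the base driver suspends itself only after notifying all registered
upper-level drivers (and resumes before them), the callback can safely flush or
re-emit buffers while the sender/receiver masks are still, or again, enabled.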