author | Julian Wiedmann <jwi@linux.ibm.com> | 2021-03-15 19:39:20 +0100 |
---|---|---|
committer | Heiko Carstens <hca@linux.ibm.com> | 2021-07-27 09:39:18 +0200 |
commit | d01fad2c6a3d2b4962b9195747b07535d2eb3e41 (patch) | |
tree | cb10c96d95b1cf4d3005ad038e16d6248199d15b /drivers/s390/cio/qdio_main.c | |
parent | s390/qdio: propagate error when cancelling a ccw fails (diff) | |
s390/qdio: remove remaining tasklet & timer code
Both qdio drivers have moved away from using qdio's internal tasklet
and timer mechanisms for Output Queues. Rip out all the leftovers.
Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Reviewed-by: Benjamin Block <bblock@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
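As background for the change: with the tasklet and timer gone, completion scanning for Output Queues is entirely the upper-layer driver's job. The sketch below is illustrative only (it is not taken from qeth or zfcp); it uses the exported qdio_inspect_queue() that remains in the diff, assumes its trailing parameters mirror those of the removed qdio_get_next_buffers() (first completed buffer, error indication, completed count as return value), and the struct my_card / my_complete_buffers() / my_scan_output_queue() names are hypothetical.

```c
#include <linux/types.h>
#include <asm/qdio.h>

/* Hypothetical driver state; only the ccw_device matters for this sketch. */
struct my_card {
	struct ccw_device *cdev;
};

/* Hypothetical completion handler: recycle the SBALs [bufnr, bufnr + count). */
static void my_complete_buffers(struct my_card *card, unsigned int nr,
				unsigned int bufnr, int count,
				unsigned int error)
{
	/* Return the buffers to the free list, account for errors, etc. */
}

/*
 * Scan one Output Queue for completions.  This is the work that the removed
 * qdio_outbound_tasklet() / qdio_outbound_timer() pair used to trigger on the
 * driver's behalf; the driver now calls it from its own polling context.
 */
static void my_scan_output_queue(struct my_card *card, unsigned int nr)
{
	unsigned int bufnr = 0, error = 0;
	int count;

	/* is_input == false: inspect an Output Queue. */
	count = qdio_inspect_queue(card->cdev, nr, false, &bufnr, &error);
	if (count <= 0)
		return;

	my_complete_buffers(card, nr, bufnr, count, error);
}
```

Both qeth and zfcp already perform their own scanning along these lines, which is what makes qdio's internal tasklet and timer fallback redundant and lets this patch delete it.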
Diffstat (limited to 'drivers/s390/cio/qdio_main.c')
-rw-r--r-- | drivers/s390/cio/qdio_main.c | 198 |
1 file changed, 5 insertions, 193 deletions
```diff
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 99f34bdb267b..6ed8a04e099b 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -10,7 +10,6 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
-#include <linux/timer.h>
 #include <linux/delay.h>
 #include <linux/gfp.h>
 #include <linux/io.h>
@@ -373,18 +372,6 @@ static inline int qdio_siga_input(struct qdio_q *q)
 	return (cc) ? -EIO : 0;
 }
 
-#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
-#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)
-
-static inline void qdio_sync_queues(struct qdio_q *q)
-{
-	/* PCI capable outbound queues will also be scanned so sync them too */
-	if (pci_out_supported(q->irq_ptr))
-		qdio_siga_sync_all(q);
-	else
-		qdio_siga_sync_q(q);
-}
-
 int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
 			unsigned char *state)
 {
@@ -521,15 +508,6 @@ static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
 	return 1;
 }
 
-static inline int qdio_tasklet_schedule(struct qdio_q *q)
-{
-	if (likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) {
-		tasklet_schedule(&q->u.out.tasklet);
-		return 0;
-	}
-	return -EPERM;
-}
-
 static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start,
 					unsigned int *error)
 {
@@ -595,12 +573,6 @@ static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start,
 	}
 }
 
-/* all buffers processed? */
-static inline int qdio_outbound_q_done(struct qdio_q *q)
-{
-	return atomic_read(&q->nr_buf_used) == 0;
-}
-
 static int qdio_kick_outbound_q(struct qdio_q *q, unsigned int count,
 				unsigned long aob)
 {
@@ -644,75 +616,6 @@ retry:
 	return cc;
 }
 
-void qdio_outbound_tasklet(struct tasklet_struct *t)
-{
-	struct qdio_output_q *out_q = from_tasklet(out_q, t, tasklet);
-	struct qdio_q *q = container_of(out_q, struct qdio_q, u.out);
-	unsigned int start = q->first_to_check;
-	unsigned int error = 0;
-	int count;
-
-	qperf_inc(q, tasklet_outbound);
-	WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0);
-
-	count = get_outbound_buffer_frontier(q, start, &error);
-	if (count) {
-		q->first_to_check = add_buf(start, count);
-
-		if (q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE) {
-			qperf_inc(q, outbound_handler);
-			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
-				      start, count);
-
-			q->handler(q->irq_ptr->cdev, error, q->nr, start,
-				   count, q->irq_ptr->int_parm);
-		}
-	}
-
-	if (queue_type(q) == QDIO_ZFCP_QFMT && !pci_out_supported(q->irq_ptr) &&
-	    !qdio_outbound_q_done(q))
-		goto sched;
-
-	if (q->u.out.pci_out_enabled)
-		return;
-
-	/*
-	 * Now we know that queue type is either qeth without pci enabled
-	 * or HiperSockets. Make sure buffer switch from PRIMED to EMPTY
-	 * is noticed and outbound_handler is called after some time.
-	 */
-	if (qdio_outbound_q_done(q))
-		del_timer_sync(&q->u.out.timer);
-	else
-		if (!timer_pending(&q->u.out.timer) &&
-		    likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
-			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
-	return;
-
-sched:
-	qdio_tasklet_schedule(q);
-}
-
-void qdio_outbound_timer(struct timer_list *t)
-{
-	struct qdio_q *q = from_timer(q, t, u.out.timer);
-
-	qdio_tasklet_schedule(q);
-}
-
-static inline void qdio_check_outbound_pci_queues(struct qdio_irq *irq)
-{
-	struct qdio_q *out;
-	int i;
-
-	if (!pci_out_supported(irq) || !irq->scan_threshold)
-		return;
-
-	for_each_output_queue(irq, out, i)
-		if (!qdio_outbound_q_done(out))
-			qdio_tasklet_schedule(out);
-}
-
 static inline void qdio_set_state(struct qdio_irq *irq_ptr,
 				  enum qdio_irq_states state)
 {
@@ -734,25 +637,11 @@ static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
 /* PCI interrupt handler */
 static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
 {
-	int i;
-	struct qdio_q *q;
-
 	if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
 		return;
 
 	qdio_deliver_irq(irq_ptr);
 	irq_ptr->last_data_irq_time = S390_lowcore.int_clock;
-
-	if (!pci_out_supported(irq_ptr) || !irq_ptr->scan_threshold)
-		return;
-
-	for_each_output_queue(irq_ptr, q, i) {
-		if (qdio_outbound_q_done(q))
-			continue;
-		if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
-			qdio_siga_sync_q(q);
-		qdio_tasklet_schedule(q);
-	}
 }
 
 static void qdio_handle_activate_check(struct qdio_irq *irq_ptr,
@@ -879,17 +768,6 @@ int qdio_get_ssqd_desc(struct ccw_device *cdev,
 }
 EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
 
-static void qdio_shutdown_queues(struct qdio_irq *irq_ptr)
-{
-	struct qdio_q *q;
-	int i;
-
-	for_each_output_queue(irq_ptr, q, i) {
-		del_timer_sync(&q->u.out.timer);
-		tasklet_kill(&q->u.out.tasklet);
-	}
-}
-
 static int qdio_cancel_ccw(struct qdio_irq *irq, int how)
 {
 	struct ccw_device *cdev = irq->cdev;
@@ -949,12 +827,10 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
 	}
 
 	/*
-	 * Indicate that the device is going down. Scheduling the queue
-	 * tasklets is forbidden from here on.
+	 * Indicate that the device is going down.
 	 */
 	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
 
-	qdio_shutdown_queues(irq_ptr);
 	qdio_shutdown_debug_entries(irq_ptr);
 
 	rc = qdio_cancel_ccw(irq_ptr, how);
@@ -1238,12 +1114,10 @@ EXPORT_SYMBOL_GPL(qdio_activate);
 /**
  * handle_inbound - reset processed input buffers
  * @q: queue containing the buffers
- * @callflags: flags
  * @bufnr: first buffer to process
  * @count: how many buffers are emptied
  */
-static int handle_inbound(struct qdio_q *q, unsigned int callflags,
-			  int bufnr, int count)
+static int handle_inbound(struct qdio_q *q, int bufnr, int count)
 {
 	int overlap;
 
@@ -1269,16 +1143,13 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags,
 /**
  * handle_outbound - process filled outbound buffers
  * @q: queue containing the buffers
- * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are filled
 * @aob: asynchronous operation block
 */
-static int handle_outbound(struct qdio_q *q, unsigned int callflags,
-			   unsigned int bufnr, unsigned int count,
+static int handle_outbound(struct qdio_q *q, unsigned int bufnr, unsigned int count,
 			   struct qaob *aob)
 {
-	const unsigned int scan_threshold = q->irq_ptr->scan_threshold;
 	unsigned char state = 0;
 	int used, rc = 0;
 
@@ -1290,12 +1161,6 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
 	if (used == QDIO_MAX_BUFFERS_PER_Q)
 		qperf_inc(q, outbound_queue_full);
 
-	if (callflags & QDIO_FLAG_PCI_OUT) {
-		q->u.out.pci_out_enabled = 1;
-		qperf_inc(q, pci_request_int);
-	} else
-		q->u.out.pci_out_enabled = 0;
-
 	if (queue_type(q) == QDIO_IQDIO_QFMT) {
 		unsigned long phys_aob = aob ? virt_to_phys(aob) : 0;
 
@@ -1312,18 +1177,6 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
 		rc = qdio_kick_outbound_q(q, count, 0);
 	}
 
-	/* Let drivers implement their own completion scanning: */
-	if (!scan_threshold)
-		return rc;
-
-	/* in case of SIGA errors we must process the error immediately */
-	if (used >= scan_threshold || rc)
-		qdio_tasklet_schedule(q);
-	else
-		/* free the SBALs in case of no further traffic */
-		if (!timer_pending(&q->u.out.timer) &&
-		    likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
-			mod_timer(&q->u.out.timer, jiffies + HZ);
 	return rc;
 }
 
@@ -1355,11 +1208,9 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
 	if (!count)
 		return 0;
 	if (callflags & QDIO_FLAG_SYNC_INPUT)
-		return handle_inbound(irq_ptr->input_qs[q_nr],
-				      callflags, bufnr, count);
+		return handle_inbound(irq_ptr->input_qs[q_nr], bufnr, count);
 	else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
-		return handle_outbound(irq_ptr->output_qs[q_nr],
-				       callflags, bufnr, count, aob);
+		return handle_outbound(irq_ptr->output_qs[q_nr], bufnr, count, aob);
 	return -EINVAL;
 }
 EXPORT_SYMBOL_GPL(do_QDIO);
@@ -1447,45 +1298,6 @@ int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr, bool is_input,
 EXPORT_SYMBOL_GPL(qdio_inspect_queue);
 
 /**
- * qdio_get_next_buffers - process input buffers
- * @cdev: associated ccw_device for the qdio subchannel
- * @nr: input queue number
- * @bufnr: first filled buffer number
- * @error: buffers are in error state
- *
- * Return codes
- *   < 0 - error
- *   = 0 - no new buffers found
- *   > 0 - number of processed buffers
- */
-int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
-			  int *error)
-{
-	struct qdio_q *q;
-	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
-
-	if (!irq_ptr)
-		return -ENODEV;
-	q = irq_ptr->input_qs[nr];
-
-	/*
-	 * Cannot rely on automatic sync after interrupt since queues may
-	 * also be examined without interrupt.
-	 */
-	if (need_siga_sync(q))
-		qdio_sync_queues(q);
-
-	qdio_check_outbound_pci_queues(irq_ptr);
-
-	/* Note: upper-layer MUST stop processing immediately here ... */
-	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
-		return -EIO;
-
-	return __qdio_inspect_queue(q, bufnr, error);
-}
-EXPORT_SYMBOL(qdio_get_next_buffers);
-
-/**
  * qdio_stop_irq - disable interrupt processing for the device
  * @cdev: associated ccw_device for the qdio subchannel
```