Diffstat (limited to 'drivers/s390/cio')
-rw-r--r--  drivers/s390/cio/Makefile        |   2
-rw-r--r--  drivers/s390/cio/airq.c          |   6
-rw-r--r--  drivers/s390/cio/blacklist.c     |   3
-rw-r--r--  drivers/s390/cio/ccwgroup.c      |  78
-rw-r--r--  drivers/s390/cio/chp.c           |   6
-rw-r--r--  drivers/s390/cio/chsc.c          |   7
-rw-r--r--  drivers/s390/cio/chsc_sch.c      |   4
-rw-r--r--  drivers/s390/cio/cio.c           |  21
-rw-r--r--  drivers/s390/cio/crw.c           | 159
-rw-r--r--  drivers/s390/cio/css.c           |  62
-rw-r--r--  drivers/s390/cio/device.c        |  57
-rw-r--r--  drivers/s390/cio/device.h        |   2
-rw-r--r--  drivers/s390/cio/device_fsm.c    |   8
-rw-r--r--  drivers/s390/cio/device_ops.c    |   2
-rw-r--r--  drivers/s390/cio/qdio.h          |   8
-rw-r--r--  drivers/s390/cio/qdio_debug.c    |   3
-rw-r--r--  drivers/s390/cio/qdio_main.c     | 222
-rw-r--r--  drivers/s390/cio/qdio_setup.c    |   1
-rw-r--r--  drivers/s390/cio/qdio_thinint.c  |  23
19 files changed, 440 insertions, 234 deletions
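
The largest single addition is the new drivers/s390/cio/crw.c, which takes over channel report word (CRW) handling and exposes crw_register_handler()/crw_unregister_handler(); chp.c, chsc.c and css.c below are converted from the old s390_register_crw_handler()/s390_unregister_crw_handler() calls that lived behind ../s390mach.h. As a rough orientation only, a caller would use the new interface along these lines (a minimal sketch: my_crw_handler and my_init are illustrative names, and the handler signature is inferred from how crw_collect_info() invokes handlers in the new file):

#include <linux/init.h>
#include <asm/crw.h>

/* Called with the (possibly chained) CRWs, or with NULLs on overflow. */
static void my_crw_handler(struct crw *crw0, struct crw *crw1, int overflow)
{
	if (overflow) {
		/* CRWs were lost; re-evaluate everything for this RSC. */
		return;
	}
	/* crw0 is the first report, crw1 the chained report or NULL. */
}

static int __init my_init(void)
{
	int ret;

	ret = crw_register_handler(CRW_RSC_CPATH, my_crw_handler);
	if (ret)
		return ret;
	/* On any later setup failure: crw_unregister_handler(CRW_RSC_CPATH); */
	return 0;
}
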
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile index bd79bd165396..adb3dd301528 100644 --- a/drivers/s390/cio/Makefile +++ b/drivers/s390/cio/Makefile @@ -3,7 +3,7 @@ # obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o scsw.o \ - fcx.o itcw.o + fcx.o itcw.o crw.o ccw_device-objs += device.o device_fsm.o device_ops.o ccw_device-objs += device_id.o device_pgid.o device_status.o obj-y += ccw_device.o cmf.o diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c index fe6cea15bbaf..65d2e769dfa1 100644 --- a/drivers/s390/cio/airq.c +++ b/drivers/s390/cio/airq.c @@ -34,8 +34,8 @@ struct airq_t { void *drv_data; }; -static union indicator_t indicators[MAX_ISC]; -static struct airq_t *airqs[MAX_ISC][NR_AIRQS]; +static union indicator_t indicators[MAX_ISC+1]; +static struct airq_t *airqs[MAX_ISC+1][NR_AIRQS]; static int register_airq(struct airq_t *airq, u8 isc) { @@ -133,6 +133,8 @@ void do_adapter_IO(u8 isc) while (word) { if (word & INDICATOR_MASK) { airq = airqs[isc][i]; + /* Make sure gcc reads from airqs only once. */ + barrier(); if (likely(airq)) airq->handler(&indicators[isc].byte[i], airq->drv_data); diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c index fe00be3675cd..6565f027791e 100644 --- a/drivers/s390/cio/blacklist.c +++ b/drivers/s390/cio/blacklist.c @@ -336,8 +336,7 @@ cio_ignore_write(struct file *file, const char __user *user_buf, size_t user_len, loff_t *offset) { char *buf; - size_t i; - ssize_t rc, ret; + ssize_t rc, ret, i; if (*offset) return -EINVAL; diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index 918e6fce2573..22ce765d537e 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c @@ -104,8 +104,9 @@ ccwgroup_ungroup_store(struct device *dev, struct device_attribute *attr, const rc = device_schedule_callback(dev, ccwgroup_ungroup_callback); out: if (rc) { - /* Release onoff "lock" when ungrouping failed. */ - atomic_set(&gdev->onoff, 0); + if (rc != -EAGAIN) + /* Release onoff "lock" when ungrouping failed. 
*/ + atomic_set(&gdev->onoff, 0); return rc; } return count; @@ -314,16 +315,32 @@ error: } EXPORT_SYMBOL(ccwgroup_create_from_string); -static int __init -init_ccwgroup (void) +static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action, + void *data); + +static struct notifier_block ccwgroup_nb = { + .notifier_call = ccwgroup_notifier +}; + +static int __init init_ccwgroup(void) { - return bus_register (&ccwgroup_bus_type); + int ret; + + ret = bus_register(&ccwgroup_bus_type); + if (ret) + return ret; + + ret = bus_register_notifier(&ccwgroup_bus_type, &ccwgroup_nb); + if (ret) + bus_unregister(&ccwgroup_bus_type); + + return ret; } -static void __exit -cleanup_ccwgroup (void) +static void __exit cleanup_ccwgroup(void) { - bus_unregister (&ccwgroup_bus_type); + bus_unregister_notifier(&ccwgroup_bus_type, &ccwgroup_nb); + bus_unregister(&ccwgroup_bus_type); } module_init(init_ccwgroup); @@ -391,27 +408,28 @@ ccwgroup_online_store (struct device *dev, struct device_attribute *attr, const unsigned long value; int ret; - gdev = to_ccwgroupdev(dev); if (!dev->driver) - return count; + return -ENODEV; + + gdev = to_ccwgroupdev(dev); + gdrv = to_ccwgroupdrv(dev->driver); - gdrv = to_ccwgroupdrv (gdev->dev.driver); if (!try_module_get(gdrv->owner)) return -EINVAL; ret = strict_strtoul(buf, 0, &value); if (ret) goto out; - ret = count; + if (value == 1) - ccwgroup_set_online(gdev); + ret = ccwgroup_set_online(gdev); else if (value == 0) - ccwgroup_set_offline(gdev); + ret = ccwgroup_set_offline(gdev); else ret = -EINVAL; out: module_put(gdrv->owner); - return ret; + return (ret == 0) ? count : ret; } static ssize_t @@ -453,13 +471,18 @@ ccwgroup_remove (struct device *dev) struct ccwgroup_device *gdev; struct ccwgroup_driver *gdrv; + device_remove_file(dev, &dev_attr_online); + device_remove_file(dev, &dev_attr_ungroup); + + if (!dev->driver) + return 0; + gdev = to_ccwgroupdev(dev); gdrv = to_ccwgroupdrv(dev->driver); - device_remove_file(dev, &dev_attr_online); - - if (gdrv && gdrv->remove) + if (gdrv->remove) gdrv->remove(gdev); + return 0; } @@ -468,9 +491,13 @@ static void ccwgroup_shutdown(struct device *dev) struct ccwgroup_device *gdev; struct ccwgroup_driver *gdrv; + if (!dev->driver) + return; + gdev = to_ccwgroupdev(dev); gdrv = to_ccwgroupdrv(dev->driver); - if (gdrv && gdrv->shutdown) + + if (gdrv->shutdown) gdrv->shutdown(gdev); } @@ -483,6 +510,19 @@ static struct bus_type ccwgroup_bus_type = { .shutdown = ccwgroup_shutdown, }; + +static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action, + void *data) +{ + struct device *dev = data; + + if (action == BUS_NOTIFY_UNBIND_DRIVER) + device_schedule_callback(dev, ccwgroup_ungroup_callback); + + return NOTIFY_OK; +} + + /** * ccwgroup_driver_register() - register a ccw group driver * @cdriver: driver to be registered diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c index 1246f61a5338..3e5f304ad88f 100644 --- a/drivers/s390/cio/chp.c +++ b/drivers/s390/cio/chp.c @@ -17,8 +17,8 @@ #include <linux/errno.h> #include <asm/chpid.h> #include <asm/sclp.h> +#include <asm/crw.h> -#include "../s390mach.h" #include "cio.h" #include "css.h" #include "ioasm.h" @@ -706,12 +706,12 @@ static int __init chp_init(void) struct chp_id chpid; int ret; - ret = s390_register_crw_handler(CRW_RSC_CPATH, chp_process_crw); + ret = crw_register_handler(CRW_RSC_CPATH, chp_process_crw); if (ret) return ret; chp_wq = create_singlethread_workqueue("cio_chp"); if (!chp_wq) { - s390_unregister_crw_handler(CRW_RSC_CPATH); + 
crw_unregister_handler(CRW_RSC_CPATH); return -ENOMEM; } INIT_WORK(&cfg_work, cfg_func); diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index ebab6ea4659b..883f16f96f22 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -19,8 +19,8 @@ #include <asm/cio.h> #include <asm/chpid.h> #include <asm/chsc.h> +#include <asm/crw.h> -#include "../s390mach.h" #include "css.h" #include "cio.h" #include "cio_debug.h" @@ -589,6 +589,7 @@ __chsc_do_secm(struct channel_subsystem *css, int enable, void *page) case 0x0102: case 0x0103: ret = -EINVAL; + break; default: ret = chsc_error_from_response(secm_area->response.code); } @@ -820,7 +821,7 @@ int __init chsc_alloc_sei_area(void) "chsc machine checks!\n"); return -ENOMEM; } - ret = s390_register_crw_handler(CRW_RSC_CSS, chsc_process_crw); + ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw); if (ret) kfree(sei_page); return ret; @@ -828,7 +829,7 @@ int __init chsc_alloc_sei_area(void) void __init chsc_free_sei_area(void) { - s390_unregister_crw_handler(CRW_RSC_CSS); + crw_unregister_handler(CRW_RSC_CSS); kfree(sei_page); } diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c index 0a2f2edafc03..93eca1731b81 100644 --- a/drivers/s390/cio/chsc_sch.c +++ b/drivers/s390/cio/chsc_sch.c @@ -84,8 +84,8 @@ static int chsc_subchannel_probe(struct subchannel *sch) kfree(private); } else { sch->private = private; - if (sch->dev.uevent_suppress) { - sch->dev.uevent_suppress = 0; + if (dev_get_uevent_suppress(&sch->dev)) { + dev_set_uevent_suppress(&sch->dev, 0); kobject_uevent(&sch->dev.kobj, KOBJ_ADD); } } diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 659f8a791656..2aebb9823044 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -30,6 +30,8 @@ #include <asm/isc.h> #include <asm/cpu.h> #include <asm/fcx.h> +#include <asm/nmi.h> +#include <asm/crw.h> #include "cio.h" #include "css.h" #include "chsc.h" @@ -38,7 +40,6 @@ #include "blacklist.h" #include "cio_debug.h" #include "chp.h" -#include "../s390mach.h" debug_info_t *cio_debug_msg_id; debug_info_t *cio_debug_trace_id; @@ -471,6 +472,7 @@ EXPORT_SYMBOL_GPL(cio_enable_subchannel); int cio_disable_subchannel(struct subchannel *sch) { char dbf_txt[15]; + int retry; int ret; CIO_TRACE_EVENT (2, "dissch"); @@ -481,16 +483,17 @@ int cio_disable_subchannel(struct subchannel *sch) if (cio_update_schib(sch)) return -ENODEV; - if (scsw_actl(&sch->schib.scsw) != 0) - /* - * the disable function must not be called while there are - * requests pending for completion ! - */ - return -EBUSY; - sch->config.ena = 0; - ret = cio_commit_config(sch); + for (retry = 0; retry < 3; retry++) { + ret = cio_commit_config(sch); + if (ret == -EBUSY) { + struct irb irb; + if (tsch(sch->schid, &irb) != 0) + break; + } else + break; + } sprintf (dbf_txt, "ret:%d", ret); CIO_TRACE_EVENT (2, dbf_txt); return ret; diff --git a/drivers/s390/cio/crw.c b/drivers/s390/cio/crw.c new file mode 100644 index 000000000000..d157665d0e76 --- /dev/null +++ b/drivers/s390/cio/crw.c @@ -0,0 +1,159 @@ +/* + * Channel report handling code + * + * Copyright IBM Corp. 
2000,2009 + * Author(s): Ingo Adlung <adlung@de.ibm.com>, + * Martin Schwidefsky <schwidefsky@de.ibm.com>, + * Cornelia Huck <cornelia.huck@de.ibm.com>, + * Heiko Carstens <heiko.carstens@de.ibm.com>, + */ + +#include <linux/semaphore.h> +#include <linux/mutex.h> +#include <linux/kthread.h> +#include <linux/init.h> +#include <asm/crw.h> + +static struct semaphore crw_semaphore; +static DEFINE_MUTEX(crw_handler_mutex); +static crw_handler_t crw_handlers[NR_RSCS]; + +/** + * crw_register_handler() - register a channel report word handler + * @rsc: reporting source code to handle + * @handler: handler to be registered + * + * Returns %0 on success and a negative error value otherwise. + */ +int crw_register_handler(int rsc, crw_handler_t handler) +{ + int rc = 0; + + if ((rsc < 0) || (rsc >= NR_RSCS)) + return -EINVAL; + mutex_lock(&crw_handler_mutex); + if (crw_handlers[rsc]) + rc = -EBUSY; + else + crw_handlers[rsc] = handler; + mutex_unlock(&crw_handler_mutex); + return rc; +} + +/** + * crw_unregister_handler() - unregister a channel report word handler + * @rsc: reporting source code to handle + */ +void crw_unregister_handler(int rsc) +{ + if ((rsc < 0) || (rsc >= NR_RSCS)) + return; + mutex_lock(&crw_handler_mutex); + crw_handlers[rsc] = NULL; + mutex_unlock(&crw_handler_mutex); +} + +/* + * Retrieve CRWs and call function to handle event. + */ +static int crw_collect_info(void *unused) +{ + struct crw crw[2]; + int ccode; + unsigned int chain; + int ignore; + +repeat: + ignore = down_interruptible(&crw_semaphore); + chain = 0; + while (1) { + crw_handler_t handler; + + if (unlikely(chain > 1)) { + struct crw tmp_crw; + + printk(KERN_WARNING"%s: Code does not support more " + "than two chained crws; please report to " + "linux390@de.ibm.com!\n", __func__); + ccode = stcrw(&tmp_crw); + printk(KERN_WARNING"%s: crw reports slct=%d, oflw=%d, " + "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", + __func__, tmp_crw.slct, tmp_crw.oflw, + tmp_crw.chn, tmp_crw.rsc, tmp_crw.anc, + tmp_crw.erc, tmp_crw.rsid); + printk(KERN_WARNING"%s: This was crw number %x in the " + "chain\n", __func__, chain); + if (ccode != 0) + break; + chain = tmp_crw.chn ? chain + 1 : 0; + continue; + } + ccode = stcrw(&crw[chain]); + if (ccode != 0) + break; + printk(KERN_DEBUG "crw_info : CRW reports slct=%d, oflw=%d, " + "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", + crw[chain].slct, crw[chain].oflw, crw[chain].chn, + crw[chain].rsc, crw[chain].anc, crw[chain].erc, + crw[chain].rsid); + /* Check for overflows. */ + if (crw[chain].oflw) { + int i; + + pr_debug("%s: crw overflow detected!\n", __func__); + mutex_lock(&crw_handler_mutex); + for (i = 0; i < NR_RSCS; i++) { + if (crw_handlers[i]) + crw_handlers[i](NULL, NULL, 1); + } + mutex_unlock(&crw_handler_mutex); + chain = 0; + continue; + } + if (crw[0].chn && !chain) { + chain++; + continue; + } + mutex_lock(&crw_handler_mutex); + handler = crw_handlers[crw[chain].rsc]; + if (handler) + handler(&crw[0], chain ? &crw[1] : NULL, 0); + mutex_unlock(&crw_handler_mutex); + /* chain is always 0 or 1 here. */ + chain = crw[chain].chn ? chain + 1 : 0; + } + goto repeat; + return 0; +} + +void crw_handle_channel_report(void) +{ + up(&crw_semaphore); +} + +/* + * Separate initcall needed for semaphore initialization since + * crw_handle_channel_report might be called before crw_machine_check_init. 
+ */ +static int __init crw_init_semaphore(void) +{ + init_MUTEX_LOCKED(&crw_semaphore); + return 0; +} +pure_initcall(crw_init_semaphore); + +/* + * Machine checks for the channel subsystem must be enabled + * after the channel subsystem is initialized + */ +static int __init crw_machine_check_init(void) +{ + struct task_struct *task; + + task = kthread_run(crw_collect_info, NULL, "kmcheck"); + if (IS_ERR(task)) + return PTR_ERR(task); + ctl_set_bit(14, 28); /* enable channel report MCH */ + return 0; +} +device_initcall(crw_machine_check_init); diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 8019288bc6de..0085d8901792 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -18,8 +18,8 @@ #include <linux/list.h> #include <linux/reboot.h> #include <asm/isc.h> +#include <asm/crw.h> -#include "../s390mach.h" #include "css.h" #include "cio.h" #include "cio_debug.h" @@ -83,6 +83,25 @@ static int call_fn_unknown_sch(struct subchannel_id schid, void *data) return rc; } +static int call_fn_all_sch(struct subchannel_id schid, void *data) +{ + struct cb_data *cb = data; + struct subchannel *sch; + int rc = 0; + + sch = get_subchannel_by_schid(schid); + if (sch) { + if (cb->fn_known_sch) + rc = cb->fn_known_sch(sch, cb->data); + put_device(&sch->dev); + } else { + if (cb->fn_unknown_sch) + rc = cb->fn_unknown_sch(schid, cb->data); + } + + return rc; +} + int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *), int (*fn_unknown)(struct subchannel_id, void *), void *data) @@ -90,13 +109,17 @@ int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *), struct cb_data cb; int rc; - cb.set = idset_sch_new(); - if (!cb.set) - return -ENOMEM; - idset_fill(cb.set); cb.data = data; cb.fn_known_sch = fn_known; cb.fn_unknown_sch = fn_unknown; + + cb.set = idset_sch_new(); + if (!cb.set) + /* fall back to brute force scanning in case of oom */ + return for_each_subchannel(call_fn_all_sch, &cb); + + idset_fill(cb.set); + /* Process registered subchannels. */ rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch); if (rc) @@ -272,7 +295,7 @@ static int css_register_subchannel(struct subchannel *sch) * the subchannel driver can decide itself when it wants to inform * userspace of its existence. */ - sch->dev.uevent_suppress = 1; + dev_set_uevent_suppress(&sch->dev, 1); css_update_ssd_info(sch); /* make it known to the system */ ret = css_sch_device_register(sch); @@ -287,7 +310,7 @@ static int css_register_subchannel(struct subchannel *sch) * a fitting driver module may be loaded based on the * modalias. */ - sch->dev.uevent_suppress = 0; + dev_set_uevent_suppress(&sch->dev, 0); kobject_uevent(&sch->dev.kobj, KOBJ_ADD); } return ret; @@ -510,6 +533,17 @@ static int reprobe_subchannel(struct subchannel_id schid, void *data) return ret; } +static void reprobe_after_idle(struct work_struct *unused) +{ + /* Make sure initial subchannel scan is done. */ + wait_event(ccw_device_init_wq, + atomic_read(&ccw_device_init_count) == 0); + if (need_reprobe) + css_schedule_reprobe(); +} + +static DECLARE_WORK(reprobe_idle_work, reprobe_after_idle); + /* Work function used to reprobe all unregistered subchannels. */ static void reprobe_all(struct work_struct *unused) { @@ -517,10 +551,12 @@ static void reprobe_all(struct work_struct *unused) CIO_MSG_EVENT(4, "reprobe start\n"); - need_reprobe = 0; /* Make sure initial subchannel scan is done. 
*/ - wait_event(ccw_device_init_wq, - atomic_read(&ccw_device_init_count) == 0); + if (atomic_read(&ccw_device_init_count) != 0) { + queue_work(ccw_device_work, &reprobe_idle_work); + return; + } + need_reprobe = 0; ret = for_each_subchannel_staged(NULL, reprobe_subchannel, NULL); CIO_MSG_EVENT(4, "reprobe done (rc=%d, need_reprobe=%d)\n", ret, @@ -619,7 +655,7 @@ css_generate_pgid(struct channel_subsystem *css, u32 tod_high) css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid; } else { #ifdef CONFIG_SMP - css->global_pgid.pgid_high.cpu_addr = hard_smp_processor_id(); + css->global_pgid.pgid_high.cpu_addr = stap(); #else css->global_pgid.pgid_high.cpu_addr = 0; #endif @@ -765,7 +801,7 @@ init_channel_subsystem (void) if (ret) goto out; - ret = s390_register_crw_handler(CRW_RSC_SCH, css_process_crw); + ret = crw_register_handler(CRW_RSC_SCH, css_process_crw); if (ret) goto out; @@ -845,7 +881,7 @@ out_unregister: out_bus: bus_unregister(&css_bus_type); out: - s390_unregister_crw_handler(CRW_RSC_CSS); + crw_unregister_handler(CRW_RSC_CSS); chsc_free_sei_area(); kfree(slow_subchannel_set); pr_alert("The CSS device driver initialization failed with " diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 23d5752349b5..c4d2f667a2f6 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -457,12 +457,13 @@ int ccw_device_set_online(struct ccw_device *cdev) return (ret == 0) ? -ENODEV : ret; } -static void online_store_handle_offline(struct ccw_device *cdev) +static int online_store_handle_offline(struct ccw_device *cdev) { if (cdev->private->state == DEV_STATE_DISCONNECTED) ccw_device_remove_disconnected(cdev); - else if (cdev->drv && cdev->drv->set_offline) - ccw_device_set_offline(cdev); + else if (cdev->online && cdev->drv && cdev->drv->set_offline) + return ccw_device_set_offline(cdev); + return 0; } static int online_store_recog_and_online(struct ccw_device *cdev) @@ -530,13 +531,10 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr, goto out; switch (i) { case 0: - online_store_handle_offline(cdev); - ret = count; + ret = online_store_handle_offline(cdev); break; case 1: ret = online_store_handle_online(cdev, force); - if (!ret) - ret = count; break; default: ret = -EINVAL; @@ -545,7 +543,7 @@ out: if (cdev->drv) module_put(cdev->drv->owner); atomic_set(&cdev->private->onoff, 0); - return ret; + return (ret < 0) ? ret : count; } static ssize_t @@ -681,35 +679,22 @@ get_orphaned_ccwdev_by_dev_id(struct channel_subsystem *css, return dev ? 
to_ccwdev(dev) : NULL; } -static void -ccw_device_add_changed(struct work_struct *work) -{ - struct ccw_device_private *priv; - struct ccw_device *cdev; - - priv = container_of(work, struct ccw_device_private, kick_work); - cdev = priv->cdev; - if (device_add(&cdev->dev)) { - put_device(&cdev->dev); - return; - } - set_bit(1, &cdev->private->registered); -} - -void ccw_device_do_unreg_rereg(struct work_struct *work) +void ccw_device_do_unbind_bind(struct work_struct *work) { struct ccw_device_private *priv; struct ccw_device *cdev; struct subchannel *sch; + int ret; priv = container_of(work, struct ccw_device_private, kick_work); cdev = priv->cdev; sch = to_subchannel(cdev->dev.parent); - ccw_device_unregister(cdev); - PREPARE_WORK(&cdev->private->kick_work, - ccw_device_add_changed); - queue_work(ccw_device_work, &cdev->private->kick_work); + if (test_bit(1, &cdev->private->registered)) { + device_release_driver(&cdev->dev); + ret = device_attach(&cdev->dev); + WARN_ON(ret == -ENODEV); + } } static void @@ -799,7 +784,7 @@ static void sch_attach_disconnected_device(struct subchannel *sch, return; other_sch = to_subchannel(cdev->dev.parent); /* Note: device_move() changes cdev->dev.parent */ - ret = device_move(&cdev->dev, &sch->dev); + ret = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV); if (ret) { CIO_MSG_EVENT(0, "Moving disconnected device 0.%x.%04x failed " "(ret=%d)!\n", cdev->private->dev_id.ssid, @@ -830,7 +815,7 @@ static void sch_attach_orphaned_device(struct subchannel *sch, * Try to move the ccw device to its new subchannel. * Note: device_move() changes cdev->dev.parent */ - ret = device_move(&cdev->dev, &sch->dev); + ret = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV); if (ret) { CIO_MSG_EVENT(0, "Moving device 0.%x.%04x from orphanage " "failed (ret=%d)!\n", @@ -897,7 +882,8 @@ void ccw_device_move_to_orphanage(struct work_struct *work) * ccw device can take its place on the subchannel. * Note: device_move() changes cdev->dev.parent */ - ret = device_move(&cdev->dev, &css->pseudo_subchannel->dev); + ret = device_move(&cdev->dev, &css->pseudo_subchannel->dev, + DPM_ORDER_NONE); if (ret) { CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to orphanage failed " "(ret=%d)!\n", cdev->private->dev_id.ssid, @@ -981,7 +967,7 @@ io_subchannel_register(struct work_struct *work) * Now we know this subchannel will stay, we can throw * our delayed uevent. */ - sch->dev.uevent_suppress = 0; + dev_set_uevent_suppress(&sch->dev, 0); kobject_uevent(&sch->dev.kobj, KOBJ_ADD); /* make it known to the system */ ret = ccw_device_register(cdev); @@ -1034,8 +1020,6 @@ static void ccw_device_call_sch_unregister(struct work_struct *work) void io_subchannel_recog_done(struct ccw_device *cdev) { - struct subchannel *sch; - if (css_init_done == 0) { cdev->private->flags.recog_done = 1; return; @@ -1046,7 +1030,6 @@ io_subchannel_recog_done(struct ccw_device *cdev) /* Remove device found not operational. */ if (!get_device(&cdev->dev)) break; - sch = to_subchannel(cdev->dev.parent); PREPARE_WORK(&cdev->private->kick_work, ccw_device_call_sch_unregister); queue_work(slow_path_wq, &cdev->private->kick_work); @@ -1129,7 +1112,7 @@ static void ccw_device_move_to_sch(struct work_struct *work) * Try to move the ccw device to its new subchannel. 
* Note: device_move() changes cdev->dev.parent */ - rc = device_move(&cdev->dev, &sch->dev); + rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV); mutex_unlock(&sch->reg_mutex); if (rc) { CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to subchannel " @@ -1243,7 +1226,7 @@ static int io_subchannel_probe(struct subchannel *sch) * the ccw_device and exit. This happens for all early * devices, e.g. the console. */ - sch->dev.uevent_suppress = 0; + dev_set_uevent_suppress(&sch->dev, 0); kobject_uevent(&sch->dev.kobj, KOBJ_ADD); cdev->dev.groups = ccwdev_attr_groups; device_initialize(&cdev->dev); diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h index 0f2e63ea48de..85e01846ca65 100644 --- a/drivers/s390/cio/device.h +++ b/drivers/s390/cio/device.h @@ -80,7 +80,7 @@ void io_subchannel_init_config(struct subchannel *sch); int ccw_device_cancel_halt_clear(struct ccw_device *); -void ccw_device_do_unreg_rereg(struct work_struct *); +void ccw_device_do_unbind_bind(struct work_struct *); void ccw_device_move_to_orphanage(struct work_struct *); int ccw_device_is_orphan(struct ccw_device *); diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index 8df5eaafc5ab..87b4bfca080f 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c @@ -194,7 +194,7 @@ ccw_device_handle_oper(struct ccw_device *cdev) cdev->id.dev_type != cdev->private->senseid.dev_type || cdev->id.dev_model != cdev->private->senseid.dev_model) { PREPARE_WORK(&cdev->private->kick_work, - ccw_device_do_unreg_rereg); + ccw_device_do_unbind_bind); queue_work(ccw_device_work, &cdev->private->kick_work); return 0; } @@ -366,7 +366,7 @@ static void ccw_device_oper_notify(struct ccw_device *cdev) } /* Driver doesn't want device back. */ ccw_device_set_notoper(cdev); - PREPARE_WORK(&cdev->private->kick_work, ccw_device_do_unreg_rereg); + PREPARE_WORK(&cdev->private->kick_work, ccw_device_do_unbind_bind); queue_work(ccw_device_work, &cdev->private->kick_work); } @@ -728,7 +728,7 @@ static void ccw_device_generic_notoper(struct ccw_device *cdev, { struct subchannel *sch; - cdev->private->state = DEV_STATE_NOT_OPER; + ccw_device_set_notoper(cdev); sch = to_subchannel(cdev->dev.parent); css_schedule_eval(sch->schid); } @@ -1052,7 +1052,7 @@ ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event) sch = to_subchannel(cdev->dev.parent); /* * An interrupt in state offline means a previous disable was not - * successful. Try again. + * successful - should not happen, but we try to disable again. 
*/ cio_disable_subchannel(sch); } diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index eabcc42d63df..151754d54745 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c @@ -680,7 +680,7 @@ int ccw_device_tm_intrg(struct ccw_device *cdev) if (cdev->private->state != DEV_STATE_ONLINE) return -EIO; if (!scsw_is_tm(&sch->schib.scsw) || - !(scsw_actl(&sch->schib.scsw) | SCSW_ACTL_START_PEND)) + !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_START_PEND)) return -EINVAL; return cio_tm_intrg(sch); } diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index 42f2b09631b6..13bcb8114388 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h @@ -186,6 +186,9 @@ struct qdio_input_q { /* input buffer acknowledgement flag */ int polling; + /* first ACK'ed buffer */ + int ack_start; + /* how much sbals are acknowledged with qebsm */ int ack_count; @@ -234,7 +237,7 @@ struct qdio_q { int first_to_check; /* first_to_check of the last time */ - int last_move_ftc; + int last_move; /* beginning position for calling the program */ int first_to_kick; @@ -244,7 +247,6 @@ struct qdio_q { struct qdio_irq *irq_ptr; struct tasklet_struct tasklet; - spinlock_t lock; /* error condition during a data transfer */ unsigned int qdio_error; @@ -354,7 +356,7 @@ int get_buf_state(struct qdio_q *q, unsigned int bufnr, unsigned char *state, int auto_ack); void qdio_check_outbound_after_thinint(struct qdio_q *q); int qdio_inbound_q_moved(struct qdio_q *q); -void qdio_kick_inbound_handler(struct qdio_q *q); +void qdio_kick_handler(struct qdio_q *q); void qdio_stop_polling(struct qdio_q *q); int qdio_siga_sync_q(struct qdio_q *q); diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c index da7afb04e71f..e3434b34f86c 100644 --- a/drivers/s390/cio/qdio_debug.c +++ b/drivers/s390/cio/qdio_debug.c @@ -63,8 +63,9 @@ static int qstat_show(struct seq_file *m, void *v) seq_printf(m, "device state indicator: %d\n", *(u32 *)q->irq_ptr->dsci); seq_printf(m, "nr_used: %d\n", atomic_read(&q->nr_buf_used)); seq_printf(m, "ftc: %d\n", q->first_to_check); - seq_printf(m, "last_move_ftc: %d\n", q->last_move_ftc); + seq_printf(m, "last_move: %d\n", q->last_move); seq_printf(m, "polling: %d\n", q->u.in.polling); + seq_printf(m, "ack start: %d\n", q->u.in.ack_start); seq_printf(m, "ack count: %d\n", q->u.in.ack_count); seq_printf(m, "slsb buffer states:\n"); seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n"); diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 10cb0f8726e5..9e8a2914259b 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -380,11 +380,11 @@ inline void qdio_stop_polling(struct qdio_q *q) /* show the card that we are not polling anymore */ if (is_qebsm(q)) { - set_buf_states(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT, + set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT, q->u.in.ack_count); q->u.in.ack_count = 0; } else - set_buf_state(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT); + set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT); } static void announce_buffer_error(struct qdio_q *q, int count) @@ -419,15 +419,15 @@ static inline void inbound_primed(struct qdio_q *q, int count) if (!q->u.in.polling) { q->u.in.polling = 1; q->u.in.ack_count = count; - q->last_move_ftc = q->first_to_check; + q->u.in.ack_start = q->first_to_check; return; } /* delete the previous ACK's */ - set_buf_states(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT, + set_buf_states(q, q->u.in.ack_start, 
SLSB_P_INPUT_NOT_INIT, q->u.in.ack_count); q->u.in.ack_count = count; - q->last_move_ftc = q->first_to_check; + q->u.in.ack_start = q->first_to_check; return; } @@ -439,14 +439,13 @@ static inline void inbound_primed(struct qdio_q *q, int count) if (q->u.in.polling) { /* reset the previous ACK but first set the new one */ set_buf_state(q, new, SLSB_P_INPUT_ACK); - set_buf_state(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT); - } - else { + set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT); + } else { q->u.in.polling = 1; - set_buf_state(q, q->first_to_check, SLSB_P_INPUT_ACK); + set_buf_state(q, new, SLSB_P_INPUT_ACK); } - q->last_move_ftc = new; + q->u.in.ack_start = new; count--; if (!count) return; @@ -455,7 +454,7 @@ static inline void inbound_primed(struct qdio_q *q, int count) * Need to change all PRIMED buffers to NOT_INIT, otherwise * we're loosing initiative in the thinint code. */ - set_buf_states(q, next_buf(q->first_to_check), SLSB_P_INPUT_NOT_INIT, + set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count); } @@ -523,7 +522,8 @@ int qdio_inbound_q_moved(struct qdio_q *q) bufnr = get_inbound_buffer_frontier(q); - if ((bufnr != q->last_move_ftc) || q->qdio_error) { + if ((bufnr != q->last_move) || q->qdio_error) { + q->last_move = bufnr; if (!need_siga_sync(q) && !pci_out_supported(q)) q->u.in.timestamp = get_usecs(); @@ -570,29 +570,30 @@ static int qdio_inbound_q_done(struct qdio_q *q) } } -void qdio_kick_inbound_handler(struct qdio_q *q) +void qdio_kick_handler(struct qdio_q *q) { - int count, start, end; - - qdio_perf_stat_inc(&perf_stats.inbound_handler); - - start = q->first_to_kick; - end = q->first_to_check; - if (end >= start) - count = end - start; - else - count = end + QDIO_MAX_BUFFERS_PER_Q - start; - - DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%3d c:%3d", start, count); + int start = q->first_to_kick; + int end = q->first_to_check; + int count; if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) return; - q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, - start, count, q->irq_ptr->int_parm); + count = sub_buf(end, start); + + if (q->is_input_q) { + qdio_perf_stat_inc(&perf_stats.inbound_handler); + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%3d c:%3d", start, count); + } else { + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: nr:%1d", q->nr); + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "s:%3d c:%3d", start, count); + } + + q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count, + q->irq_ptr->int_parm); /* for the next time */ - q->first_to_kick = q->first_to_check; + q->first_to_kick = end; q->qdio_error = 0; } @@ -603,7 +604,7 @@ again: if (!qdio_inbound_q_moved(q)) return; - qdio_kick_inbound_handler(q); + qdio_kick_handler(q); if (!qdio_inbound_q_done(q)) /* means poll time is not yet over */ @@ -698,21 +699,21 @@ static inline int qdio_outbound_q_moved(struct qdio_q *q) bufnr = get_outbound_buffer_frontier(q); - if ((bufnr != q->last_move_ftc) || q->qdio_error) { - q->last_move_ftc = bufnr; + if ((bufnr != q->last_move) || q->qdio_error) { + q->last_move = bufnr; DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr); return 1; } else return 0; } -static void qdio_kick_outbound_q(struct qdio_q *q) +static int qdio_kick_outbound_q(struct qdio_q *q) { unsigned int busy_bit; int cc; if (!need_siga_out(q)) - return; + return 0; DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr); qdio_perf_stat_inc(&perf_stats.siga_out); @@ -724,75 +725,37 @@ static void qdio_kick_outbound_q(struct qdio_q *q) case 2: if (busy_bit) { DBF_ERROR("%4x 
cc2 REP:%1d", SCH_NO(q), q->nr); - q->qdio_error = cc | QDIO_ERROR_SIGA_BUSY; - } else { - DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", - q->nr); - q->qdio_error = cc; - } + cc |= QDIO_ERROR_SIGA_BUSY; + } else + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr); break; case 1: case 3: DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc); - q->qdio_error = cc; break; } -} - -static void qdio_kick_outbound_handler(struct qdio_q *q) -{ - int start, end, count; - - start = q->first_to_kick; - end = q->last_move_ftc; - if (end >= start) - count = end - start; - else - count = end + QDIO_MAX_BUFFERS_PER_Q - start; - - DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kickouth: %1d", q->nr); - DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "s:%3d c:%3d", start, count); - - if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) - return; - - q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count, - q->irq_ptr->int_parm); - - /* for the next time: */ - q->first_to_kick = q->last_move_ftc; - q->qdio_error = 0; + return cc; } static void __qdio_outbound_processing(struct qdio_q *q) { - unsigned long flags; - qdio_perf_stat_inc(&perf_stats.tasklet_outbound); - spin_lock_irqsave(&q->lock, flags); - BUG_ON(atomic_read(&q->nr_buf_used) < 0); if (qdio_outbound_q_moved(q)) - qdio_kick_outbound_handler(q); - - spin_unlock_irqrestore(&q->lock, flags); + qdio_kick_handler(q); - if (queue_type(q) == QDIO_ZFCP_QFMT) { + if (queue_type(q) == QDIO_ZFCP_QFMT) if (!pci_out_supported(q) && !qdio_outbound_q_done(q)) - tasklet_schedule(&q->tasklet); - return; - } + goto sched; /* bail out for HiperSockets unicast queues */ if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q)) return; if ((queue_type(q) == QDIO_IQDIO_QFMT) && - (atomic_read(&q->nr_buf_used)) > QDIO_IQDIO_POLL_LVL) { - tasklet_schedule(&q->tasklet); - return; - } + (atomic_read(&q->nr_buf_used)) > QDIO_IQDIO_POLL_LVL) + goto sched; if (q->u.out.pci_out_enabled) return; @@ -810,6 +773,12 @@ static void __qdio_outbound_processing(struct qdio_q *q) qdio_perf_stat_inc(&perf_stats.debug_tl_out_timer); } } + return; + +sched: + if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED)) + return; + tasklet_schedule(&q->tasklet); } /* outbound tasklet */ @@ -822,6 +791,9 @@ void qdio_outbound_processing(unsigned long data) void qdio_outbound_timer(unsigned long data) { struct qdio_q *q = (struct qdio_q *)data; + + if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED)) + return; tasklet_schedule(&q->tasklet); } @@ -863,6 +835,9 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr) int i; struct qdio_q *q; + if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED)) + return; + qdio_perf_stat_inc(&perf_stats.pci_int); for_each_input_queue(irq_ptr, q, i) @@ -1065,8 +1040,9 @@ EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc); * @cdev: associated ccw device * @how: use halt or clear to shutdown * - * This function calls qdio_shutdown() for @cdev with method @how - * and on success qdio_free() for @cdev. + * This function calls qdio_shutdown() for @cdev with method @how. + * and qdio_free(). The qdio_free() return value is ignored since + * !irq_ptr is already checked. 
*/ int qdio_cleanup(struct ccw_device *cdev, int how) { @@ -1077,8 +1053,8 @@ int qdio_cleanup(struct ccw_device *cdev, int how) return -ENODEV; rc = qdio_shutdown(cdev, how); - if (rc == 0) - rc = qdio_free(cdev); + + qdio_free(cdev); return rc; } EXPORT_SYMBOL_GPL(qdio_cleanup); @@ -1090,11 +1066,11 @@ static void qdio_shutdown_queues(struct ccw_device *cdev) int i; for_each_input_queue(irq_ptr, q, i) - tasklet_disable(&q->tasklet); + tasklet_kill(&q->tasklet); for_each_output_queue(irq_ptr, q, i) { - tasklet_disable(&q->tasklet); del_timer(&q->u.out.timer); + tasklet_kill(&q->tasklet); } } @@ -1112,6 +1088,7 @@ int qdio_shutdown(struct ccw_device *cdev, int how) if (!irq_ptr) return -ENODEV; + BUG_ON(irqs_disabled()); DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no); mutex_lock(&irq_ptr->setup_mutex); @@ -1124,6 +1101,12 @@ int qdio_shutdown(struct ccw_device *cdev, int how) return 0; } + /* + * Indicate that the device is going down. Scheduling the queue + * tasklets is forbidden from here on. + */ + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); + tiqdio_remove_input_queues(irq_ptr); qdio_shutdown_queues(cdev); qdio_shutdown_debug_entries(irq_ptr, cdev); @@ -1403,9 +1386,8 @@ int qdio_activate(struct ccw_device *cdev) switch (irq_ptr->state) { case QDIO_IRQ_STATE_STOPPED: case QDIO_IRQ_STATE_ERR: - mutex_unlock(&irq_ptr->setup_mutex); - qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); - return -EIO; + rc = -EIO; + break; default: qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE); rc = 0; @@ -1442,10 +1424,10 @@ static inline int buf_in_between(int bufnr, int start, int count) * @bufnr: first buffer to process * @count: how many buffers are emptied */ -static void handle_inbound(struct qdio_q *q, unsigned int callflags, - int bufnr, int count) +static int handle_inbound(struct qdio_q *q, unsigned int callflags, + int bufnr, int count) { - int used, cc, diff; + int used, diff; if (!q->u.in.polling) goto set; @@ -1456,19 +1438,18 @@ static void handle_inbound(struct qdio_q *q, unsigned int callflags, q->u.in.polling = 0; q->u.in.ack_count = 0; goto set; - } else if (buf_in_between(q->last_move_ftc, bufnr, count)) { + } else if (buf_in_between(q->u.in.ack_start, bufnr, count)) { if (is_qebsm(q)) { - /* partial overwrite, just update last_move_ftc */ + /* partial overwrite, just update ack_start */ diff = add_buf(bufnr, count); - diff = sub_buf(diff, q->last_move_ftc); + diff = sub_buf(diff, q->u.in.ack_start); q->u.in.ack_count -= diff; if (q->u.in.ack_count <= 0) { q->u.in.polling = 0; q->u.in.ack_count = 0; - /* TODO: must we set last_move_ftc to something meaningful? 
*/ goto set; } - q->last_move_ftc = add_buf(q->last_move_ftc, diff); + q->u.in.ack_start = add_buf(q->u.in.ack_start, diff); } else /* the only ACK will be deleted, so stop polling */ @@ -1483,13 +1464,11 @@ set: /* no need to signal as long as the adapter had free buffers */ if (used) - return; + return 0; - if (need_siga_in(q)) { - cc = qdio_siga_input(q); - if (cc) - q->qdio_error = cc; - } + if (need_siga_in(q)) + return qdio_siga_input(q); + return 0; } /** @@ -1499,11 +1478,11 @@ set: * @bufnr: first buffer to process * @count: how many buffers are filled */ -static void handle_outbound(struct qdio_q *q, unsigned int callflags, - int bufnr, int count) +static int handle_outbound(struct qdio_q *q, unsigned int callflags, + int bufnr, int count) { unsigned char state; - int used; + int used, rc = 0; qdio_perf_stat_inc(&perf_stats.outbound_handler); @@ -1518,27 +1497,26 @@ static void handle_outbound(struct qdio_q *q, unsigned int callflags, if (queue_type(q) == QDIO_IQDIO_QFMT) { if (multicast_outbound(q)) - qdio_kick_outbound_q(q); + rc = qdio_kick_outbound_q(q); else if ((q->irq_ptr->ssqd_desc.mmwc > 1) && (count > 1) && (count <= q->irq_ptr->ssqd_desc.mmwc)) { /* exploit enhanced SIGA */ q->u.out.use_enh_siga = 1; - qdio_kick_outbound_q(q); + rc = qdio_kick_outbound_q(q); } else { /* * One siga-w per buffer required for unicast * HiperSockets. */ q->u.out.use_enh_siga = 0; - while (count--) - qdio_kick_outbound_q(q); + while (count--) { + rc = qdio_kick_outbound_q(q); + if (rc) + goto out; + } } - - /* report CC=2 conditions synchronously */ - if (q->qdio_error) - __qdio_outbound_processing(q); goto out; } @@ -1550,14 +1528,14 @@ static void handle_outbound(struct qdio_q *q, unsigned int callflags, /* try to fast requeue buffers */ get_buf_state(q, prev_buf(bufnr), &state, 0); if (state != SLSB_CU_OUTPUT_PRIMED) - qdio_kick_outbound_q(q); + rc = qdio_kick_outbound_q(q); else { DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "fast-req"); qdio_perf_stat_inc(&perf_stats.fast_requeue); } out: - /* Fixme: could wait forever if called from process context */ tasklet_schedule(&q->tasklet); + return rc; } /** @@ -1596,14 +1574,12 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags, return -EBUSY; if (callflags & QDIO_FLAG_SYNC_INPUT) - handle_inbound(irq_ptr->input_qs[q_nr], callflags, bufnr, - count); + return handle_inbound(irq_ptr->input_qs[q_nr], + callflags, bufnr, count); else if (callflags & QDIO_FLAG_SYNC_OUTPUT) - handle_outbound(irq_ptr->output_qs[q_nr], callflags, bufnr, - count); - else - return -EINVAL; - return 0; + return handle_outbound(irq_ptr->output_qs[q_nr], + callflags, bufnr, count); + return -EINVAL; } EXPORT_SYMBOL_GPL(do_QDIO); diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c index c08356b95bf5..18d54fc21ce9 100644 --- a/drivers/s390/cio/qdio_setup.c +++ b/drivers/s390/cio/qdio_setup.c @@ -117,7 +117,6 @@ static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr, q->mask = 1 << (31 - i); q->nr = i; q->handler = handler; - spin_lock_init(&q->lock); } static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr, diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c index 8e90e147b746..c655d011a78d 100644 --- a/drivers/s390/cio/qdio_thinint.c +++ b/drivers/s390/cio/qdio_thinint.c @@ -31,6 +31,7 @@ /* list of thin interrupt input queues */ static LIST_HEAD(tiq_list); +DEFINE_MUTEX(tiq_list_lock); /* adapter local summary indicator */ static unsigned char *tiqdio_alsi; @@ -95,12 +96,11 @@ void 
tiqdio_add_input_queues(struct qdio_irq *irq_ptr) if (!css_qdio_omit_svs && irq_ptr->siga_flag.sync) css_qdio_omit_svs = 1; - for_each_input_queue(irq_ptr, q, i) { + mutex_lock(&tiq_list_lock); + for_each_input_queue(irq_ptr, q, i) list_add_rcu(&q->entry, &tiq_list); - synchronize_rcu(); - } + mutex_unlock(&tiq_list_lock); xchg(irq_ptr->dsci, 1); - tasklet_schedule(&tiqdio_tasklet); } /* @@ -118,7 +118,10 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr) /* if establish triggered an error */ if (!q || !q->entry.prev || !q->entry.next) continue; + + mutex_lock(&tiq_list_lock); list_del_rcu(&q->entry); + mutex_unlock(&tiq_list_lock); synchronize_rcu(); } } @@ -155,15 +158,15 @@ static void __tiqdio_inbound_processing(struct qdio_q *q) */ qdio_check_outbound_after_thinint(q); -again: if (!qdio_inbound_q_moved(q)) return; - qdio_kick_inbound_handler(q); + qdio_kick_handler(q); if (!tiqdio_inbound_q_done(q)) { qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop); - goto again; + if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) + tasklet_schedule(&q->tasklet); } qdio_stop_polling(q); @@ -173,7 +176,8 @@ again: */ if (!tiqdio_inbound_q_done(q)) { qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2); - goto again; + if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) + tasklet_schedule(&q->tasklet); } } @@ -366,10 +370,11 @@ void qdio_shutdown_thinint(struct qdio_irq *irq_ptr) void __exit tiqdio_unregister_thinints(void) { - tasklet_disable(&tiqdio_tasklet); + WARN_ON(!list_empty(&tiq_list)); if (tiqdio_alsi) { s390_unregister_adapter_interrupt(tiqdio_alsi, QDIO_AIRQ_ISC); isc_unregister(QDIO_AIRQ_ISC); } + tasklet_kill(&tiqdio_tasklet); } |
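
A note on the qdio_main.c changes above: the separate inbound and outbound kick routines are folded into one qdio_kick_handler(), and the open-coded wrap-around count calculation is replaced by sub_buf(end, start). A minimal sketch of the assumed ring arithmetic follows; sub_buf() is taken here to be modular subtraction on the 128-entry SBAL ring, and sub_buf_sketch is an illustrative name, not the in-tree helper.

#define QDIO_MAX_BUFFERS_PER_Q	128

/* Assumed equivalent of sub_buf(): modular distance on the SBAL ring. */
static inline int sub_buf_sketch(int end, int start)
{
	return (end - start) & (QDIO_MAX_BUFFERS_PER_Q - 1);
}

/*
 * This matches the open-coded form removed by the patch:
 *	if (end >= start)
 *		count = end - start;
 *	else
 *		count = end + QDIO_MAX_BUFFERS_PER_Q - start;
 */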