Diffstat (limited to 'drivers/s390/cio')
38 files changed, 7232 insertions, 5482 deletions
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index cfaf77b320f5..bd79bd165396 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -2,9 +2,13 @@
 # Makefile for the S/390 common i/o drivers
 #
-obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o
+obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o scsw.o \
+	fcx.o itcw.o
 ccw_device-objs += device.o device_fsm.o device_ops.o
 ccw_device-objs += device_id.o device_pgid.o device_status.o
 obj-y += ccw_device.o cmf.o
+obj-$(CONFIG_CHSC_SCH) += chsc_sch.o
 obj-$(CONFIG_CCWGROUP) += ccwgroup.o
+
+qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_perf.o qdio_setup.o
 obj-$(CONFIG_QDIO) += qdio.o
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
index b7a07a866291..fe6cea15bbaf 100644
--- a/drivers/s390/cio/airq.c
+++ b/drivers/s390/cio/airq.c
@@ -15,6 +15,7 @@
 #include <linux/rcupdate.h>
 
 #include <asm/airq.h>
+#include <asm/isc.h>
 
 #include "cio.h"
 #include "cio_debug.h"
@@ -33,15 +34,15 @@ struct airq_t {
 	void *drv_data;
 };
 
-static union indicator_t indicators;
-static struct airq_t *airqs[NR_AIRQS];
+static union indicator_t indicators[MAX_ISC+1];
+static struct airq_t *airqs[MAX_ISC+1][NR_AIRQS];
 
-static int register_airq(struct airq_t *airq)
+static int register_airq(struct airq_t *airq, u8 isc)
 {
 	int i;
 
 	for (i = 0; i < NR_AIRQS; i++)
-		if (!cmpxchg(&airqs[i], NULL, airq))
+		if (!cmpxchg(&airqs[isc][i], NULL, airq))
 			return i;
 	return -ENOMEM;
 }
@@ -50,18 +51,21 @@ static int register_airq(struct airq_t *airq)
 /**
  * s390_register_adapter_interrupt() - register adapter interrupt handler
  * @handler: adapter handler to be registered
  * @drv_data: driver data passed with each call to the handler
+ * @isc: isc for which the handler should be called
  *
  * Returns:
  *  Pointer to the indicator to be used on success
  *  ERR_PTR() if registration failed
  */
 void *s390_register_adapter_interrupt(adapter_int_handler_t handler,
-				      void *drv_data)
+				      void *drv_data, u8 isc)
 {
 	struct airq_t *airq;
 	char dbf_txt[16];
 	int ret;
 
+	if (isc > MAX_ISC)
+		return ERR_PTR(-EINVAL);
 	airq = kmalloc(sizeof(struct airq_t), GFP_KERNEL);
 	if (!airq) {
 		ret = -ENOMEM;
@@ -69,34 +73,35 @@ void *s390_register_adapter_interrupt(adapter_int_handler_t handler,
 	}
 	airq->handler = handler;
 	airq->drv_data = drv_data;
-	ret = register_airq(airq);
-	if (ret < 0)
-		kfree(airq);
+
+	ret = register_airq(airq, isc);
 out:
 	snprintf(dbf_txt, sizeof(dbf_txt), "rairq:%d", ret);
 	CIO_TRACE_EVENT(4, dbf_txt);
-	if (ret < 0)
+	if (ret < 0) {
+		kfree(airq);
 		return ERR_PTR(ret);
-	else
-		return &indicators.byte[ret];
+	} else
+		return &indicators[isc].byte[ret];
 }
 EXPORT_SYMBOL(s390_register_adapter_interrupt);
 
 /**
  * s390_unregister_adapter_interrupt - unregister adapter interrupt handler
  * @ind: indicator for which the handler is to be unregistered
+ * @isc: interruption subclass
  */
-void s390_unregister_adapter_interrupt(void *ind)
+void s390_unregister_adapter_interrupt(void *ind, u8 isc)
 {
 	struct airq_t *airq;
 	char dbf_txt[16];
 	int i;
 
-	i = (int) ((addr_t) ind) - ((addr_t) &indicators.byte[0]);
+	i = (int) ((addr_t) ind) - ((addr_t) &indicators[isc].byte[0]);
 	snprintf(dbf_txt, sizeof(dbf_txt), "urairq:%d", i);
 	CIO_TRACE_EVENT(4, dbf_txt);
-	indicators.byte[i] = 0;
-	airq = xchg(&airqs[i], NULL);
+	indicators[isc].byte[i] = 0;
+	airq = xchg(&airqs[isc][i], NULL);
 	/*
 	 * Allow interrupts to complete. This will ensure that the airq handle
 	 * is no longer referenced by any interrupt handler.
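The airq rework above makes indicators and handlers per interruption subclass, so both registration calls now take an extra isc argument. A minimal caller sketch (hypothetical driver code; my_isc, my_handler and my_indicator are illustrative names, not part of this patch):

#include <linux/err.h>
#include <asm/airq.h>
#include <asm/isc.h>

/* Called from do_adapter_IO() for each pending indicator byte. */
static void my_handler(void *indicator, void *drv_data)
{
	/* process/acknowledge the adapter interruption here */
}

static void *my_indicator;

static int my_setup(u8 my_isc)
{
	my_indicator = s390_register_adapter_interrupt(my_handler, NULL, my_isc);
	if (IS_ERR(my_indicator))
		return PTR_ERR(my_indicator);
	isc_register(my_isc);	/* enable the ISC in control register 6 */
	return 0;
}

static void my_cleanup(u8 my_isc)
{
	isc_unregister(my_isc);
	s390_unregister_adapter_interrupt(my_indicator, my_isc);
}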
@@ -108,7 +113,7 @@ EXPORT_SYMBOL(s390_unregister_adapter_interrupt);
 
 #define INDICATOR_MASK	(0xffUL << ((NR_AIRQS_PER_WORD - 1) * 8))
 
-void do_adapter_IO(void)
+void do_adapter_IO(u8 isc)
 {
 	int w;
 	int i;
@@ -120,22 +125,22 @@ void do_adapter_IO(void)
 	 * fetch operations.
 	 */
 	for (w = 0; w < NR_AIRQ_WORDS; w++) {
-		word = indicators.word[w];
+		word = indicators[isc].word[w];
 		i = w * NR_AIRQS_PER_WORD;
 		/*
 		 * Check bytes within word for active indicators.
 		 */
 		while (word) {
 			if (word & INDICATOR_MASK) {
-				airq = airqs[i];
+				airq = airqs[isc][i];
 				if (likely(airq))
-					airq->handler(&indicators.byte[i],
+					airq->handler(&indicators[isc].byte[i],
 						      airq->drv_data);
 				else
 					/*
 					 * Reset ill-behaved indicator.
 					 */
-					indicators.byte[i] = 0;
+					indicators[isc].byte[i] = 0;
 			}
 			word <<= 8;
 			i++;
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index a4a5f2efea48..0bfcbbe375c4 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -97,8 +97,8 @@ static int pure_hex(char **cp, unsigned int *val, int min_digit,
 	return 0;
 }
 
-static int parse_busid(char *str, int *cssid, int *ssid, int *devno,
-		       int msgtrigger)
+static int parse_busid(char *str, unsigned int *cssid, unsigned int *ssid,
+		       unsigned int *devno, int msgtrigger)
 {
 	char *str_work;
 	int val, rc, ret;
@@ -148,7 +148,7 @@ out:
 static int blacklist_parse_parameters(char *str, range_action action,
 				      int msgtrigger)
 {
-	int from_cssid, to_cssid, from_ssid, to_ssid, from, to;
+	unsigned int from_cssid, to_cssid, from_ssid, to_ssid, from, to;
 	int rc, totalrc;
 	char *parm;
 	range_action ra;
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 297cdceb0ca4..db00b0591733 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -18,6 +18,7 @@
 #include <asm/chpid.h>
 #include <asm/sclp.h>
 
+#include "../s390mach.h"
 #include "cio.h"
 #include "css.h"
 #include "ioasm.h"
@@ -94,6 +95,7 @@ u8 chp_get_sch_opm(struct subchannel *sch)
 	}
 	return opm;
 }
+EXPORT_SYMBOL_GPL(chp_get_sch_opm);
 
 /**
  * chp_is_registered - check if a channel-path is registered
@@ -121,11 +123,8 @@ static int s390_vary_chpid(struct chp_id chpid, int on)
 	CIO_TRACE_EVENT(2, dbf_text);
 
 	status = chp_get_status(chpid);
-	if (!on && !status) {
-		printk(KERN_ERR "cio: chpid %x.%02x is already offline\n",
-		       chpid.cssid, chpid.id);
-		return -EINVAL;
-	}
+	if (!on && !status)
+		return 0;
 
 	set_chp_logically_online(chpid, on);
 	chsc_chp_vary(chpid, on);
@@ -141,21 +140,14 @@ static ssize_t chp_measurement_chars_read(struct kobject *kobj,
 {
 	struct channel_path *chp;
 	struct device *device;
-	unsigned int size;
 
 	device = container_of(kobj, struct device, kobj);
 	chp = to_channelpath(device);
 	if (!chp->cmg_chars)
 		return 0;
 
-	size = sizeof(struct cmg_chars);
-
-	if (off > size)
-		return 0;
-	if (off + count > size)
-		count = size - off;
-	memcpy(buf, chp->cmg_chars + off, count);
-	return count;
+	return memory_read_from_buffer(buf, count, &off,
+				chp->cmg_chars, sizeof(struct cmg_chars));
 }
 
 static struct bin_attribute chp_measurement_chars_attr = {
@@ -405,7 +397,7 @@ int chp_new(struct chp_id chpid)
 		chpid.id);
 
 	/* Obtain channel path description and fill it in. */
-	ret = chsc_determine_channel_path_description(chpid, &chp->desc);
+	ret = chsc_determine_base_channel_path_desc(chpid, &chp->desc);
 	if (ret)
 		goto out_free;
 	if ((chp->desc.flags & 0x80) == 0) {
@@ -413,8 +405,7 @@ int chp_new(struct chp_id chpid)
 		goto out_free;
 	}
 	/* Get channel-measurement characteristics. */
-	if (css_characteristics_avail && css_chsc_characteristics.scmc
-	    && css_chsc_characteristics.secm) {
+	if (css_chsc_characteristics.scmc && css_chsc_characteristics.secm) {
 		ret = chsc_get_channel_measurement_chars(chp);
 		if (ret)
 			goto out_free;
@@ -476,26 +467,74 @@ void *chp_get_chp_desc(struct chp_id chpid)
 
 /**
  * chp_process_crw - process channel-path status change
- * @id: channel-path ID number
- * @status: non-zero if channel-path has become available, zero otherwise
+ * @crw0: channel report-word to handle
+ * @crw1: second channel-report word (always NULL)
+ * @overflow: crw overflow indication
  *
  * Handle channel-report-words indicating that the status of a channel-path
  * has changed.
  */
-void chp_process_crw(int id, int status)
+static void chp_process_crw(struct crw *crw0, struct crw *crw1,
+			    int overflow)
 {
 	struct chp_id chpid;
 
+	if (overflow) {
+		css_schedule_eval_all();
+		return;
+	}
+	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
+		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
+		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
+		      crw0->erc, crw0->rsid);
+	/*
+	 * Check for solicited machine checks. These are
+	 * created by reset channel path and need not be
+	 * handled here.
+	 */
+	if (crw0->slct) {
+		CIO_CRW_EVENT(2, "solicited machine check for "
+			      "channel path %02X\n", crw0->rsid);
+		return;
+	}
 	chp_id_init(&chpid);
-	chpid.id = id;
-	if (status) {
+	chpid.id = crw0->rsid;
+	switch (crw0->erc) {
+	case CRW_ERC_IPARM: /* Path has come. */
 		if (!chp_is_registered(chpid))
 			chp_new(chpid);
 		chsc_chp_online(chpid);
-	} else
+		break;
+	case CRW_ERC_PERRI: /* Path has gone. */
+	case CRW_ERC_PERRN:
 		chsc_chp_offline(chpid);
+		break;
+	default:
+		CIO_CRW_EVENT(2, "Don't know how to handle erc=%x\n",
+			      crw0->erc);
+	}
 }
 
+int chp_ssd_get_mask(struct chsc_ssd_info *ssd, struct chp_link *link)
+{
+	int i;
+	int mask;
+
+	for (i = 0; i < 8; i++) {
+		mask = 0x80 >> i;
+		if (!(ssd->path_mask & mask))
+			continue;
+		if (!chp_id_is_equal(&ssd->chpid[i], &link->chpid))
+			continue;
+		if ((ssd->fla_valid_mask & mask) &&
+		    ((ssd->fla[i] & link->fla_mask) != link->fla))
+			continue;
+		return mask;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(chp_ssd_get_mask);
+
 static inline int info_bit_num(struct chp_id id)
 {
 	return id.id + id.cssid * (__MAX_CHPID + 1);
@@ -575,6 +614,7 @@ static void cfg_func(struct work_struct *work)
 {
 	struct chp_id chpid;
 	enum cfg_task_t t;
+	int rc;
 
 	mutex_lock(&cfg_lock);
 	t = cfg_none;
@@ -589,14 +629,24 @@ static void cfg_func(struct work_struct *work)
 
 	switch (t) {
 	case cfg_configure:
-		sclp_chp_configure(chpid);
-		info_expire();
-		chsc_chp_online(chpid);
+		rc = sclp_chp_configure(chpid);
+		if (rc)
+			CIO_MSG_EVENT(2, "chp: sclp_chp_configure(%x.%02x)="
+				      "%d\n", chpid.cssid, chpid.id, rc);
+		else {
+			info_expire();
+			chsc_chp_online(chpid);
+		}
 		break;
 	case cfg_deconfigure:
-		sclp_chp_deconfigure(chpid);
-		info_expire();
-		chsc_chp_offline(chpid);
+		rc = sclp_chp_deconfigure(chpid);
+		if (rc)
+			CIO_MSG_EVENT(2, "chp: sclp_chp_deconfigure(%x.%02x)="
+				      "%d\n", chpid.cssid, chpid.id, rc);
+		else {
+			info_expire();
+			chsc_chp_offline(chpid);
+		}
 		break;
 	case cfg_none:
 		/* Get updated information after last change. */
@@ -654,10 +704,16 @@ static int cfg_wait_idle(void)
 static int __init chp_init(void)
 {
 	struct chp_id chpid;
+	int ret;
 
+	ret = s390_register_crw_handler(CRW_RSC_CPATH, chp_process_crw);
+	if (ret)
+		return ret;
 	chp_wq = create_singlethread_workqueue("cio_chp");
-	if (!chp_wq)
+	if (!chp_wq) {
+		s390_unregister_crw_handler(CRW_RSC_CPATH);
 		return -ENOMEM;
+	}
 	INIT_WORK(&cfg_work, cfg_func);
 	init_waitqueue_head(&cfg_wait_queue);
 	if (info_update())
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h
index 65286563c592..26c3d2246176 100644
--- a/drivers/s390/cio/chp.h
+++ b/drivers/s390/cio/chp.h
@@ -12,12 +12,24 @@
 #include <linux/device.h>
 #include <asm/chpid.h>
 #include "chsc.h"
+#include "css.h"
 
 #define CHP_STATUS_STANDBY		0
 #define CHP_STATUS_CONFIGURED		1
 #define CHP_STATUS_RESERVED		2
 #define CHP_STATUS_NOT_RECOGNIZED	3
 
+#define CHP_ONLINE 0
+#define CHP_OFFLINE 1
+#define CHP_VARY_ON 2
+#define CHP_VARY_OFF 3
+
+struct chp_link {
+	struct chp_id chpid;
+	u32 fla_mask;
+	u16 fla;
+};
+
 static inline int chp_test_bit(u8 *bitmap, int num)
 {
 	int byte = num >> 3;
@@ -42,12 +54,11 @@ int chp_get_status(struct chp_id chpid);
 u8 chp_get_sch_opm(struct subchannel *sch);
 int chp_is_registered(struct chp_id chpid);
 void *chp_get_chp_desc(struct chp_id chpid);
-void chp_process_crw(int id, int available);
 void chp_remove_cmg_attr(struct channel_path *chp);
 int chp_add_cmg_attr(struct channel_path *chp);
 int chp_new(struct chp_id chpid);
 void chp_cfg_schedule(struct chp_id chpid, int configure);
 void chp_cfg_cancel_deconfigure(struct chp_id chpid);
 int chp_info_get_status(struct chp_id chpid);
-
+int chp_ssd_get_mask(struct chsc_ssd_info *, struct chp_link *);
 #endif /* S390_CHP_H */
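chp.h now exports struct chp_link and the CHP_* event codes that chsc.c feeds into the new css_driver->chp_event() callback. A sketch of how a subchannel driver might consume these events (illustrative only; the io_subchannel implementation is not part of this hunk), using chp_ssd_get_mask() to map the reported link to a path mask:

static int my_chp_event(struct subchannel *sch, void *data, int event)
{
	struct chp_link *link = data;
	int mask;

	/* Does the reported chpid/link address apply to this subchannel? */
	mask = chp_ssd_get_mask(&sch->ssd_info, link);
	if (!mask)
		return 0;
	switch (event) {
	case CHP_OFFLINE:
	case CHP_VARY_OFF:
		sch->opm &= ~mask;	/* path is gone */
		break;
	case CHP_ONLINE:
	case CHP_VARY_ON:
		sch->opm |= mask;	/* path is back */
		break;
	}
	/* ...trigger path verification for the device here... */
	return 0;
}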
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 5de86908b0d0..29826fdd47b8 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -2,8 +2,7 @@
  * drivers/s390/cio/chsc.c
  *   S/390 common I/O routines -- channel subsystem call
  *
- *    Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
- *			      IBM Corporation
+ *    Copyright IBM Corp. 1999,2008
  *    Author(s): Ingo Adlung (adlung@de.ibm.com)
  *		 Cornelia Huck (cornelia.huck@de.ibm.com)
  *		 Arnd Bergmann (arndb@de.ibm.com)
@@ -16,7 +15,9 @@
 
 #include <asm/cio.h>
 #include <asm/chpid.h>
+#include <asm/chsc.h>
 
+#include "../s390mach.h"
 #include "css.h"
 #include "cio.h"
 #include "cio_debug.h"
@@ -26,7 +27,13 @@
 
 static void *sei_page;
 
-static int chsc_error_from_response(int response)
+/**
+ * chsc_error_from_response() - convert a chsc response to an error
+ * @response: chsc response code
+ *
+ * Returns an appropriate Linux error code for @response.
+ */
+int chsc_error_from_response(int response)
 {
 	switch (response) {
 	case 0x0001:
@@ -44,6 +51,7 @@ static int chsc_error_from_response(int response)
 		return -EIO;
 	}
 }
+EXPORT_SYMBOL_GPL(chsc_error_from_response);
 
 struct chsc_ssd_area {
 	struct chsc_header request;
@@ -127,77 +135,12 @@ out_free:
 	return ret;
 }
 
-static int check_for_io_on_path(struct subchannel *sch, int mask)
-{
-	int cc;
-
-	cc = stsch(sch->schid, &sch->schib);
-	if (cc)
-		return 0;
-	if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == mask)
-		return 1;
-	return 0;
-}
-
-static void terminate_internal_io(struct subchannel *sch)
-{
-	if (cio_clear(sch)) {
-		/* Recheck device in case clear failed. */
-		sch->lpm = 0;
-		if (device_trigger_verify(sch) != 0)
-			css_schedule_eval(sch->schid);
-		return;
-	}
-	/* Request retry of internal operation. */
-	device_set_intretry(sch);
-	/* Call handler. */
-	if (sch->driver && sch->driver->termination)
-		sch->driver->termination(sch);
-}
-
 static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
 {
-	int j;
-	int mask;
-	struct chp_id *chpid = data;
-	struct schib schib;
-
-	for (j = 0; j < 8; j++) {
-		mask = 0x80 >> j;
-		if ((sch->schib.pmcw.pim & mask) &&
-		    (sch->schib.pmcw.chpid[j] == chpid->id))
-			break;
-	}
-	if (j >= 8)
-		return 0;
-
 	spin_lock_irq(sch->lock);
-
-	stsch(sch->schid, &schib);
-	if (!css_sch_is_valid(&schib))
-		goto out_unreg;
-	memcpy(&sch->schib, &schib, sizeof(struct schib));
-	/* Check for single path devices. */
-	if (sch->schib.pmcw.pim == 0x80)
-		goto out_unreg;
-
-	if (check_for_io_on_path(sch, mask)) {
-		if (device_is_online(sch))
-			device_kill_io(sch);
-		else {
-			terminate_internal_io(sch);
-			/* Re-start path verification. */
-			if (sch->driver && sch->driver->verify)
-				sch->driver->verify(sch);
-		}
-	} else {
-		/* trigger path verification. */
-		if (sch->driver && sch->driver->verify)
-			sch->driver->verify(sch);
-		else if (sch->lpm == mask)
+	if (sch->driver && sch->driver->chp_event)
+		if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
 			goto out_unreg;
-	}
-
 	spin_unlock_irq(sch->lock);
 	return 0;
@@ -211,15 +154,18 @@ out_unreg:
 void chsc_chp_offline(struct chp_id chpid)
 {
 	char dbf_txt[15];
+	struct chp_link link;
 
 	sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
 	CIO_TRACE_EVENT(2, dbf_txt);
 
 	if (chp_get_status(chpid) <= 0)
 		return;
+	memset(&link, 0, sizeof(struct chp_link));
+	link.chpid = chpid;
 	/* Wait until previous actions have settled. */
 	css_wait_for_slow_path();
-	for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &chpid);
+	for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
 }
 
 static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
@@ -242,67 +188,25 @@ static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
 	return 0;
 }
 
-struct res_acc_data {
-	struct chp_id chpid;
-	u32 fla_mask;
-	u16 fla;
-};
-
-static int get_res_chpid_mask(struct chsc_ssd_info *ssd,
-			      struct res_acc_data *data)
-{
-	int i;
-	int mask;
-
-	for (i = 0; i < 8; i++) {
-		mask = 0x80 >> i;
-		if (!(ssd->path_mask & mask))
-			continue;
-		if (!chp_id_is_equal(&ssd->chpid[i], &data->chpid))
-			continue;
-		if ((ssd->fla_valid_mask & mask) &&
-		    ((ssd->fla[i] & data->fla_mask) != data->fla))
-			continue;
-		return mask;
-	}
-	return 0;
-}
-
 static int __s390_process_res_acc(struct subchannel *sch, void *data)
 {
-	int chp_mask, old_lpm;
-	struct res_acc_data *res_data = data;
-
 	spin_lock_irq(sch->lock);
-	chp_mask = get_res_chpid_mask(&sch->ssd_info, res_data);
-	if (chp_mask == 0)
-		goto out;
-	if (stsch(sch->schid, &sch->schib))
-		goto out;
-	old_lpm = sch->lpm;
-	sch->lpm = ((sch->schib.pmcw.pim &
-		     sch->schib.pmcw.pam &
-		     sch->schib.pmcw.pom)
-		    | chp_mask) & sch->opm;
-	if (!old_lpm && sch->lpm)
-		device_trigger_reprobe(sch);
-	else if (sch->driver && sch->driver->verify)
-		sch->driver->verify(sch);
-out:
+	if (sch->driver && sch->driver->chp_event)
+		sch->driver->chp_event(sch, data, CHP_ONLINE);
 	spin_unlock_irq(sch->lock);
 	return 0;
 }
 
-static void s390_process_res_acc (struct res_acc_data *res_data)
+static void s390_process_res_acc(struct chp_link *link)
 {
 	char dbf_txt[15];
 
-	sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid,
-		res_data->chpid.id);
+	sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid,
+		link->chpid.id);
 	CIO_TRACE_EVENT( 2, dbf_txt);
-	if (res_data->fla != 0) {
-		sprintf(dbf_txt, "fla%x", res_data->fla);
+	if (link->fla != 0) {
+		sprintf(dbf_txt, "fla%x", link->fla);
 		CIO_TRACE_EVENT( 2, dbf_txt);
 	}
 	/* Wait until previous actions have settled. */
@@ -315,7 +219,7 @@ static void s390_process_res_acc(struct chp_link *link)
 	 * will we have to do.
 	 */
 	for_each_subchannel_staged(__s390_process_res_acc,
-				   s390_process_res_acc_new_sch, res_data);
+				   s390_process_res_acc_new_sch, link);
 }
 
 static int
@@ -388,7 +292,7 @@ static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
 
 static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
 {
-	struct res_acc_data res_data;
+	struct chp_link link;
 	struct chp_id chpid;
 	int status;
 
@@ -404,18 +308,18 @@ static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
 		chp_new(chpid);
 	else if (!status)
 		return;
-	memset(&res_data, 0, sizeof(struct res_acc_data));
-	res_data.chpid = chpid;
+	memset(&link, 0, sizeof(struct chp_link));
+	link.chpid = chpid;
 	if ((sei_area->vf & 0xc0) != 0) {
-		res_data.fla = sei_area->fla;
+		link.fla = sei_area->fla;
 		if ((sei_area->vf & 0xc0) == 0xc0)
 			/* full link address */
-			res_data.fla_mask = 0xffff;
+			link.fla_mask = 0xffff;
 		else
 			/* link address */
-			res_data.fla_mask = 0xff00;
+			link.fla_mask = 0xff00;
 	}
-	s390_process_res_acc(&res_data);
+	s390_process_res_acc(&link);
 }
 
 struct chp_config_data {
@@ -480,17 +384,25 @@ static void chsc_process_sei(struct chsc_sei_area *sei_area)
 	}
 }
 
-void chsc_process_crw(void)
+static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
 {
 	struct chsc_sei_area *sei_area;
 
+	if (overflow) {
+		css_schedule_eval_all();
+		return;
+	}
+	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
+		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
+		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
+		      crw0->erc, crw0->rsid);
 	if (!sei_page)
 		return;
 	/* Access to sei_page is serialized through machine check handler
 	 * thread, so no need for locking. */
 	sei_area = sei_page;
 
-	CIO_TRACE_EVENT( 2, "prcss");
+	CIO_TRACE_EVENT(2, "prcss");
 	do {
 		memset(sei_area, 0, sizeof(*sei_area));
 		sei_area->request.length = 0x0010;
@@ -509,114 +421,36 @@ void chsc_process_crw(void)
 	} while (sei_area->flags & 0x80);
 }
 
-static int __chp_add_new_sch(struct subchannel_id schid, void *data)
-{
-	struct schib schib;
-
-	if (stsch_err(schid, &schib))
-		/* We're through */
-		return -ENXIO;
-
-	/* Put it on the slow path. */
-	css_schedule_eval(schid);
-	return 0;
-}
-
-
-static int __chp_add(struct subchannel *sch, void *data)
-{
-	int i, mask;
-	struct chp_id *chpid = data;
-
-	spin_lock_irq(sch->lock);
-	for (i=0; i<8; i++) {
-		mask = 0x80 >> i;
-		if ((sch->schib.pmcw.pim & mask) &&
-		    (sch->schib.pmcw.chpid[i] == chpid->id))
-			break;
-	}
-	if (i==8) {
-		spin_unlock_irq(sch->lock);
-		return 0;
-	}
-	if (stsch(sch->schid, &sch->schib)) {
-		spin_unlock_irq(sch->lock);
-		css_schedule_eval(sch->schid);
-		return 0;
-	}
-	sch->lpm = ((sch->schib.pmcw.pim &
-		     sch->schib.pmcw.pam &
-		     sch->schib.pmcw.pom)
-		    | mask) & sch->opm;
-
-	if (sch->driver && sch->driver->verify)
-		sch->driver->verify(sch);
-
-	spin_unlock_irq(sch->lock);
-
-	return 0;
-}
-
 void chsc_chp_online(struct chp_id chpid)
 {
 	char dbf_txt[15];
+	struct chp_link link;
 
 	sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
 	CIO_TRACE_EVENT(2, dbf_txt);
 
 	if (chp_get_status(chpid) != 0) {
+		memset(&link, 0, sizeof(struct chp_link));
+		link.chpid = chpid;
 		/* Wait until previous actions have settled. */
 		css_wait_for_slow_path();
-		for_each_subchannel_staged(__chp_add, __chp_add_new_sch,
-					   &chpid);
+		for_each_subchannel_staged(__s390_process_res_acc, NULL,
+					   &link);
 	}
 }
 
 static void __s390_subchannel_vary_chpid(struct subchannel *sch,
 					 struct chp_id chpid, int on)
 {
-	int chp, old_lpm;
-	int mask;
 	unsigned long flags;
+	struct chp_link link;
 
+	memset(&link, 0, sizeof(struct chp_link));
+	link.chpid = chpid;
 	spin_lock_irqsave(sch->lock, flags);
-	old_lpm = sch->lpm;
-	for (chp = 0; chp < 8; chp++) {
-		mask = 0x80 >> chp;
-		if (!(sch->ssd_info.path_mask & mask))
-			continue;
-		if (!chp_id_is_equal(&sch->ssd_info.chpid[chp], &chpid))
-			continue;
-
-		if (on) {
-			sch->opm |= mask;
-			sch->lpm |= mask;
-			if (!old_lpm)
-				device_trigger_reprobe(sch);
-			else if (sch->driver && sch->driver->verify)
-				sch->driver->verify(sch);
-			break;
-		}
-		sch->opm &= ~mask;
-		sch->lpm &= ~mask;
-		if (check_for_io_on_path(sch, mask)) {
-			if (device_is_online(sch))
-				/* Path verification is done after killing. */
-				device_kill_io(sch);
-			else {
-				/* Kill and retry internal I/O. */
-				terminate_internal_io(sch);
-				/* Re-start path verification. */
-				if (sch->driver && sch->driver->verify)
-					sch->driver->verify(sch);
-			}
-		} else if (!sch->lpm) {
-			if (device_trigger_verify(sch) != 0)
-				css_schedule_eval(sch->schid);
-		} else if (sch->driver && sch->driver->verify)
-			sch->driver->verify(sch);
-		break;
-	}
+	if (sch->driver && sch->driver->chp_event)
+		sch->driver->chp_event(sch, &link,
+				       on ? CHP_VARY_ON : CHP_VARY_OFF);
 	spin_unlock_irqrestore(sch->lock, flags);
 }
 
@@ -656,6 +490,10 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data)
  */
 int chsc_chp_vary(struct chp_id chpid, int on)
 {
+	struct chp_link link;
+
+	memset(&link, 0, sizeof(struct chp_link));
+	link.chpid = chpid;
 	/* Wait until previous actions have settled. */
 	css_wait_for_slow_path();
 	/*
 	 * Redo PathVerification on the devices the chpid connects to
 	 */
 	if (on)
 		for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
-					   __s390_vary_chpid_on, &chpid);
+					   __s390_vary_chpid_on, &link);
 	else
 		for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
-					   NULL, &chpid);
+					   NULL, &link);
 
 	return 0;
 }
@@ -797,23 +635,33 @@ chsc_secm(struct channel_subsystem *css, int enable)
 	return ret;
 }
 
-int chsc_determine_channel_path_description(struct chp_id chpid,
-					    struct channel_path_desc *desc)
+int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
+				     int c, int m,
+				     struct chsc_response_struct *resp)
 {
 	int ccode, ret;
 
 	struct {
 		struct chsc_header request;
-		u32 : 24;
+		u32 : 2;
+		u32 m : 1;
+		u32 c : 1;
+		u32 fmt : 4;
+		u32 cssid : 8;
+		u32 : 4;
+		u32 rfmt : 4;
 		u32 first_chpid : 8;
 		u32 : 24;
 		u32 last_chpid : 8;
 		u32 zeroes1;
 		struct chsc_header response;
-		u32 zeroes2;
-		struct channel_path_desc desc;
+		u8 data[PAGE_SIZE - 20];
 	} __attribute__ ((packed)) *scpd_area;
 
+	if ((rfmt == 1) && !css_general_characteristics.fcs)
+		return -EINVAL;
+	if ((rfmt == 2) && !css_general_characteristics.cib)
+		return -EINVAL;
 	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
 	if (!scpd_area)
 		return -ENOMEM;
@@ -821,8 +669,13 @@ int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
 	scpd_area->request.length = 0x0010;
 	scpd_area->request.code = 0x0002;
 
+	scpd_area->cssid = chpid.cssid;
 	scpd_area->first_chpid = chpid.id;
 	scpd_area->last_chpid = chpid.id;
+	scpd_area->m = m;
+	scpd_area->c = c;
+	scpd_area->fmt = fmt;
+	scpd_area->rfmt = rfmt;
 
 	ccode = chsc(scpd_area);
 	if (ccode > 0) {
@@ -833,8 +686,7 @@ int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
 	ret = chsc_error_from_response(scpd_area->response.code);
 	if (ret == 0)
 		/* Success. */
-		memcpy(desc, &scpd_area->desc,
-		       sizeof(struct channel_path_desc));
+		memcpy(resp, &scpd_area->response, scpd_area->response.length);
 	else
 		CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
 			      scpd_area->response.code);
@@ -842,6 +694,25 @@ out:
 	free_page((unsigned long)scpd_area);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);
+
+int chsc_determine_base_channel_path_desc(struct chp_id chpid,
+					  struct channel_path_desc *desc)
+{
+	struct chsc_response_struct *chsc_resp;
+	int ret;
+
+	chsc_resp = kzalloc(sizeof(*chsc_resp), GFP_KERNEL);
+	if (!chsc_resp)
+		return -ENOMEM;
+	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, chsc_resp);
+	if (ret)
+		goto out_free;
+	memcpy(desc, &chsc_resp->data, chsc_resp->length);
+out_free:
+	kfree(chsc_resp);
+	return ret;
+}
 
 static void
 chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
@@ -937,15 +808,23 @@ out:
 
 int __init chsc_alloc_sei_area(void)
 {
+	int ret;
+
 	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
-	if (!sei_page)
+	if (!sei_page) {
 		CIO_MSG_EVENT(0, "Can't allocate page for processing of "
 			      "chsc machine checks!\n");
-	return (sei_page ? 0 : -ENOMEM);
+		return -ENOMEM;
+	}
+	ret = s390_register_crw_handler(CRW_RSC_CSS, chsc_process_crw);
+	if (ret)
+		kfree(sei_page);
+	return ret;
 }
 
 void __init chsc_free_sei_area(void)
 {
+	s390_unregister_crw_handler(CRW_RSC_CSS);
 	kfree(sei_page);
 }
 
@@ -1043,3 +922,52 @@ exit:
 
 EXPORT_SYMBOL_GPL(css_general_characteristics);
 EXPORT_SYMBOL_GPL(css_chsc_characteristics);
+
+int chsc_sstpc(void *page, unsigned int op, u16 ctrl)
+{
+	struct {
+		struct chsc_header request;
+		unsigned int rsvd0;
+		unsigned int op : 8;
+		unsigned int rsvd1 : 8;
+		unsigned int ctrl : 16;
+		unsigned int rsvd2[5];
+		struct chsc_header response;
+		unsigned int rsvd3[7];
+	} __attribute__ ((packed)) *rr;
+	int rc;
+
+	memset(page, 0, PAGE_SIZE);
+	rr = page;
+	rr->request.length = 0x0020;
+	rr->request.code = 0x0033;
+	rr->op = op;
+	rr->ctrl = ctrl;
+	rc = chsc(rr);
+	if (rc)
+		return -EIO;
+	rc = (rr->response.code == 0x0001) ? 0 : -EIO;
+	return rc;
+}
+
+int chsc_sstpi(void *page, void *result, size_t size)
+{
+	struct {
+		struct chsc_header request;
+		unsigned int rsvd0[3];
+		struct chsc_header response;
+		char data[size];
+	} __attribute__ ((packed)) *rr;
+	int rc;
+
+	memset(page, 0, PAGE_SIZE);
+	rr = page;
+	rr->request.length = 0x0010;
+	rr->request.code = 0x0038;
+	rc = chsc(rr);
+	if (rc)
+		return -EIO;
+	memcpy(result, &rr->data, size);
+	return (rr->response.code == 0x0001) ? 0 : -EIO;
+}
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index d1f5db1e69b9..ba59bceace98 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -4,7 +4,8 @@
 #include <linux/types.h>
 #include <linux/device.h>
 #include <asm/chpid.h>
-#include "schid.h"
+#include <asm/chsc.h>
+#include <asm/schid.h>
 
 #define CHSC_SDA_OC_MSS   0x2
 
@@ -36,14 +37,15 @@ struct channel_path_desc {
 
 struct channel_path;
 
-extern void chsc_process_crw(void);
-
 struct css_general_char {
-	u64 : 41;
+	u64 : 12;
+	u32 dynio : 1;	 /* bit 12 */
+	u32 : 28;
 	u32 aif : 1;	 /* bit 41 */
 	u32 : 3;
 	u32 mcss : 1;	 /* bit 45 */
-	u32 : 2;
+	u32 fcs : 1;	 /* bit 46 */
+	u32 : 1;
 	u32 ext_mb : 1;  /* bit 48 */
 	u32 : 7;
 	u32 aif_tdd : 1; /* bit 56 */
@@ -51,7 +53,11 @@ struct css_general_char {
 	u32 qebsm : 1;	 /* bit 58 */
 	u32 : 8;
 	u32 aif_osa : 1; /* bit 67 */
-	u32 : 28;
+	u32 : 14;
+	u32 cib : 1;	 /* bit 82 */
+	u32 : 5;
+	u32 fcx : 1;	 /* bit 88 */
+	u32 : 7;
 }__attribute__((packed));
 
 struct css_chsc_char {
@@ -78,7 +84,6 @@ struct chsc_ssd_info {
 extern int chsc_get_ssd_info(struct subchannel_id schid,
 			     struct chsc_ssd_info *ssd);
 extern int chsc_determine_css_characteristics(void);
-extern int css_characteristics_avail;
 
 extern int chsc_alloc_sei_area(void);
 extern void chsc_free_sei_area(void);
@@ -87,10 +92,15 @@ struct channel_subsystem;
 extern int chsc_secm(struct channel_subsystem *, int);
 
 int chsc_chp_vary(struct chp_id chpid, int on);
-int chsc_determine_channel_path_description(struct chp_id chpid,
-					    struct channel_path_desc *desc);
+int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
+				     int c, int m,
+				     struct chsc_response_struct *resp);
+int chsc_determine_base_channel_path_desc(struct chp_id chpid,
+					  struct channel_path_desc *desc);
 void chsc_chp_online(struct chp_id chpid);
 void chsc_chp_offline(struct chp_id chpid);
 int chsc_get_channel_measurement_chars(struct channel_path *chp);
 
+int chsc_error_from_response(int response);
+
 #endif
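With the reworked interface, a format-0 channel-path description is a thin wrapper around the generalized call, and chsc_error_from_response() is exported so other chsc users can map response codes to errnos. A short kernel-side sketch (hypothetical helper, mirroring what chp_new() does above):

static int my_check_chp(struct chp_id chpid)
{
	struct channel_path_desc desc;
	int ret;

	/* fmt/rfmt/c/m all zero: plain format-0 description */
	ret = chsc_determine_base_channel_path_desc(chpid, &desc);
	if (ret)
		return ret;	/* already an errno via chsc_error_from_response() */
	if (!(desc.flags & 0x80))
		return -ENODEV;	/* channel path not described */
	return 0;
}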
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
new file mode 100644
index 000000000000..91ca87aa9f97
--- /dev/null
+++ b/drivers/s390/cio/chsc_sch.c
@@ -0,0 +1,820 @@
+/*
+ * Driver for s390 chsc subchannels
+ *
+ * Copyright IBM Corp. 2008
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+
+#include <asm/cio.h>
+#include <asm/chsc.h>
+#include <asm/isc.h>
+
+#include "cio.h"
+#include "cio_debug.h"
+#include "css.h"
+#include "chsc_sch.h"
+#include "ioasm.h"
+
+static debug_info_t *chsc_debug_msg_id;
+static debug_info_t *chsc_debug_log_id;
+
+#define CHSC_MSG(imp, args...) do {					\
+		debug_sprintf_event(chsc_debug_msg_id, imp , ##args);	\
+	} while (0)
+
+#define CHSC_LOG(imp, txt) do {					\
+		debug_text_event(chsc_debug_log_id, imp , txt);	\
+	} while (0)
+
+static void CHSC_LOG_HEX(int level, void *data, int length)
+{
+	while (length > 0) {
+		debug_event(chsc_debug_log_id, level, data, length);
+		length -= chsc_debug_log_id->buf_size;
+		data += chsc_debug_log_id->buf_size;
+	}
+}
+
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("driver for s390 chsc subchannels");
+MODULE_LICENSE("GPL");
+
+static void chsc_subchannel_irq(struct subchannel *sch)
+{
+	struct chsc_private *private = sch->private;
+	struct chsc_request *request = private->request;
+	struct irb *irb = (struct irb *)__LC_IRB;
+
+	CHSC_LOG(4, "irb");
+	CHSC_LOG_HEX(4, irb, sizeof(*irb));
+	/* Copy irb to provided request and set done. */
+	if (!request) {
+		CHSC_MSG(0, "Interrupt on sch 0.%x.%04x with no request\n",
+			 sch->schid.ssid, sch->schid.sch_no);
+		return;
+	}
+	private->request = NULL;
+	memcpy(&request->irb, irb, sizeof(*irb));
+	stsch(sch->schid, &sch->schib);
+	complete(&request->completion);
+	put_device(&sch->dev);
+}
+
+static int chsc_subchannel_probe(struct subchannel *sch)
+{
+	struct chsc_private *private;
+	int ret;
+
+	CHSC_MSG(6, "Detected chsc subchannel 0.%x.%04x\n",
+		 sch->schid.ssid, sch->schid.sch_no);
+	sch->isc = CHSC_SCH_ISC;
+	private = kzalloc(sizeof(*private), GFP_KERNEL);
+	if (!private)
+		return -ENOMEM;
+	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
+	if (ret) {
+		CHSC_MSG(0, "Failed to enable 0.%x.%04x: %d\n",
+			 sch->schid.ssid, sch->schid.sch_no, ret);
+		kfree(private);
+	} else {
+		sch->private = private;
+		if (sch->dev.uevent_suppress) {
+			sch->dev.uevent_suppress = 0;
+			kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+		}
+	}
+	return ret;
+}
+
+static int chsc_subchannel_remove(struct subchannel *sch)
+{
+	struct chsc_private *private;
+
+	cio_disable_subchannel(sch);
+	private = sch->private;
+	sch->private = NULL;
+	if (private->request) {
+		complete(&private->request->completion);
+		put_device(&sch->dev);
+	}
+	kfree(private);
+	return 0;
+}
+
+static void chsc_subchannel_shutdown(struct subchannel *sch)
+{
+	cio_disable_subchannel(sch);
+}
+
+static struct css_device_id chsc_subchannel_ids[] = {
+	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_CHSC, },
+	{ /* end of list */ },
+};
+MODULE_DEVICE_TABLE(css, chsc_subchannel_ids);
+
+static struct css_driver chsc_subchannel_driver = {
+	.owner = THIS_MODULE,
+	.subchannel_type = chsc_subchannel_ids,
+	.irq = chsc_subchannel_irq,
+	.probe = chsc_subchannel_probe,
+	.remove = chsc_subchannel_remove,
+	.shutdown = chsc_subchannel_shutdown,
+	.name = "chsc_subchannel",
+};
+
+static int __init chsc_init_dbfs(void)
+{
+	chsc_debug_msg_id = debug_register("chsc_msg", 16, 1,
+					   16 * sizeof(long));
+	if (!chsc_debug_msg_id)
+		goto out;
+	debug_register_view(chsc_debug_msg_id, &debug_sprintf_view);
+	debug_set_level(chsc_debug_msg_id, 2);
+	chsc_debug_log_id = debug_register("chsc_log", 16, 1, 16);
+	if (!chsc_debug_log_id)
+		goto out;
+	debug_register_view(chsc_debug_log_id, &debug_hex_ascii_view);
+	debug_set_level(chsc_debug_log_id, 2);
+	return 0;
+out:
+	if (chsc_debug_msg_id)
+		debug_unregister(chsc_debug_msg_id);
+	return -ENOMEM;
+}
+
+static void chsc_remove_dbfs(void)
+{
+	debug_unregister(chsc_debug_log_id);
+	debug_unregister(chsc_debug_msg_id);
+}
+
+static int __init chsc_init_sch_driver(void)
+{
+	return css_driver_register(&chsc_subchannel_driver);
+}
+
+static void chsc_cleanup_sch_driver(void)
+{
+	css_driver_unregister(&chsc_subchannel_driver);
+}
+
+static DEFINE_SPINLOCK(chsc_lock);
+
+static int chsc_subchannel_match_next_free(struct device *dev, void *data)
+{
+	struct subchannel *sch = to_subchannel(dev);
+
+	return sch->schib.pmcw.ena && !scsw_fctl(&sch->schib.scsw);
+}
+
+static struct subchannel *chsc_get_next_subchannel(struct subchannel *sch)
+{
+	struct device *dev;
+
+	dev = driver_find_device(&chsc_subchannel_driver.drv,
+				 sch ? &sch->dev : NULL, NULL,
+				 chsc_subchannel_match_next_free);
+	return dev ? to_subchannel(dev) : NULL;
+}
+
+/**
+ * chsc_async() - try to start a chsc request asynchronously
+ * @chsc_area: request to be started
+ * @request: request structure to associate
+ *
+ * Tries to start a chsc request on one of the existing chsc subchannels.
+ * Returns:
+ *  %0 if the request was performed synchronously
+ *  %-EINPROGRESS if the request was successfully started
+ *  %-EBUSY if all chsc subchannels are busy
+ *  %-ENODEV if no chsc subchannels are available
+ * Context:
+ *  interrupts disabled, chsc_lock held
+ */
+static int chsc_async(struct chsc_async_area *chsc_area,
+		      struct chsc_request *request)
+{
+	int cc;
+	struct chsc_private *private;
+	struct subchannel *sch = NULL;
+	int ret = -ENODEV;
+	char dbf[10];
+
+	chsc_area->header.key = PAGE_DEFAULT_KEY;
+	while ((sch = chsc_get_next_subchannel(sch))) {
+		spin_lock(sch->lock);
+		private = sch->private;
+		if (private->request) {
+			spin_unlock(sch->lock);
+			ret = -EBUSY;
+			continue;
+		}
+		chsc_area->header.sid = sch->schid;
+		CHSC_LOG(2, "schid");
+		CHSC_LOG_HEX(2, &sch->schid, sizeof(sch->schid));
+		cc = chsc(chsc_area);
+		sprintf(dbf, "cc:%d", cc);
+		CHSC_LOG(2, dbf);
+		switch (cc) {
+		case 0:
+			ret = 0;
+			break;
+		case 1:
+			sch->schib.scsw.cmd.fctl |= SCSW_FCTL_START_FUNC;
+			ret = -EINPROGRESS;
+			private->request = request;
+			break;
+		case 2:
+			ret = -EBUSY;
+			break;
+		default:
+			ret = -ENODEV;
+		}
+		spin_unlock(sch->lock);
+		CHSC_MSG(2, "chsc on 0.%x.%04x returned cc=%d\n",
+			 sch->schid.ssid, sch->schid.sch_no, cc);
+		if (ret == -EINPROGRESS)
+			return -EINPROGRESS;
+		put_device(&sch->dev);
+		if (ret == 0)
+			return 0;
+	}
+	return ret;
+}
+
+static void chsc_log_command(struct chsc_async_area *chsc_area)
+{
+	char dbf[10];
+
+	sprintf(dbf, "CHSC:%x", chsc_area->header.code);
+	CHSC_LOG(0, dbf);
+	CHSC_LOG_HEX(0, chsc_area, 32);
+}
+
+static int chsc_examine_irb(struct chsc_request *request)
+{
+	int backed_up;
+
+	if (!(scsw_stctl(&request->irb.scsw) & SCSW_STCTL_STATUS_PEND))
+		return -EIO;
+	backed_up = scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHAIN_CHECK;
+	request->irb.scsw.cmd.cstat &= ~SCHN_STAT_CHAIN_CHECK;
+	if (scsw_cstat(&request->irb.scsw) == 0)
+		return 0;
+	if (!backed_up)
+		return 0;
+	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROG_CHECK)
+		return -EIO;
+	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROT_CHECK)
+		return -EPERM;
+	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_DATA_CHK)
+		return -EAGAIN;
+	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_CTRL_CHK)
+		return -EAGAIN;
+	return -EIO;
+}
+
+static int chsc_ioctl_start(void __user *user_area)
+{
+	struct chsc_request *request;
+	struct chsc_async_area *chsc_area;
+	int ret;
+	char dbf[10];
+
+	if (!css_general_characteristics.dynio)
+		/* It makes no sense to try. */
+		return -EOPNOTSUPP;
+	chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL);
+	if (!chsc_area)
+		return -ENOMEM;
+	request = kzalloc(sizeof(*request), GFP_KERNEL);
+	if (!request) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+	init_completion(&request->completion);
+	if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) {
+		ret = -EFAULT;
+		goto out_free;
+	}
+	chsc_log_command(chsc_area);
+	spin_lock_irq(&chsc_lock);
+	ret = chsc_async(chsc_area, request);
+	spin_unlock_irq(&chsc_lock);
+	if (ret == -EINPROGRESS) {
+		wait_for_completion(&request->completion);
+		ret = chsc_examine_irb(request);
+	}
+	/* copy area back to user */
+	if (!ret)
+		if (copy_to_user(user_area, chsc_area, PAGE_SIZE))
+			ret = -EFAULT;
+out_free:
+	sprintf(dbf, "ret:%d", ret);
+	CHSC_LOG(0, dbf);
+	kfree(request);
+	free_page((unsigned long)chsc_area);
+	return ret;
+}
+
+static int chsc_ioctl_info_channel_path(void __user *user_cd)
+{
+	struct chsc_chp_cd *cd;
+	int ret, ccode;
+	struct {
+		struct chsc_header request;
+		u32 : 2;
+		u32 m : 1;
+		u32 : 1;
+		u32 fmt1 : 4;
+		u32 cssid : 8;
+		u32 : 8;
+		u32 first_chpid : 8;
+		u32 : 24;
+		u32 last_chpid : 8;
+		u32 : 32;
+		struct chsc_header response;
+		u8 data[PAGE_SIZE - 20];
+	} __attribute__ ((packed)) *scpcd_area;
+
+	scpcd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!scpcd_area)
+		return -ENOMEM;
+	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
+	if (!cd) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+	if (copy_from_user(cd, user_cd, sizeof(*cd))) {
+		ret = -EFAULT;
+		goto out_free;
+	}
+	scpcd_area->request.length = 0x0010;
+	scpcd_area->request.code = 0x0028;
+	scpcd_area->m = cd->m;
+	scpcd_area->fmt1 = cd->fmt;
+	scpcd_area->cssid = cd->chpid.cssid;
+	scpcd_area->first_chpid = cd->chpid.id;
+	scpcd_area->last_chpid = cd->chpid.id;
+
+	ccode = chsc(scpcd_area);
+	if (ccode != 0) {
+		ret = -EIO;
+		goto out_free;
+	}
+	if (scpcd_area->response.code != 0x0001) {
+		ret = -EIO;
+		CHSC_MSG(0, "scpcd: response code=%x\n",
+			 scpcd_area->response.code);
+		goto out_free;
+	}
+	memcpy(&cd->cpcb, &scpcd_area->response, scpcd_area->response.length);
+	if (copy_to_user(user_cd, cd, sizeof(*cd)))
+		ret = -EFAULT;
+	else
+		ret = 0;
+out_free:
+	kfree(cd);
+	free_page((unsigned long)scpcd_area);
+	return ret;
+}
+
+static int chsc_ioctl_info_cu(void __user *user_cd)
+{
+	struct chsc_cu_cd *cd;
+	int ret, ccode;
+	struct {
+		struct chsc_header request;
+		u32 : 2;
+		u32 m : 1;
+		u32 : 1;
+		u32 fmt1 : 4;
+		u32 cssid : 8;
+		u32 : 8;
+		u32 first_cun : 8;
+		u32 : 24;
+		u32 last_cun : 8;
+		u32 : 32;
+		struct chsc_header response;
+		u8 data[PAGE_SIZE - 20];
+	} __attribute__ ((packed)) *scucd_area;
+
+	scucd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!scucd_area)
+		return -ENOMEM;
+	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
+	if (!cd) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+	if (copy_from_user(cd, user_cd, sizeof(*cd))) {
+		ret = -EFAULT;
+		goto out_free;
+	}
+	scucd_area->request.length = 0x0010;
+	scucd_area->request.code = 0x0028;
+	scucd_area->m = cd->m;
+	scucd_area->fmt1 = cd->fmt;
+	scucd_area->cssid = cd->cssid;
+	scucd_area->first_cun = cd->cun;
+	scucd_area->last_cun = cd->cun;
+
+	ccode = chsc(scucd_area);
+	if (ccode != 0) {
+		ret = -EIO;
+		goto out_free;
+	}
+	if (scucd_area->response.code != 0x0001) {
+		ret = -EIO;
+		CHSC_MSG(0, "scucd: response code=%x\n",
+			 scucd_area->response.code);
+		goto out_free;
+	}
+	memcpy(&cd->cucb, &scucd_area->response, scucd_area->response.length);
+	if (copy_to_user(user_cd, cd, sizeof(*cd)))
+		ret = -EFAULT;
+	else
+		ret = 0;
+out_free:
+	kfree(cd);
+	free_page((unsigned long)scucd_area);
+	return ret;
+}
+
+static int chsc_ioctl_info_sch_cu(void __user *user_cud)
+{
+	struct chsc_sch_cud *cud;
+	int ret, ccode;
+	struct {
+		struct chsc_header request;
+		u32 : 2;
+		u32 m : 1;
+		u32 : 5;
+		u32 fmt1 : 4;
+		u32 : 2;
+		u32 ssid : 2;
+		u32 first_sch : 16;
+		u32 : 8;
+		u32 cssid : 8;
+		u32 last_sch : 16;
+		u32 : 32;
+		struct chsc_header response;
+		u8 data[PAGE_SIZE - 20];
+	} __attribute__ ((packed)) *sscud_area;
+
+	sscud_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sscud_area)
+		return -ENOMEM;
+	cud = kzalloc(sizeof(*cud), GFP_KERNEL);
+	if (!cud) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+	if (copy_from_user(cud, user_cud, sizeof(*cud))) {
+		ret = -EFAULT;
+		goto out_free;
+	}
+	sscud_area->request.length = 0x0010;
+	sscud_area->request.code = 0x0006;
+	sscud_area->m = cud->schid.m;
+	sscud_area->fmt1 = cud->fmt;
+	sscud_area->ssid = cud->schid.ssid;
+	sscud_area->first_sch = cud->schid.sch_no;
+	sscud_area->cssid = cud->schid.cssid;
+	sscud_area->last_sch = cud->schid.sch_no;
+
+	ccode = chsc(sscud_area);
+	if (ccode != 0) {
+		ret = -EIO;
+		goto out_free;
+	}
+	if (sscud_area->response.code != 0x0001) {
+		ret = -EIO;
+		CHSC_MSG(0, "sscud: response code=%x\n",
+			 sscud_area->response.code);
+		goto out_free;
+	}
+	memcpy(&cud->scub, &sscud_area->response, sscud_area->response.length);
+	if (copy_to_user(user_cud, cud, sizeof(*cud)))
+		ret = -EFAULT;
+	else
+		ret = 0;
+out_free:
+	kfree(cud);
+	free_page((unsigned long)sscud_area);
+	return ret;
+}
+
+static int chsc_ioctl_conf_info(void __user *user_ci)
+{
+	struct chsc_conf_info *ci;
+	int ret, ccode;
+	struct {
+		struct chsc_header request;
+		u32 : 2;
+		u32 m : 1;
+		u32 : 1;
+		u32 fmt1 : 4;
+		u32 cssid : 8;
+		u32 : 6;
+		u32 ssid : 2;
+		u32 : 8;
+		u64 : 64;
+		struct chsc_header response;
+		u8 data[PAGE_SIZE - 20];
+	} __attribute__ ((packed)) *sci_area;
+
+	sci_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sci_area)
+		return -ENOMEM;
+	ci = kzalloc(sizeof(*ci), GFP_KERNEL);
+	if (!ci) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+	if (copy_from_user(ci, user_ci, sizeof(*ci))) {
+		ret = -EFAULT;
+		goto out_free;
+	}
+	sci_area->request.length = 0x0010;
+	sci_area->request.code = 0x0012;
+	sci_area->m = ci->id.m;
+	sci_area->fmt1 = ci->fmt;
+	sci_area->cssid = ci->id.cssid;
+	sci_area->ssid = ci->id.ssid;
+
+	ccode = chsc(sci_area);
+	if (ccode != 0) {
+		ret = -EIO;
+		goto out_free;
+	}
+	if (sci_area->response.code != 0x0001) {
+		ret = -EIO;
+		CHSC_MSG(0, "sci: response code=%x\n",
+			 sci_area->response.code);
+		goto out_free;
+	}
+	memcpy(&ci->scid, &sci_area->response, sci_area->response.length);
+	if (copy_to_user(user_ci, ci, sizeof(*ci)))
+		ret = -EFAULT;
+	else
+		ret = 0;
+out_free:
+	kfree(ci);
+	free_page((unsigned long)sci_area);
+	return ret;
+}
+
+static int chsc_ioctl_conf_comp_list(void __user *user_ccl)
+{
+	struct chsc_comp_list *ccl;
+	int ret, ccode;
+	struct {
+		struct chsc_header request;
+		u32 ctype : 8;
+		u32 : 4;
+		u32 fmt : 4;
+		u32 : 16;
+		u64 : 64;
+		u32 list_parm[2];
+		u64 : 64;
+		struct chsc_header response;
+		u8 data[PAGE_SIZE - 36];
+	} __attribute__ ((packed)) *sccl_area;
+	struct {
+		u32 m : 1;
+		u32 : 31;
+		u32 cssid : 8;
+		u32 : 16;
+		u32 chpid : 8;
+	} __attribute__ ((packed)) *chpid_parm;
+	struct {
+		u32 f_cssid : 8;
+		u32 l_cssid : 8;
+		u32 : 16;
+		u32 res;
+	} __attribute__ ((packed)) *cssids_parm;
+
+	sccl_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sccl_area)
+		return -ENOMEM;
+	ccl = kzalloc(sizeof(*ccl), GFP_KERNEL);
+	if (!ccl) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+	if (copy_from_user(ccl, user_ccl, sizeof(*ccl))) {
+		ret = -EFAULT;
+		goto out_free;
+	}
+	sccl_area->request.length = 0x0020;
+	sccl_area->request.code = 0x0030;
+	sccl_area->fmt = ccl->req.fmt;
+	sccl_area->ctype = ccl->req.ctype;
+	switch (sccl_area->ctype) {
+	case CCL_CU_ON_CHP:
+	case CCL_IOP_CHP:
+		chpid_parm = (void *)&sccl_area->list_parm;
+		chpid_parm->m = ccl->req.chpid.m;
+		chpid_parm->cssid = ccl->req.chpid.chp.cssid;
+		chpid_parm->chpid = ccl->req.chpid.chp.id;
+		break;
+	case CCL_CSS_IMG:
+	case CCL_CSS_IMG_CONF_CHAR:
+		cssids_parm = (void *)&sccl_area->list_parm;
+		cssids_parm->f_cssid = ccl->req.cssids.f_cssid;
+		cssids_parm->l_cssid = ccl->req.cssids.l_cssid;
+		break;
+	}
+	ccode = chsc(sccl_area);
+	if (ccode != 0) {
+		ret = -EIO;
+		goto out_free;
+	}
+	if (sccl_area->response.code != 0x0001) {
+		ret = -EIO;
+		CHSC_MSG(0, "sccl: response code=%x\n",
+			 sccl_area->response.code);
+		goto out_free;
+	}
+	memcpy(&ccl->sccl, &sccl_area->response, sccl_area->response.length);
+	if (copy_to_user(user_ccl, ccl, sizeof(*ccl)))
+		ret = -EFAULT;
+	else
+		ret = 0;
+out_free:
+	kfree(ccl);
+	free_page((unsigned long)sccl_area);
+	return ret;
+}
+
+static int chsc_ioctl_chpd(void __user *user_chpd)
+{
+	struct chsc_cpd_info *chpd;
+	int ret;
+
+	chpd = kzalloc(sizeof(*chpd), GFP_KERNEL);
+	if (!chpd)
+		return -ENOMEM;
+	if (copy_from_user(chpd, user_chpd, sizeof(*chpd))) {
+		ret = -EFAULT;
+		goto out_free;
+	}
+	ret = chsc_determine_channel_path_desc(chpd->chpid, chpd->fmt,
+					       chpd->rfmt, chpd->c, chpd->m,
+					       &chpd->chpdb);
+	if (ret)
+		goto out_free;
+	if (copy_to_user(user_chpd, chpd, sizeof(*chpd)))
+		ret = -EFAULT;
+out_free:
+	kfree(chpd);
+	return ret;
+}
+
+static int chsc_ioctl_dcal(void __user *user_dcal)
+{
+	struct chsc_dcal *dcal;
+	int ret, ccode;
+	struct {
+		struct chsc_header request;
+		u32 atype : 8;
+		u32 : 4;
+		u32 fmt : 4;
+		u32 : 16;
+		u32 res0[2];
+		u32 list_parm[2];
+		u32 res1[2];
+		struct chsc_header response;
+		u8 data[PAGE_SIZE - 36];
+	} __attribute__ ((packed)) *sdcal_area;
+
+	sdcal_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sdcal_area)
+		return -ENOMEM;
+	dcal = kzalloc(sizeof(*dcal), GFP_KERNEL);
+	if (!dcal) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+	if (copy_from_user(dcal, user_dcal, sizeof(*dcal))) {
+		ret = -EFAULT;
+		goto out_free;
+	}
+	sdcal_area->request.length = 0x0020;
+	sdcal_area->request.code = 0x0034;
+	sdcal_area->atype = dcal->req.atype;
+	sdcal_area->fmt = dcal->req.fmt;
+	memcpy(&sdcal_area->list_parm, &dcal->req.list_parm,
+	       sizeof(sdcal_area->list_parm));
+
+	ccode = chsc(sdcal_area);
+	if (ccode != 0) {
+		ret = -EIO;
+		goto out_free;
+	}
+	if (sdcal_area->response.code != 0x0001) {
+		ret = -EIO;
+		CHSC_MSG(0, "sdcal: response code=%x\n",
+			 sdcal_area->response.code);
+		goto out_free;
+	}
+	memcpy(&dcal->sdcal, &sdcal_area->response,
+	       sdcal_area->response.length);
+	if (copy_to_user(user_dcal, dcal, sizeof(*dcal)))
+		ret = -EFAULT;
+	else
+		ret = 0;
+out_free:
+	kfree(dcal);
+	free_page((unsigned long)sdcal_area);
+	return ret;
+}
+
+static long chsc_ioctl(struct file *filp, unsigned int cmd,
+		       unsigned long arg)
+{
+	CHSC_MSG(2, "chsc_ioctl called, cmd=%x\n", cmd);
+	switch (cmd) {
+	case CHSC_START:
+		return chsc_ioctl_start((void __user *)arg);
+	case CHSC_INFO_CHANNEL_PATH:
+		return chsc_ioctl_info_channel_path((void __user *)arg);
+	case CHSC_INFO_CU:
+		return chsc_ioctl_info_cu((void __user *)arg);
+	case CHSC_INFO_SCH_CU:
+		return chsc_ioctl_info_sch_cu((void __user *)arg);
+	case CHSC_INFO_CI:
+		return chsc_ioctl_conf_info((void __user *)arg);
+	case CHSC_INFO_CCL:
+		return chsc_ioctl_conf_comp_list((void __user *)arg);
+	case CHSC_INFO_CPD:
+		return chsc_ioctl_chpd((void __user *)arg);
+	case CHSC_INFO_DCAL:
+		return chsc_ioctl_dcal((void __user *)arg);
+	default: /* unknown ioctl number */
+		return -ENOIOCTLCMD;
+	}
+}
+
+static const struct file_operations chsc_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = chsc_ioctl,
+	.compat_ioctl = chsc_ioctl,
+};
+
+static struct miscdevice chsc_misc_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "chsc",
+	.fops = &chsc_fops,
+};
+
+static int __init chsc_misc_init(void)
+{
+	return misc_register(&chsc_misc_device);
+}
+
+static void chsc_misc_cleanup(void)
+{
+	misc_deregister(&chsc_misc_device);
+}
+
+static int __init chsc_sch_init(void)
+{
+	int ret;
+
+	ret = chsc_init_dbfs();
+	if (ret)
+		return ret;
+	isc_register(CHSC_SCH_ISC);
+	ret = chsc_init_sch_driver();
+	if (ret)
+		goto out_dbf;
+	ret = chsc_misc_init();
+	if (ret)
+		goto out_driver;
+	return ret;
+out_driver:
+	chsc_cleanup_sch_driver();
+out_dbf:
+	isc_unregister(CHSC_SCH_ISC);
+	chsc_remove_dbfs();
+	return ret;
+}
+
+static void __exit chsc_sch_exit(void)
+{
+	chsc_misc_cleanup();
+	chsc_cleanup_sch_driver();
+	isc_unregister(CHSC_SCH_ISC);
+	chsc_remove_dbfs();
+}
+
+module_init(chsc_sch_init);
+module_exit(chsc_sch_exit);
diff --git a/drivers/s390/cio/chsc_sch.h b/drivers/s390/cio/chsc_sch.h
new file mode 100644
index 000000000000..589ebfad6aad
--- /dev/null
+++ b/drivers/s390/cio/chsc_sch.h
@@ -0,0 +1,13 @@
+#ifndef _CHSC_SCH_H
+#define _CHSC_SCH_H
+
+struct chsc_request {
+	struct completion completion;
+	struct irb irb;
+};
+
+struct chsc_private {
+	struct chsc_request *request;
+};
+
+#endif
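The new chsc subchannel driver exposes these CHSC commands through ioctls on a misc device. A user-space sketch for CHSC_START (assuming the ioctl numbers and the struct chsc_async_area layout from <asm/chsc.h>; the device node name follows the miscdevice name "chsc", and error handling is trimmed):

#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/chsc.h>

int main(void)
{
	struct chsc_async_area *area;
	int fd, rc;

	fd = open("/dev/chsc", O_RDWR);
	if (fd < 0)
		return 1;
	/* chsc_ioctl_start() copies exactly one page from user space. */
	area = aligned_alloc(4096, 4096);
	memset(area, 0, 4096);
	/* fill in area->header (request code/length) and the payload here */
	rc = ioctl(fd, CHSC_START, area);
	free(area);
	close(fd);
	return rc ? 1 : 0;
}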
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 82c6a2d45128..33bff8fec7d1 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -2,7 +2,7 @@
  *  drivers/s390/cio/cio.c
  *   S/390 common I/O routines -- low level i/o calls
  *
- *    Copyright (C) IBM Corp. 1999,2006
+ *    Copyright IBM Corp. 1999,2008
  *    Author(s): Ingo Adlung (adlung@de.ibm.com)
  *		 Cornelia Huck (cornelia.huck@de.ibm.com)
  *		 Arnd Bergmann (arndb@de.ibm.com)
@@ -24,7 +24,9 @@
 #include <asm/ipl.h>
 #include <asm/chpid.h>
 #include <asm/airq.h>
+#include <asm/isc.h>
 #include <asm/cpu.h>
+#include <asm/fcx.h>
 #include "cio.h"
 #include "css.h"
 #include "chsc.h"
@@ -72,7 +74,6 @@ out_unregister:
 		debug_unregister(cio_debug_trace_id);
 	if (cio_debug_crw_id)
 		debug_unregister(cio_debug_crw_id);
-	printk(KERN_WARNING"cio: could not initialize debugging\n");
 	return -1;
 }
 
@@ -128,7 +129,7 @@ cio_tpi(void)
 	local_bh_disable();
 	irq_enter ();
 	spin_lock(sch->lock);
-	memcpy (&sch->schib.scsw, &irb->scsw, sizeof (struct scsw));
+	memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw));
 	if (sch->driver && sch->driver->irq)
 		sch->driver->irq(sch);
 	spin_unlock(sch->lock);
@@ -167,30 +168,30 @@ cio_start_key (struct subchannel *sch,	/* subchannel structure */
 {
 	char dbf_txt[15];
 	int ccode;
-	struct orb *orb;
+	union orb *orb;
 
 	CIO_TRACE_EVENT(4, "stIO");
 	CIO_TRACE_EVENT(4, sch->dev.bus_id);
 
 	orb = &to_io_private(sch)->orb;
 	/* sch is always under 2G. */
-	orb->intparm = (u32)(addr_t)sch;
-	orb->fmt = 1;
+	orb->cmd.intparm = (u32)(addr_t)sch;
+	orb->cmd.fmt = 1;
 
-	orb->pfch = sch->options.prefetch == 0;
-	orb->spnd = sch->options.suspend;
-	orb->ssic = sch->options.suspend && sch->options.inter;
-	orb->lpm = (lpm != 0) ? lpm : sch->lpm;
+	orb->cmd.pfch = sch->options.prefetch == 0;
+	orb->cmd.spnd = sch->options.suspend;
+	orb->cmd.ssic = sch->options.suspend && sch->options.inter;
+	orb->cmd.lpm = (lpm != 0) ? lpm : sch->lpm;
 #ifdef CONFIG_64BIT
 	/*
 	 * for 64 bit we always support 64 bit IDAWs with 4k page size only
 	 */
-	orb->c64 = 1;
-	orb->i2k = 0;
+	orb->cmd.c64 = 1;
+	orb->cmd.i2k = 0;
 #endif
-	orb->key = key >> 4;
+	orb->cmd.key = key >> 4;
 	/* issue "Start Subchannel" */
-	orb->cpa = (__u32) __pa(cpa);
+	orb->cmd.cpa = (__u32) __pa(cpa);
 	ccode = ssch(sch->schid, orb);
 
 	/* process condition code */
@@ -202,7 +203,7 @@ cio_start_key (struct subchannel *sch,	/* subchannel structure */
 		/*
 		 * initialize device status information
 		 */
-		sch->schib.scsw.actl |= SCSW_ACTL_START_PEND;
+		sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
 		return 0;
 	case 1:		/* status pending */
 	case 2:		/* busy */
@@ -237,7 +238,7 @@ cio_resume (struct subchannel *sch)
 
 	switch (ccode) {
 	case 0:
-		sch->schib.scsw.actl |= SCSW_ACTL_RESUME_PEND;
+		sch->schib.scsw.cmd.actl |= SCSW_ACTL_RESUME_PEND;
 		return 0;
 	case 1:
 		return -EBUSY;
@@ -277,7 +278,7 @@ cio_halt(struct subchannel *sch)
 
 	switch (ccode) {
 	case 0:
-		sch->schib.scsw.actl |= SCSW_ACTL_HALT_PEND;
+		sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND;
 		return 0;
 	case 1:		/* status pending */
 	case 2:		/* busy */
@@ -312,7 +313,7 @@ cio_clear(struct subchannel *sch)
 
 	switch (ccode) {
 	case 0:
-		sch->schib.scsw.actl |= SCSW_ACTL_CLEAR_PEND;
+		sch->schib.scsw.cmd.actl |= SCSW_ACTL_CLEAR_PEND;
 		return 0;
 	default:		/* device not operational */
 		return -ENODEV;
@@ -387,8 +388,10 @@ cio_modify (struct subchannel *sch)
 	return ret;
 }
 
-/*
- * Enable subchannel.
+/**
+ * cio_enable_subchannel - enable a subchannel.
+ * @sch: subchannel to be enabled
+ * @intparm: interruption parameter to set
  */
 int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
 {
@@ -434,12 +437,13 @@ int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
 	CIO_TRACE_EVENT (2, dbf_txt);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(cio_enable_subchannel);
 
-/*
- * Disable subchannel.
+/**
+ * cio_disable_subchannel - disable a subchannel.
+ * @sch: subchannel to disable
  */
-int
-cio_disable_subchannel (struct subchannel *sch)
+int cio_disable_subchannel(struct subchannel *sch)
 {
 	char dbf_txt[15];
 	int ccode;
@@ -455,7 +459,7 @@ cio_disable_subchannel (struct subchannel *sch)
 	if (ccode == 3)		/* Not operational. */
 		return -ENODEV;
 
-	if (sch->schib.scsw.actl != 0)
+	if (scsw_actl(&sch->schib.scsw) != 0)
 		/*
 		 * the disable function must not be called while there are
 		 * requests pending for completion !
@@ -484,6 +488,7 @@ cio_disable_subchannel (struct subchannel *sch)
 	CIO_TRACE_EVENT (2, dbf_txt);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(cio_disable_subchannel);
 
 int cio_create_sch_lock(struct subchannel *sch)
 {
@@ -494,27 +499,61 @@ int cio_create_sch_lock(struct subchannel *sch)
 	return 0;
 }
 
-/*
- * cio_validate_subchannel()
+static int cio_check_devno_blacklisted(struct subchannel *sch)
+{
+	if (is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) {
+		/*
+		 * This device must not be known to Linux. So we simply
+		 * say that there is no device and return ENODEV.
+		 */
+		CIO_MSG_EVENT(6, "Blacklisted device detected "
+			      "at devno %04X, subchannel set %x\n",
+			      sch->schib.pmcw.dev, sch->schid.ssid);
+		return -ENODEV;
+	}
+	return 0;
+}
+
+static int cio_validate_io_subchannel(struct subchannel *sch)
+{
+	/* Initialization for io subchannels. */
+	if (!css_sch_is_valid(&sch->schib))
+		return -ENODEV;
+
+	/* Devno is valid. */
+	return cio_check_devno_blacklisted(sch);
+}
+
+static int cio_validate_msg_subchannel(struct subchannel *sch)
+{
+	/* Initialization for message subchannels. */
+	if (!css_sch_is_valid(&sch->schib))
+		return -ENODEV;
+
+	/* Devno is valid. */
+	return cio_check_devno_blacklisted(sch);
+}
+
+/**
+ * cio_validate_subchannel - basic validation of subchannel
+ * @sch: subchannel structure to be filled out
+ * @schid: subchannel id
  *
  * Find out subchannel type and initialize struct subchannel.
  * Return codes:
- *   SUBCHANNEL_TYPE_IO for a normal io subchannel
- *   SUBCHANNEL_TYPE_CHSC for a chsc subchannel
- *   SUBCHANNEL_TYPE_MESSAGE for a messaging subchannel
- *   SUBCHANNEL_TYPE_ADM for a adm(?) subchannel
+ *   0 on success
  *   -ENXIO for non-defined subchannels
- *   -ENODEV for subchannels with invalid device number or blacklisted devices
+ *   -ENODEV for invalid subchannels or blacklisted devices
+ *   -EIO for subchannels in an invalid subchannel set
  */
-int
-cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
+int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid)
 {
 	char dbf_txt[15];
 	int ccode;
 	int err;
 
-	sprintf (dbf_txt, "valsch%x", schid.sch_no);
-	CIO_TRACE_EVENT (4, dbf_txt);
+	sprintf(dbf_txt, "valsch%x", schid.sch_no);
+	CIO_TRACE_EVENT(4, dbf_txt);
 
 	/* Nuke all fields. */
 	memset(sch, 0, sizeof(struct subchannel));
@@ -546,65 +585,21 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
 	/* Copy subchannel type from path management control word. */
 	sch->st = sch->schib.pmcw.st;
 
-	/*
-	 * ... just being curious we check for non I/O subchannels
-	 */
-	if (sch->st != 0) {
-		CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports "
-			      "non-I/O subchannel type %04X\n",
-			      sch->schid.ssid, sch->schid.sch_no, sch->st);
-		/* We stop here for non-io subchannels. */
-		err = sch->st;
-		goto out;
+	switch (sch->st) {
+	case SUBCHANNEL_TYPE_IO:
+		err = cio_validate_io_subchannel(sch);
+		break;
+	case SUBCHANNEL_TYPE_MSG:
+		err = cio_validate_msg_subchannel(sch);
+		break;
+	default:
+		err = 0;
 	}
-
-	/* Initialization for io subchannels. */
-	if (!css_sch_is_valid(&sch->schib)) {
-		err = -ENODEV;
+	if (err)
 		goto out;
-	}
-
-	/* Devno is valid. */
-	if (is_blacklisted (sch->schid.ssid, sch->schib.pmcw.dev)) {
-		/*
-		 * This device must not be known to Linux. So we simply
-		 * say that there is no device and return ENODEV.
-		 */
-		CIO_MSG_EVENT(6, "Blacklisted device detected "
-			      "at devno %04X, subchannel set %x\n",
-			      sch->schib.pmcw.dev, sch->schid.ssid);
-		err = -ENODEV;
-		goto out;
-	}
-	if (cio_is_console(sch->schid))
-		sch->opm = 0xff;
-	else
-		sch->opm = chp_get_sch_opm(sch);
-	sch->lpm = sch->schib.pmcw.pam & sch->opm;
-	sch->isc = 3;
-
-	CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X "
-		      "- PIM = %02X, PAM = %02X, POM = %02X\n",
-		      sch->schib.pmcw.dev, sch->schid.ssid,
-		      sch->schid.sch_no, sch->schib.pmcw.pim,
-		      sch->schib.pmcw.pam, sch->schib.pmcw.pom);
-
-	/*
-	 * We now have to initially ...
-	 *  ... enable "concurrent sense"
-	 *  ... enable "multipath mode" if more than one
-	 *	  CHPID is available. This is done regardless
-	 *	  whether multiple paths are available for us.
-	 */
-	sch->schib.pmcw.csense = 1;	/* concurrent sense */
-	sch->schib.pmcw.ena = 0;
-	if ((sch->lpm & (sch->lpm - 1)) != 0)
-		sch->schib.pmcw.mp = 1;	/* multipath mode */
-	/* clean up possible residual cmf stuff */
-	sch->schib.pmcw.mme = 0;
-	sch->schib.pmcw.mbfc = 0;
-	sch->schib.pmcw.mbi = 0;
-	sch->schib.mba = 0;
+	CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
+		      sch->schid.ssid, sch->schid.sch_no, sch->st);
 	return 0;
 out:
 	if (!cio_is_console(schid))
@@ -645,7 +640,7 @@ do_IRQ (struct pt_regs *regs)
 		 */
 		if (tpi_info->adapter_IO == 1 &&
 		    tpi_info->int_type == IO_INTERRUPT_TYPE) {
-			do_adapter_IO();
+			do_adapter_IO(tpi_info->isc);
 			continue;
 		}
 		sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
@@ -704,9 +699,9 @@ void wait_cons_dev(void)
 	if (!console_subchannel_in_use)
 		return;
 
-	/* disable all but isc 7 (console device) */
+	/* disable all but the console isc */
 	__ctl_store (save_cr6, 6, 6);
-	cr6 = 0x01000000;
+	cr6 = 1UL << (31 - CONSOLE_ISC);
 	__ctl_load (cr6, 6, 6);
 
 	do {
@@ -714,7 +709,7 @@ void wait_cons_dev(void)
 		if (!cio_tpi())
 			cpu_relax();
 		spin_lock(console_subchannel.lock);
-	} while (console_subchannel.schib.scsw.actl != 0);
+	} while (console_subchannel.schib.scsw.cmd.actl != 0);
 	/*
 	 * restore previous isc value
 	 */
@@ -759,7 +754,6 @@ cio_get_console_sch_no(void)
 		/* unlike in 2.4, we cannot autoprobe here, since
 		 * the channel subsystem is not fully initialized.
* With some luck, the HWC console can take over */ - printk(KERN_WARNING "cio: No ccw console found!\n"); return -1; } return console_irq; @@ -776,6 +770,7 @@ cio_probe_console(void) sch_no = cio_get_console_sch_no(); if (sch_no == -1) { console_subchannel_in_use = 0; + printk(KERN_WARNING "cio: No ccw console found!\n"); return ERR_PTR(-ENODEV); } memset(&console_subchannel, 0, sizeof(struct subchannel)); @@ -788,15 +783,15 @@ cio_probe_console(void) } /* - * enable console I/O-interrupt subclass 7 + * enable console I/O-interrupt subclass */ - ctl_set_bit(6, 24); - console_subchannel.isc = 7; - console_subchannel.schib.pmcw.isc = 7; + isc_register(CONSOLE_ISC); + console_subchannel.schib.pmcw.isc = CONSOLE_ISC; console_subchannel.schib.pmcw.intparm = (u32)(addr_t)&console_subchannel; ret = cio_modify(&console_subchannel); if (ret) { + isc_unregister(CONSOLE_ISC); console_subchannel_in_use = 0; return ERR_PTR(ret); } @@ -808,7 +803,7 @@ cio_release_console(void) { console_subchannel.schib.pmcw.intparm = 0; cio_modify(&console_subchannel); - ctl_clear_bit(6, 24); + isc_unregister(CONSOLE_ISC); console_subchannel_in_use = 0; } @@ -862,7 +857,7 @@ static void udelay_reset(unsigned long usecs) } static int -__clear_subchannel_easy(struct subchannel_id schid) +__clear_io_subchannel_easy(struct subchannel_id schid) { int retry; @@ -881,6 +876,12 @@ __clear_subchannel_easy(struct subchannel_id schid) return -EBUSY; } +static void __clear_chsc_subchannel_easy(void) +{ + /* It seems we can only wait for a bit here :/ */ + udelay_reset(100); +} + static int pgm_check_occured; static void cio_reset_pgm_check_handler(void) @@ -919,11 +920,22 @@ static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data) case -ENODEV: break; default: /* -EBUSY */ - if (__clear_subchannel_easy(schid)) - break; /* give up... */ + switch (schib.pmcw.st) { + case SUBCHANNEL_TYPE_IO: + if (__clear_io_subchannel_easy(schid)) + goto out; /* give up... */ + break; + case SUBCHANNEL_TYPE_CHSC: + __clear_chsc_subchannel_easy(); + break; + default: + /* No default clear strategy */ + break; + } stsch(schid, &schib); __disable_subchannel_easy(schid, &schib); } +out: return 0; } @@ -1066,3 +1078,61 @@ int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo) iplinfo->is_qdio = schib.pmcw.qf; return 0; } + +/** + * cio_tm_start_key - perform start function + * @sch: subchannel on which to perform the start function + * @tcw: transport-command word to be started + * @lpm: mask of paths to use + * @key: storage key to use for storage access + * + * Start the tcw on the given subchannel. Return zero on success, non-zero + * otherwise. + */ +int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key) +{ + int cc; + union orb *orb = &to_io_private(sch)->orb; + + memset(orb, 0, sizeof(union orb)); + orb->tm.intparm = (u32) (addr_t) sch; + orb->tm.key = key >> 4; + orb->tm.b = 1; + orb->tm.lpm = lpm ? lpm : sch->lpm; + orb->tm.tcw = (u32) (addr_t) tcw; + cc = ssch(sch->schid, orb); + switch (cc) { + case 0: + return 0; + case 1: + case 2: + return -EBUSY; + default: + return cio_start_handle_notoper(sch, lpm); + } +} + +/** + * cio_tm_intrg - perform interrogate function + * @sch: subchannel on which to perform the interrogate function + * + * If the specified subchannel is running in transport-mode, perform the + * interrogate function. Return zero on success, non-zero otherwise. 
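/* Editorial sketch, not part of the patch: a minimal caller of the new
 * transport-mode start function.  The tcw is assumed to have been set
 * up beforehand (e.g. via the fcx helpers added by this series); per
 * cio_tm_start_key() above, lpm == 0 falls back to sch->lpm, and
 * PAGE_DEFAULT_KEY selects the default storage key. */
static int example_tm_start(struct subchannel *sch, struct tcw *tcw)
{
        int rc;

        rc = cio_tm_start_key(sch, tcw, 0, PAGE_DEFAULT_KEY);
        /* 0: started; -EBUSY: subchannel busy; otherwise not operational. */
        return rc;
}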
+ */ +int cio_tm_intrg(struct subchannel *sch) +{ + int cc; + + if (!to_io_private(sch)->orb.tm.b) + return -EINVAL; + cc = xsch(sch->schid); + switch (cc) { + case 0: + case 2: + return 0; + case 1: + return -EBUSY; + default: + return -ENODEV; + } +} diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h index 6e933aebe013..3b236d20e835 100644 --- a/drivers/s390/cio/cio.h +++ b/drivers/s390/cio/cio.h @@ -3,9 +3,12 @@ #include <linux/mutex.h> #include <linux/device.h> +#include <linux/mod_devicetable.h> #include <asm/chpid.h> +#include <asm/cio.h> +#include <asm/fcx.h> +#include <asm/schid.h> #include "chsc.h" -#include "schid.h" /* * path management control word @@ -13,7 +16,7 @@ struct pmcw { u32 intparm; /* interruption parameter */ u32 qf : 1; /* qdio facility */ - u32 res0 : 1; /* reserved zeros */ + u32 w : 1; u32 isc : 3; /* interruption sublass */ u32 res5 : 3; /* reserved zeros */ u32 ena : 1; /* enabled */ @@ -47,7 +50,7 @@ struct pmcw { */ struct schib { struct pmcw pmcw; /* path management control word */ - struct scsw scsw; /* subchannel status word */ + union scsw scsw; /* subchannel status word */ __u64 mba; /* measurement block address */ __u8 mda[4]; /* model dependent area */ } __attribute__ ((packed,aligned(4))); @@ -99,8 +102,11 @@ extern int cio_set_options (struct subchannel *, int); extern int cio_get_options (struct subchannel *); extern int cio_modify (struct subchannel *); +int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key); +int cio_tm_intrg(struct subchannel *sch); + int cio_create_sch_lock(struct subchannel *); -void do_adapter_IO(void); +void do_adapter_IO(u8 isc); void do_IRQ(struct pt_regs *); /* Use with care. */ diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c index 2808b6833b9e..a90b28c0be57 100644 --- a/drivers/s390/cio/cmf.c +++ b/drivers/s390/cio/cmf.c @@ -341,12 +341,12 @@ static int cmf_copy_block(struct ccw_device *cdev) if (stsch(sch->schid, &sch->schib)) return -ENODEV; - if (sch->schib.scsw.fctl & SCSW_FCTL_START_FUNC) { + if (scsw_fctl(&sch->schib.scsw) & SCSW_FCTL_START_FUNC) { /* Don't copy if a start function is in progress. */ - if ((!(sch->schib.scsw.actl & SCSW_ACTL_SUSPENDED)) && - (sch->schib.scsw.actl & + if ((!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_SUSPENDED)) && + (scsw_actl(&sch->schib.scsw) & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) && - (!(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS))) + (!(scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_SEC_STATUS))) return -EBUSY; } cmb_data = cdev->private->cmb; @@ -612,9 +612,6 @@ static int alloc_cmb(struct ccw_device *cdev) free_pages((unsigned long)mem, get_order(size)); } else if (!mem) { /* no luck */ - printk(KERN_WARNING "cio: failed to allocate area " - "for measuring %d subchannels\n", - cmb_area.num_channels); ret = -ENOMEM; goto out; } else { @@ -1230,13 +1227,9 @@ static ssize_t cmb_enable_store(struct device *dev, switch (val) { case 0: ret = disable_cmf(cdev); - if (ret) - dev_info(&cdev->dev, "disable_cmf failed (%d)\n", ret); break; case 1: ret = enable_cmf(cdev); - if (ret && ret != -EBUSY) - dev_info(&cdev->dev, "enable_cmf failed (%d)\n", ret); break; } @@ -1344,8 +1337,7 @@ static int __init init_cmf(void) * to basic mode. 
*/ if (format == CMF_AUTODETECT) { - if (!css_characteristics_avail || - !css_general_characteristics.ext_mb) { + if (!css_general_characteristics.ext_mb) { format = CMF_BASIC; } else { format = CMF_EXTENDED; @@ -1365,8 +1357,6 @@ static int __init init_cmf(void) cmbops = &cmbops_extended; break; default: - printk(KERN_ERR "cio: Invalid format %d for channel " - "measurement facility\n", format); return 1; } diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index a76956512b2d..46c021d880dc 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -2,8 +2,7 @@ * drivers/s390/cio/css.c * driver for channel subsystem * - * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, - * IBM Corporation + * Copyright IBM Corp. 2002,2008 * Author(s): Arnd Bergmann (arndb@de.ibm.com) * Cornelia Huck (cornelia.huck@de.ibm.com) */ @@ -14,7 +13,9 @@ #include <linux/errno.h> #include <linux/list.h> #include <linux/reboot.h> +#include <asm/isc.h> +#include "../s390mach.h" #include "css.h" #include "cio.h" #include "cio_debug.h" @@ -30,8 +31,6 @@ static int max_ssid = 0; struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1]; -int css_characteristics_avail = 0; - int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data) { @@ -121,25 +120,6 @@ css_alloc_subchannel(struct subchannel_id schid) kfree(sch); return ERR_PTR(ret); } - - if (sch->st != SUBCHANNEL_TYPE_IO) { - /* For now we ignore all non-io subchannels. */ - kfree(sch); - return ERR_PTR(-EINVAL); - } - - /* - * Set intparm to subchannel address. - * This is fine even on 64bit since the subchannel is always located - * under 2G. - */ - sch->schib.pmcw.intparm = (u32)(addr_t)sch; - ret = cio_modify(sch); - if (ret) { - kfree(sch->lock); - kfree(sch); - return ERR_PTR(ret); - } return sch; } @@ -177,12 +157,18 @@ static int css_sch_device_register(struct subchannel *sch) return ret; } +/** + * css_sch_device_unregister - unregister a subchannel + * @sch: subchannel to be unregistered + */ void css_sch_device_unregister(struct subchannel *sch) { mutex_lock(&sch->reg_mutex); - device_unregister(&sch->dev); + if (device_is_registered(&sch->dev)) + device_unregister(&sch->dev); mutex_unlock(&sch->reg_mutex); } +EXPORT_SYMBOL_GPL(css_sch_device_unregister); static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw) { @@ -229,6 +215,41 @@ void css_update_ssd_info(struct subchannel *sch) } } +static ssize_t type_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct subchannel *sch = to_subchannel(dev); + + return sprintf(buf, "%01x\n", sch->st); +} + +static DEVICE_ATTR(type, 0444, type_show, NULL); + +static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct subchannel *sch = to_subchannel(dev); + + return sprintf(buf, "css:t%01X\n", sch->st); +} + +static DEVICE_ATTR(modalias, 0444, modalias_show, NULL); + +static struct attribute *subch_attrs[] = { + &dev_attr_type.attr, + &dev_attr_modalias.attr, + NULL, +}; + +static struct attribute_group subch_attr_group = { + .attrs = subch_attrs, +}; + +static struct attribute_group *default_subch_attr_groups[] = { + &subch_attr_group, + NULL, +}; + static int css_register_subchannel(struct subchannel *sch) { int ret; @@ -237,16 +258,17 @@ static int css_register_subchannel(struct subchannel *sch) sch->dev.parent = &channel_subsystems[0]->device; sch->dev.bus = &css_bus_type; sch->dev.release = &css_subchannel_release; - sch->dev.groups = subch_attr_groups; + sch->dev.groups = 
default_subch_attr_groups; /* * We don't want to generate uevents for I/O subchannels that don't * have a working ccw device behind them since they will be * unregistered before they can be used anyway, so we delay the add * uevent until after device recognition was successful. + * Note that we suppress the uevent for all subchannel types; + * the subchannel driver can decide itself when it wants to inform + * userspace of its existence. */ - if (!cio_is_console(sch->schid)) - /* Console is special, no need to suppress. */ - sch->dev.uevent_suppress = 1; + sch->dev.uevent_suppress = 1; css_update_ssd_info(sch); /* make it known to the system */ ret = css_sch_device_register(sch); @@ -255,10 +277,19 @@ static int css_register_subchannel(struct subchannel *sch) sch->schid.ssid, sch->schid.sch_no, ret); return ret; } + if (!sch->driver) { + /* + * No driver matched. Generate the uevent now so that + * a fitting driver module may be loaded based on the + * modalias. + */ + sch->dev.uevent_suppress = 0; + kobject_uevent(&sch->dev.kobj, KOBJ_ADD); + } return ret; } -static int css_probe_device(struct subchannel_id schid) +int css_probe_device(struct subchannel_id schid) { int ret; struct subchannel *sch; @@ -301,116 +332,12 @@ int css_sch_is_valid(struct schib *schib) { if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv) return 0; + if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w) + return 0; return 1; } EXPORT_SYMBOL_GPL(css_sch_is_valid); -static int css_get_subchannel_status(struct subchannel *sch) -{ - struct schib schib; - - if (stsch(sch->schid, &schib)) - return CIO_GONE; - if (!css_sch_is_valid(&schib)) - return CIO_GONE; - if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev)) - return CIO_REVALIDATE; - if (!sch->lpm) - return CIO_NO_PATH; - return CIO_OPER; -} - -static int css_evaluate_known_subchannel(struct subchannel *sch, int slow) -{ - int event, ret, disc; - unsigned long flags; - enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action; - - spin_lock_irqsave(sch->lock, flags); - disc = device_is_disconnected(sch); - if (disc && slow) { - /* Disconnected devices are evaluated directly only.*/ - spin_unlock_irqrestore(sch->lock, flags); - return 0; - } - /* No interrupt after machine check - kill pending timers. */ - device_kill_pending_timer(sch); - if (!disc && !slow) { - /* Non-disconnected devices are evaluated on the slow path. */ - spin_unlock_irqrestore(sch->lock, flags); - return -EAGAIN; - } - event = css_get_subchannel_status(sch); - CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n", - sch->schid.ssid, sch->schid.sch_no, event, - disc ? "disconnected" : "normal", - slow ? "slow" : "fast"); - /* Analyze subchannel status. */ - action = NONE; - switch (event) { - case CIO_NO_PATH: - if (disc) { - /* Check if paths have become available. */ - action = REPROBE; - break; - } - /* fall through */ - case CIO_GONE: - /* Prevent unwanted effects when opening lock. */ - cio_disable_subchannel(sch); - device_set_disconnected(sch); - /* Ask driver what to do with device. */ - action = UNREGISTER; - if (sch->driver && sch->driver->notify) { - spin_unlock_irqrestore(sch->lock, flags); - ret = sch->driver->notify(sch, event); - spin_lock_irqsave(sch->lock, flags); - if (ret) - action = NONE; - } - break; - case CIO_REVALIDATE: - /* Device will be removed, so no notify necessary. */ - if (disc) - /* Reprobe because immediate unregister might block. 
*/ - action = REPROBE; - else - action = UNREGISTER_PROBE; - break; - case CIO_OPER: - if (disc) - /* Get device operational again. */ - action = REPROBE; - break; - } - /* Perform action. */ - ret = 0; - switch (action) { - case UNREGISTER: - case UNREGISTER_PROBE: - /* Unregister device (will use subchannel lock). */ - spin_unlock_irqrestore(sch->lock, flags); - css_sch_device_unregister(sch); - spin_lock_irqsave(sch->lock, flags); - - /* Reset intparm to zeroes. */ - sch->schib.pmcw.intparm = 0; - cio_modify(sch); - break; - case REPROBE: - device_trigger_reprobe(sch); - break; - default: - break; - } - spin_unlock_irqrestore(sch->lock, flags); - /* Probe if necessary. */ - if (action == UNREGISTER_PROBE) - ret = css_probe_device(sch->schid); - - return ret; -} - static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow) { struct schib schib; @@ -429,6 +356,21 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow) return css_probe_device(schid); } +static int css_evaluate_known_subchannel(struct subchannel *sch, int slow) +{ + int ret = 0; + + if (sch->driver) { + if (sch->driver->sch_event) + ret = sch->driver->sch_event(sch, slow); + else + dev_dbg(&sch->dev, + "Got subchannel machine check but " + "no sch_event handler provided.\n"); + } + return ret; +} + static void css_evaluate_subchannel(struct subchannel_id schid, int slow) { struct subchannel *sch; @@ -596,18 +538,29 @@ EXPORT_SYMBOL_GPL(css_schedule_reprobe); /* * Called from the machine check handler for subchannel report words. */ -void css_process_crw(int rsid1, int rsid2) +static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow) { struct subchannel_id mchk_schid; - CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n", - rsid1, rsid2); + if (overflow) { + css_schedule_eval_all(); + return; + } + CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, " + "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", + crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc, + crw0->erc, crw0->rsid); + if (crw1) + CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, " + "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", + crw1->slct, crw1->oflw, crw1->chn, crw1->rsc, + crw1->anc, crw1->erc, crw1->rsid); init_subchannel_id(&mchk_schid); - mchk_schid.sch_no = rsid1; - if (rsid2 != 0) - mchk_schid.ssid = (rsid2 >> 8) & 3; + mchk_schid.sch_no = crw0->rsid; + if (crw1) + mchk_schid.ssid = (crw1->rsid >> 8) & 3; - /* + /* * Since we are always presented with IPI in the CRW, we have to * use stsch() to find out if the subchannel in question has come * or gone. @@ -658,7 +611,7 @@ __init_channel_subsystem(struct subchannel_id schid, void *data) static void __init css_generate_pgid(struct channel_subsystem *css, u32 tod_high) { - if (css_characteristics_avail && css_general_characteristics.mcss) { + if (css_general_characteristics.mcss) { css->global_pgid.pgid_high.ext_cssid.version = 0x80; css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid; } else { @@ -795,8 +748,6 @@ init_channel_subsystem (void) ret = chsc_determine_css_characteristics(); if (ret == -ENOMEM) goto out; /* No need to continue. 
*/ - if (ret == 0) - css_characteristics_avail = 1; ret = chsc_alloc_sei_area(); if (ret) @@ -806,6 +757,10 @@ init_channel_subsystem (void) if (ret) goto out; + ret = s390_register_crw_handler(CRW_RSC_SCH, css_process_crw); + if (ret) + goto out; + if ((ret = bus_register(&css_bus_type))) goto out; @@ -836,8 +791,7 @@ init_channel_subsystem (void) ret = device_register(&css->device); if (ret) goto out_free_all; - if (css_characteristics_avail && - css_chsc_characteristics.secm) { + if (css_chsc_characteristics.secm) { ret = device_create_file(&css->device, &dev_attr_cm_enable); if (ret) @@ -852,7 +806,8 @@ init_channel_subsystem (void) goto out_pseudo; css_init_done = 1; - ctl_set_bit(6, 28); + /* Enable default isc for I/O subchannels. */ + isc_register(IO_SCH_ISC); for_each_subchannel(__init_channel_subsystem, NULL); return 0; @@ -875,7 +830,7 @@ out_unregister: i--; css = channel_subsystems[i]; device_unregister(&css->pseudo_subchannel->dev); - if (css_characteristics_avail && css_chsc_characteristics.secm) + if (css_chsc_characteristics.secm) device_remove_file(&css->device, &dev_attr_cm_enable); device_unregister(&css->device); @@ -883,6 +838,7 @@ out_unregister: out_bus: bus_unregister(&css_bus_type); out: + s390_unregister_crw_handler(CRW_RSC_CSS); chsc_free_sei_area(); kfree(slow_subchannel_set); printk(KERN_WARNING"cio: failed to initialize css driver (%d)!\n", @@ -895,19 +851,16 @@ int sch_is_pseudo_sch(struct subchannel *sch) return sch == to_css(sch->dev.parent)->pseudo_subchannel; } -/* - * find a driver for a subchannel. They identify by the subchannel - * type with the exception that the console subchannel driver has its own - * subchannel type although the device is an i/o subchannel - */ -static int -css_bus_match (struct device *dev, struct device_driver *drv) +static int css_bus_match(struct device *dev, struct device_driver *drv) { struct subchannel *sch = to_subchannel(dev); struct css_driver *driver = to_cssdriver(drv); + struct css_device_id *id; - if (sch->st == driver->subchannel_type) - return 1; + for (id = driver->subchannel_type; id->match_flags; id++) { + if (sch->st == id->type) + return 1; + } return 0; } @@ -945,12 +898,25 @@ static void css_shutdown(struct device *dev) sch->driver->shutdown(sch); } +static int css_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + struct subchannel *sch = to_subchannel(dev); + int ret; + + ret = add_uevent_var(env, "ST=%01X", sch->st); + if (ret) + return ret; + ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st); + return ret; +} + struct bus_type css_bus_type = { .name = "css", .match = css_bus_match, .probe = css_probe, .remove = css_remove, .shutdown = css_shutdown, + .uevent = css_uevent, }; /** @@ -985,4 +951,3 @@ subsys_initcall(init_channel_subsystem); MODULE_LICENSE("GPL"); EXPORT_SYMBOL(css_bus_type); -EXPORT_SYMBOL_GPL(css_characteristics_avail); diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h index e1913518f354..57ebf120f825 100644 --- a/drivers/s390/cio/css.h +++ b/drivers/s390/cio/css.h @@ -9,8 +9,7 @@ #include <asm/cio.h> #include <asm/chpid.h> - -#include "schid.h" +#include <asm/schid.h> /* * path grouping stuff @@ -58,20 +57,28 @@ struct pgid { __u32 tod_high; /* high word TOD clock */ } __attribute__ ((packed)); -/* - * A css driver handles all subchannels of one type. - * Currently, we only care about I/O subchannels (type 0), these - * have a ccw_device connected to them. 
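/* Editorial sketch, not part of the patch: with css_bus_match() above,
 * a subchannel driver matches by type through a css_device_id table,
 * and the css:tX modalias emitted by css_uevent() lets userspace load
 * a fitting module.  All names here are illustrative; the table mirrors
 * the io_subchannel_ids added for I/O subchannels below. */
static struct css_device_id example_msg_subchannel_ids[] = {
        { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_MSG, },
        { /* end of list */ },
};
MODULE_DEVICE_TABLE(css, example_msg_subchannel_ids);

static struct css_driver example_msg_driver = {
        .owner = THIS_MODULE,
        .subchannel_type = example_msg_subchannel_ids,
        .name = "example_msg",
};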
- */ struct subchannel; +struct chp_link; +/** + * struct css_driver - device driver for subchannels + * @owner: owning module + * @subchannel_type: subchannel type supported by this driver + * @drv: embedded device driver structure + * @irq: called on interrupts + * @chp_event: called for events affecting a channel path + * @sch_event: called for events affecting the subchannel + * @probe: function called on probe + * @remove: function called on remove + * @shutdown: called at device shutdown + * @name: name of the device driver + */ struct css_driver { struct module *owner; - unsigned int subchannel_type; + struct css_device_id *subchannel_type; struct device_driver drv; void (*irq)(struct subchannel *); - int (*notify)(struct subchannel *, int); - void (*verify)(struct subchannel *); - void (*termination)(struct subchannel *); + int (*chp_event)(struct subchannel *, struct chp_link *, int); + int (*sch_event)(struct subchannel *, int); int (*probe)(struct subchannel *); int (*remove)(struct subchannel *); void (*shutdown)(struct subchannel *); @@ -89,13 +96,13 @@ extern int css_driver_register(struct css_driver *); extern void css_driver_unregister(struct css_driver *); extern void css_sch_device_unregister(struct subchannel *); -extern struct subchannel * get_subchannel_by_schid(struct subchannel_id); +extern int css_probe_device(struct subchannel_id); +extern struct subchannel *get_subchannel_by_schid(struct subchannel_id); extern int css_init_done; int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *), int (*fn_unknown)(struct subchannel_id, void *), void *data); extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); -extern void css_process_crw(int, int); extern void css_reiterate_subchannels(void); void css_update_ssd_info(struct subchannel *sch); @@ -121,20 +128,6 @@ struct channel_subsystem { extern struct bus_type css_bus_type; extern struct channel_subsystem *channel_subsystems[]; -/* Some helper functions for disconnected state. */ -int device_is_disconnected(struct subchannel *); -void device_set_disconnected(struct subchannel *); -void device_trigger_reprobe(struct subchannel *); - -/* Helper functions for vary on/off. */ -int device_is_online(struct subchannel *); -void device_kill_io(struct subchannel *); -void device_set_intretry(struct subchannel *sch); -int device_trigger_verify(struct subchannel *sch); - -/* Machine check helper function. */ -void device_kill_pending_timer(struct subchannel *); - /* Helper functions to build lists for the slow path. */ void css_schedule_eval(struct subchannel_id schid); void css_schedule_eval_all(void); @@ -145,6 +138,4 @@ int css_sch_is_valid(struct schib *); extern struct workqueue_struct *slow_path_wq; void css_wait_for_slow_path(void); - -extern struct attribute_group *subch_attr_groups[]; #endif diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index e22813db74a2..e818d0c54c09 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -2,8 +2,7 @@ * drivers/s390/cio/device.c * bus driver for ccw devices * - * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, - * IBM Corporation + * Copyright IBM Corp. 
2002,2008 * Author(s): Arnd Bergmann (arndb@de.ibm.com) * Cornelia Huck (cornelia.huck@de.ibm.com) * Martin Schwidefsky (schwidefsky@de.ibm.com) @@ -23,7 +22,9 @@ #include <asm/cio.h> #include <asm/param.h> /* HZ */ #include <asm/cmb.h> +#include <asm/isc.h> +#include "chp.h" #include "cio.h" #include "cio_debug.h" #include "css.h" @@ -125,19 +126,24 @@ struct bus_type ccw_bus_type; static void io_subchannel_irq(struct subchannel *); static int io_subchannel_probe(struct subchannel *); static int io_subchannel_remove(struct subchannel *); -static int io_subchannel_notify(struct subchannel *, int); -static void io_subchannel_verify(struct subchannel *); -static void io_subchannel_ioterm(struct subchannel *); static void io_subchannel_shutdown(struct subchannel *); +static int io_subchannel_sch_event(struct subchannel *, int); +static int io_subchannel_chp_event(struct subchannel *, struct chp_link *, + int); + +static struct css_device_id io_subchannel_ids[] = { + { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, }, + { /* end of list */ }, +}; +MODULE_DEVICE_TABLE(css, io_subchannel_ids); static struct css_driver io_subchannel_driver = { .owner = THIS_MODULE, - .subchannel_type = SUBCHANNEL_TYPE_IO, + .subchannel_type = io_subchannel_ids, .name = "io_subchannel", .irq = io_subchannel_irq, - .notify = io_subchannel_notify, - .verify = io_subchannel_verify, - .termination = io_subchannel_ioterm, + .sch_event = io_subchannel_sch_event, + .chp_event = io_subchannel_chp_event, .probe = io_subchannel_probe, .remove = io_subchannel_remove, .shutdown = io_subchannel_shutdown, @@ -487,25 +493,22 @@ static int online_store_recog_and_online(struct ccw_device *cdev) ccw_device_set_online(cdev); return 0; } -static void online_store_handle_online(struct ccw_device *cdev, int force) +static int online_store_handle_online(struct ccw_device *cdev, int force) { int ret; ret = online_store_recog_and_online(cdev); if (ret) - return; + return ret; if (force && cdev->private->state == DEV_STATE_BOXED) { ret = ccw_device_stlck(cdev); - if (ret) { - dev_warn(&cdev->dev, - "ccw_device_stlck returned %d!\n", ret); - return; - } + if (ret) + return ret; if (cdev->id.cu_type == 0) cdev->private->state = DEV_STATE_NOT_OPER; online_store_recog_and_online(cdev); } - + return 0; } static ssize_t online_store (struct device *dev, struct device_attribute *attr, @@ -538,8 +541,9 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr, ret = count; break; case 1: - online_store_handle_online(cdev, force); - ret = count; + ret = online_store_handle_online(cdev, force); + if (!ret) + ret = count; break; default: ret = -EINVAL; @@ -584,19 +588,14 @@ static DEVICE_ATTR(modalias, 0444, modalias_show, NULL); static DEVICE_ATTR(online, 0644, online_show, online_store); static DEVICE_ATTR(availability, 0444, available_show, NULL); -static struct attribute * subch_attrs[] = { +static struct attribute *io_subchannel_attrs[] = { &dev_attr_chpids.attr, &dev_attr_pimpampom.attr, NULL, }; -static struct attribute_group subch_attr_group = { - .attrs = subch_attrs, -}; - -struct attribute_group *subch_attr_groups[] = { - &subch_attr_group, - NULL, +static struct attribute_group io_subchannel_attr_group = { + .attrs = io_subchannel_attrs, }; static struct attribute * ccwdev_attrs[] = { @@ -790,7 +789,7 @@ static void sch_attach_device(struct subchannel *sch, sch_set_cdev(sch, cdev); cdev->private->schid = sch->schid; cdev->ccwlock = sch->lock; - device_trigger_reprobe(sch); + ccw_device_trigger_reprobe(cdev); 
spin_unlock_irq(sch->lock); } @@ -1037,7 +1036,6 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch) struct ccw_device_private *priv; sch_set_cdev(sch, cdev); - sch->driver = &io_subchannel_driver; cdev->ccwlock = sch->lock; /* Init private data. */ @@ -1122,8 +1120,33 @@ static void io_subchannel_irq(struct subchannel *sch) dev_fsm_event(cdev, DEV_EVENT_INTERRUPT); } -static int -io_subchannel_probe (struct subchannel *sch) +static void io_subchannel_init_fields(struct subchannel *sch) +{ + if (cio_is_console(sch->schid)) + sch->opm = 0xff; + else + sch->opm = chp_get_sch_opm(sch); + sch->lpm = sch->schib.pmcw.pam & sch->opm; + sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC; + + CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X" + " - PIM = %02X, PAM = %02X, POM = %02X\n", + sch->schib.pmcw.dev, sch->schid.ssid, + sch->schid.sch_no, sch->schib.pmcw.pim, + sch->schib.pmcw.pam, sch->schib.pmcw.pom); + /* Initially set up some fields in the pmcw. */ + sch->schib.pmcw.ena = 0; + sch->schib.pmcw.csense = 1; /* concurrent sense */ + if ((sch->lpm & (sch->lpm - 1)) != 0) + sch->schib.pmcw.mp = 1; /* multipath mode */ + /* clean up possible residual cmf stuff */ + sch->schib.pmcw.mme = 0; + sch->schib.pmcw.mbfc = 0; + sch->schib.pmcw.mbi = 0; + sch->schib.mba = 0; +} + +static int io_subchannel_probe(struct subchannel *sch) { struct ccw_device *cdev; int rc; @@ -1132,11 +1155,21 @@ io_subchannel_probe (struct subchannel *sch) cdev = sch_get_cdev(sch); if (cdev) { + rc = sysfs_create_group(&sch->dev.kobj, + &io_subchannel_attr_group); + if (rc) + CIO_MSG_EVENT(0, "Failed to create io subchannel " + "attributes for subchannel " + "0.%x.%04x (rc=%d)\n", + sch->schid.ssid, sch->schid.sch_no, rc); /* * This subchannel already has an associated ccw_device. - * Register it and exit. This happens for all early - * device, e.g. the console. + * Throw the delayed uevent for the subchannel, register + * the ccw_device and exit. This happens for all early + * devices, e.g. the console. */ + sch->dev.uevent_suppress = 0; + kobject_uevent(&sch->dev.kobj, KOBJ_ADD); cdev->dev.groups = ccwdev_attr_groups; device_initialize(&cdev->dev); ccw_device_register(cdev); @@ -1152,17 +1185,24 @@ io_subchannel_probe (struct subchannel *sch) get_device(&cdev->dev); return 0; } + io_subchannel_init_fields(sch); /* * First check if a fitting device may be found amongst the * disconnected devices or in the orphanage. */ dev_id.devno = sch->schib.pmcw.dev; dev_id.ssid = sch->schid.ssid; + rc = sysfs_create_group(&sch->dev.kobj, + &io_subchannel_attr_group); + if (rc) + return rc; /* Allocate I/O subchannel private data. 
*/ sch->private = kzalloc(sizeof(struct io_subchannel_private), GFP_KERNEL | GFP_DMA); - if (!sch->private) - return -ENOMEM; + if (!sch->private) { + rc = -ENOMEM; + goto out_err; + } cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL); if (!cdev) cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent), @@ -1181,8 +1221,8 @@ io_subchannel_probe (struct subchannel *sch) } cdev = io_subchannel_create_ccwdev(sch); if (IS_ERR(cdev)) { - kfree(sch->private); - return PTR_ERR(cdev); + rc = PTR_ERR(cdev); + goto out_err; } rc = io_subchannel_recog(cdev, sch); if (rc) { @@ -1191,9 +1231,12 @@ io_subchannel_probe (struct subchannel *sch) spin_unlock_irqrestore(sch->lock, flags); if (cdev->dev.release) cdev->dev.release(&cdev->dev); - kfree(sch->private); + goto out_err; } - + return 0; +out_err: + kfree(sch->private); + sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group); return rc; } @@ -1214,6 +1257,7 @@ io_subchannel_remove (struct subchannel *sch) ccw_device_unregister(cdev); put_device(&cdev->dev); kfree(sch->private); + sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group); return 0; } @@ -1224,11 +1268,7 @@ static int io_subchannel_notify(struct subchannel *sch, int event) cdev = sch_get_cdev(sch); if (!cdev) return 0; - if (!cdev->drv) - return 0; - if (!cdev->online) - return 0; - return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0; + return ccw_device_notify(cdev, event); } static void io_subchannel_verify(struct subchannel *sch) @@ -1240,22 +1280,96 @@ static void io_subchannel_verify(struct subchannel *sch) dev_fsm_event(cdev, DEV_EVENT_VERIFY); } -static void io_subchannel_ioterm(struct subchannel *sch) +static int check_for_io_on_path(struct subchannel *sch, int mask) { - struct ccw_device *cdev; + int cc; - cdev = sch_get_cdev(sch); - if (!cdev) - return; - /* Internal I/O will be retried by the interrupt handler. */ - if (cdev->private->flags.intretry) + cc = stsch(sch->schid, &sch->schib); + if (cc) + return 0; + if (scsw_actl(&sch->schib.scsw) && sch->schib.pmcw.lpum == mask) + return 1; + return 0; +} + +static void terminate_internal_io(struct subchannel *sch, + struct ccw_device *cdev) +{ + if (cio_clear(sch)) { + /* Recheck device in case clear failed. */ + sch->lpm = 0; + if (cdev->online) + dev_fsm_event(cdev, DEV_EVENT_VERIFY); + else + css_schedule_eval(sch->schid); return; + } cdev->private->state = DEV_STATE_CLEAR_VERIFY; + /* Request retry of internal operation. */ + cdev->private->flags.intretry = 1; + /* Call handler. */ if (cdev->handler) cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO)); } +static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask) +{ + struct ccw_device *cdev; + + cdev = sch_get_cdev(sch); + if (!cdev) + return; + if (check_for_io_on_path(sch, mask)) { + if (cdev->private->state == DEV_STATE_ONLINE) + ccw_device_kill_io(cdev); + else { + terminate_internal_io(sch, cdev); + /* Re-start path verification. */ + dev_fsm_event(cdev, DEV_EVENT_VERIFY); + } + } else + /* trigger path verification. 
*/ + dev_fsm_event(cdev, DEV_EVENT_VERIFY); + +} + +static int io_subchannel_chp_event(struct subchannel *sch, + struct chp_link *link, int event) +{ + int mask; + + mask = chp_ssd_get_mask(&sch->ssd_info, link); + if (!mask) + return 0; + switch (event) { + case CHP_VARY_OFF: + sch->opm &= ~mask; + sch->lpm &= ~mask; + io_subchannel_terminate_path(sch, mask); + break; + case CHP_VARY_ON: + sch->opm |= mask; + sch->lpm |= mask; + io_subchannel_verify(sch); + break; + case CHP_OFFLINE: + if (stsch(sch->schid, &sch->schib)) + return -ENXIO; + if (!css_sch_is_valid(&sch->schib)) + return -ENODEV; + io_subchannel_terminate_path(sch, mask); + break; + case CHP_ONLINE: + if (stsch(sch->schid, &sch->schib)) + return -ENXIO; + sch->lpm |= mask & sch->opm; + io_subchannel_verify(sch); + break; + } + return 0; +} + static void io_subchannel_shutdown(struct subchannel *sch) { @@ -1285,6 +1399,195 @@ io_subchannel_shutdown(struct subchannel *sch) cio_disable_subchannel(sch); } +static int io_subchannel_get_status(struct subchannel *sch) +{ + struct schib schib; + + if (stsch(sch->schid, &schib) || !schib.pmcw.dnv) + return CIO_GONE; + if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev)) + return CIO_REVALIDATE; + if (!sch->lpm) + return CIO_NO_PATH; + return CIO_OPER; +} + +static int device_is_disconnected(struct ccw_device *cdev) +{ + if (!cdev) + return 0; + return (cdev->private->state == DEV_STATE_DISCONNECTED || + cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID); +} + +static int recovery_check(struct device *dev, void *data) +{ + struct ccw_device *cdev = to_ccwdev(dev); + int *redo = data; + + spin_lock_irq(cdev->ccwlock); + switch (cdev->private->state) { + case DEV_STATE_DISCONNECTED: + CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n", + cdev->private->dev_id.ssid, + cdev->private->dev_id.devno); + dev_fsm_event(cdev, DEV_EVENT_VERIFY); + *redo = 1; + break; + case DEV_STATE_DISCONNECTED_SENSE_ID: + *redo = 1; + break; + } + spin_unlock_irq(cdev->ccwlock); + + return 0; +} + +static void recovery_work_func(struct work_struct *unused) +{ + int redo = 0; + + bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check); + if (redo) { + spin_lock_irq(&recovery_lock); + if (!timer_pending(&recovery_timer)) { + if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1) + recovery_phase++; + mod_timer(&recovery_timer, jiffies + + recovery_delay[recovery_phase] * HZ); + } + spin_unlock_irq(&recovery_lock); + } else + CIO_MSG_EVENT(4, "recovery: end\n"); +} + +static DECLARE_WORK(recovery_work, recovery_work_func); + +static void recovery_func(unsigned long data) +{ + /* + * We can't do our recovery in softirq context and it's not + * performance critical, so we schedule it. 
+ */ + schedule_work(&recovery_work); +} + +static void ccw_device_schedule_recovery(void) +{ + unsigned long flags; + + CIO_MSG_EVENT(4, "recovery: schedule\n"); + spin_lock_irqsave(&recovery_lock, flags); + if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) { + recovery_phase = 0; + mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ); + } + spin_unlock_irqrestore(&recovery_lock, flags); +} + +static void device_set_disconnected(struct ccw_device *cdev) +{ + if (!cdev) + return; + ccw_device_set_timeout(cdev, 0); + cdev->private->flags.fake_irb = 0; + cdev->private->state = DEV_STATE_DISCONNECTED; + if (cdev->online) + ccw_device_schedule_recovery(); +} + +static int io_subchannel_sch_event(struct subchannel *sch, int slow) +{ + int event, ret, disc; + unsigned long flags; + enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action; + struct ccw_device *cdev; + + spin_lock_irqsave(sch->lock, flags); + cdev = sch_get_cdev(sch); + disc = device_is_disconnected(cdev); + if (disc && slow) { + /* Disconnected devices are evaluated directly only.*/ + spin_unlock_irqrestore(sch->lock, flags); + return 0; + } + /* No interrupt after machine check - kill pending timers. */ + if (cdev) + ccw_device_set_timeout(cdev, 0); + if (!disc && !slow) { + /* Non-disconnected devices are evaluated on the slow path. */ + spin_unlock_irqrestore(sch->lock, flags); + return -EAGAIN; + } + event = io_subchannel_get_status(sch); + CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n", + sch->schid.ssid, sch->schid.sch_no, event, + disc ? "disconnected" : "normal", + slow ? "slow" : "fast"); + /* Analyze subchannel status. */ + action = NONE; + switch (event) { + case CIO_NO_PATH: + if (disc) { + /* Check if paths have become available. */ + action = REPROBE; + break; + } + /* fall through */ + case CIO_GONE: + /* Prevent unwanted effects when opening lock. */ + cio_disable_subchannel(sch); + device_set_disconnected(cdev); + /* Ask driver what to do with device. */ + action = UNREGISTER; + spin_unlock_irqrestore(sch->lock, flags); + ret = io_subchannel_notify(sch, event); + spin_lock_irqsave(sch->lock, flags); + if (ret) + action = NONE; + break; + case CIO_REVALIDATE: + /* Device will be removed, so no notify necessary. */ + if (disc) + /* Reprobe because immediate unregister might block. */ + action = REPROBE; + else + action = UNREGISTER_PROBE; + break; + case CIO_OPER: + if (disc) + /* Get device operational again. */ + action = REPROBE; + break; + } + /* Perform action. */ + ret = 0; + switch (action) { + case UNREGISTER: + case UNREGISTER_PROBE: + /* Unregister device (will use subchannel lock). */ + spin_unlock_irqrestore(sch->lock, flags); + css_sch_device_unregister(sch); + spin_lock_irqsave(sch->lock, flags); + + /* Reset intparm to zeroes. */ + sch->schib.pmcw.intparm = 0; + cio_modify(sch); + break; + case REPROBE: + ccw_device_trigger_reprobe(cdev); + break; + default: + break; + } + spin_unlock_irqrestore(sch->lock, flags); + /* Probe if necessary. 
*/ + if (action == UNREGISTER_PROBE) + ret = css_probe_device(sch->schid); + + return ret; +} + #ifdef CONFIG_CCW_CONSOLE static struct ccw_device console_cdev; static struct ccw_device_private console_private; @@ -1297,14 +1600,16 @@ spinlock_t * cio_get_console_lock(void) return &ccw_console_lock; } -static int -ccw_device_console_enable (struct ccw_device *cdev, struct subchannel *sch) +static int ccw_device_console_enable(struct ccw_device *cdev, + struct subchannel *sch) { int rc; /* Attach subchannel private data. */ sch->private = cio_get_console_priv(); memset(sch->private, 0, sizeof(struct io_subchannel_private)); + io_subchannel_init_fields(sch); + sch->driver = &io_subchannel_driver; /* Initialize the ccw_device structure. */ cdev->dev.parent= &sch->dev; rc = io_subchannel_recog(cdev, sch); @@ -1515,71 +1820,6 @@ ccw_device_get_subchannel_id(struct ccw_device *cdev) return sch->schid; } -static int recovery_check(struct device *dev, void *data) -{ - struct ccw_device *cdev = to_ccwdev(dev); - int *redo = data; - - spin_lock_irq(cdev->ccwlock); - switch (cdev->private->state) { - case DEV_STATE_DISCONNECTED: - CIO_MSG_EVENT(4, "recovery: trigger 0.%x.%04x\n", - cdev->private->dev_id.ssid, - cdev->private->dev_id.devno); - dev_fsm_event(cdev, DEV_EVENT_VERIFY); - *redo = 1; - break; - case DEV_STATE_DISCONNECTED_SENSE_ID: - *redo = 1; - break; - } - spin_unlock_irq(cdev->ccwlock); - - return 0; -} - -static void recovery_work_func(struct work_struct *unused) -{ - int redo = 0; - - bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check); - if (redo) { - spin_lock_irq(&recovery_lock); - if (!timer_pending(&recovery_timer)) { - if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1) - recovery_phase++; - mod_timer(&recovery_timer, jiffies + - recovery_delay[recovery_phase] * HZ); - } - spin_unlock_irq(&recovery_lock); - } else - CIO_MSG_EVENT(4, "recovery: end\n"); -} - -static DECLARE_WORK(recovery_work, recovery_work_func); - -static void recovery_func(unsigned long data) -{ - /* - * We can't do our recovery in softirq context and it's not - * performance critical, so we schedule it. - */ - schedule_work(&recovery_work); -} - -void ccw_device_schedule_recovery(void) -{ - unsigned long flags; - - CIO_MSG_EVENT(4, "recovery: schedule\n"); - spin_lock_irqsave(&recovery_lock, flags); - if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) { - recovery_phase = 0; - mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ); - } - spin_unlock_irqrestore(&recovery_lock, flags); -} - MODULE_LICENSE("GPL"); EXPORT_SYMBOL(ccw_device_set_online); EXPORT_SYMBOL(ccw_device_set_offline); diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h index cb08092be39f..9800a8335a3f 100644 --- a/drivers/s390/cio/device.h +++ b/drivers/s390/cio/device.h @@ -88,8 +88,6 @@ int ccw_device_recognition(struct ccw_device *); int ccw_device_online(struct ccw_device *); int ccw_device_offline(struct ccw_device *); -void ccw_device_schedule_recovery(void); - /* Function prototypes for device status and basic sense stuff. */ void ccw_device_accumulate_irb(struct ccw_device *, struct irb *); void ccw_device_accumulate_basic_sense(struct ccw_device *, struct irb *); @@ -118,6 +116,11 @@ int ccw_device_call_handler(struct ccw_device *); int ccw_device_stlck(struct ccw_device *); +/* Helper function for machine check handling. 
*/ +void ccw_device_trigger_reprobe(struct ccw_device *); +void ccw_device_kill_io(struct ccw_device *); +int ccw_device_notify(struct ccw_device *, int); + /* qdio needs this. */ void ccw_device_set_timeout(struct ccw_device *, int); extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *); diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index e268d5a77c12..8b5fe57fb2f3 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c @@ -2,8 +2,7 @@ * drivers/s390/cio/device_fsm.c * finite state machine for device handling * - * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, - * IBM Corporation + * Copyright IBM Corp. 2002,2008 * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com) * Martin Schwidefsky (schwidefsky@de.ibm.com) */ @@ -27,65 +26,6 @@ static int timeout_log_enabled; -int -device_is_online(struct subchannel *sch) -{ - struct ccw_device *cdev; - - cdev = sch_get_cdev(sch); - if (!cdev) - return 0; - return (cdev->private->state == DEV_STATE_ONLINE); -} - -int -device_is_disconnected(struct subchannel *sch) -{ - struct ccw_device *cdev; - - cdev = sch_get_cdev(sch); - if (!cdev) - return 0; - return (cdev->private->state == DEV_STATE_DISCONNECTED || - cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID); -} - -void -device_set_disconnected(struct subchannel *sch) -{ - struct ccw_device *cdev; - - cdev = sch_get_cdev(sch); - if (!cdev) - return; - ccw_device_set_timeout(cdev, 0); - cdev->private->flags.fake_irb = 0; - cdev->private->state = DEV_STATE_DISCONNECTED; - if (cdev->online) - ccw_device_schedule_recovery(); -} - -void device_set_intretry(struct subchannel *sch) -{ - struct ccw_device *cdev; - - cdev = sch_get_cdev(sch); - if (!cdev) - return; - cdev->private->flags.intretry = 1; -} - -int device_trigger_verify(struct subchannel *sch) -{ - struct ccw_device *cdev; - - cdev = sch_get_cdev(sch); - if (!cdev || !cdev->online) - return -EINVAL; - dev_fsm_event(cdev, DEV_EVENT_VERIFY); - return 0; -} - static int __init ccw_timeout_log_setup(char *unused) { timeout_log_enabled = 1; @@ -99,31 +39,43 @@ static void ccw_timeout_log(struct ccw_device *cdev) struct schib schib; struct subchannel *sch; struct io_subchannel_private *private; + union orb *orb; int cc; sch = to_subchannel(cdev->dev.parent); private = to_io_private(sch); + orb = &private->orb; cc = stsch(sch->schid, &schib); printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, " "device information:\n", get_clock()); printk(KERN_WARNING "cio: orb:\n"); print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, - &private->orb, sizeof(private->orb), 0); + orb, sizeof(*orb), 0); printk(KERN_WARNING "cio: ccw device bus id: %s\n", cdev->dev.bus_id); printk(KERN_WARNING "cio: subchannel bus id: %s\n", sch->dev.bus_id); printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, " "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm); - if ((void *)(addr_t)private->orb.cpa == &private->sense_ccw || - (void *)(addr_t)private->orb.cpa == cdev->private->iccws) - printk(KERN_WARNING "cio: last channel program (intern):\n"); - else - printk(KERN_WARNING "cio: last channel program:\n"); - - print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, - (void *)(addr_t)private->orb.cpa, - sizeof(struct ccw1), 0); + if (orb->tm.b) { + printk(KERN_WARNING "cio: orb indicates transport mode\n"); + printk(KERN_WARNING "cio: last tcw:\n"); + print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, + (void *)(addr_t)orb->tm.tcw, + sizeof(struct tcw), 0); 
+ } else { + printk(KERN_WARNING "cio: orb indicates command mode\n"); + if ((void *)(addr_t)orb->cmd.cpa == &private->sense_ccw || + (void *)(addr_t)orb->cmd.cpa == cdev->private->iccws) + printk(KERN_WARNING "cio: last channel program " + "(intern):\n"); + else + printk(KERN_WARNING "cio: last channel program:\n"); + + print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, + (void *)(addr_t)orb->cmd.cpa, + sizeof(struct ccw1), 0); + } printk(KERN_WARNING "cio: ccw device state: %d\n", cdev->private->state); printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc); @@ -171,18 +123,6 @@ ccw_device_set_timeout(struct ccw_device *cdev, int expires) add_timer(&cdev->private->timer); } -/* Kill any pending timers after machine check. */ -void -device_kill_pending_timer(struct subchannel *sch) -{ - struct ccw_device *cdev; - - cdev = sch_get_cdev(sch); - if (!cdev) - return; - ccw_device_set_timeout(cdev, 0); -} - /* * Cancel running i/o. This is called repeatedly since halt/clear are * asynchronous operations. We do one try with cio_cancel, two tries @@ -205,15 +145,18 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev) /* Not operational -> done. */ return 0; /* Stage 1: cancel io. */ - if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) && - !(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) { - ret = cio_cancel(sch); - if (ret != -EINVAL) - return ret; - /* cancel io unsuccessful. From now on it is asynchronous. */ + if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_HALT_PEND) && + !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) { + if (!scsw_is_tm(&sch->schib.scsw)) { + ret = cio_cancel(sch); + if (ret != -EINVAL) + return ret; + } + /* cancel io unsuccessful or not applicable (transport mode). + * Continue with asynchronous instructions. */ cdev->private->iretry = 3; /* 3 halt retries. */ } - if (!(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) { + if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) { /* Stage 2: halt io. */ if (cdev->private->iretry) { cdev->private->iretry--; @@ -388,34 +331,30 @@ ccw_device_sense_id_done(struct ccw_device *cdev, int err) } } +int ccw_device_notify(struct ccw_device *cdev, int event) +{ + if (!cdev->drv) + return 0; + if (!cdev->online) + return 0; + return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0; +} + static void ccw_device_oper_notify(struct work_struct *work) { struct ccw_device_private *priv; struct ccw_device *cdev; - struct subchannel *sch; int ret; - unsigned long flags; priv = container_of(work, struct ccw_device_private, kick_work); cdev = priv->cdev; - spin_lock_irqsave(cdev->ccwlock, flags); - sch = to_subchannel(cdev->dev.parent); - if (sch->driver && sch->driver->notify) { - spin_unlock_irqrestore(cdev->ccwlock, flags); - ret = sch->driver->notify(sch, CIO_OPER); - spin_lock_irqsave(cdev->ccwlock, flags); - } else - ret = 0; + ret = ccw_device_notify(cdev, CIO_OPER); if (ret) { /* Reenable channel measurements, if needed. */ - spin_unlock_irqrestore(cdev->ccwlock, flags); cmf_reenable(cdev); - spin_lock_irqsave(cdev->ccwlock, flags); wake_up(&cdev->private->wait_q); - } - spin_unlock_irqrestore(cdev->ccwlock, flags); - if (!ret) + } else /* Driver doesn't want device back. */ ccw_device_do_unreg_rereg(work); } @@ -621,10 +560,11 @@ ccw_device_verify_done(struct ccw_device *cdev, int err) /* Deliver fake irb to device driver, if needed. 
*/ if (cdev->private->flags.fake_irb) { memset(&cdev->private->irb, 0, sizeof(struct irb)); - cdev->private->irb.scsw.cc = 1; - cdev->private->irb.scsw.fctl = SCSW_FCTL_START_FUNC; - cdev->private->irb.scsw.actl = SCSW_ACTL_START_PEND; - cdev->private->irb.scsw.stctl = SCSW_STCTL_STATUS_PEND; + cdev->private->irb.scsw.cmd.cc = 1; + cdev->private->irb.scsw.cmd.fctl = SCSW_FCTL_START_FUNC; + cdev->private->irb.scsw.cmd.actl = SCSW_ACTL_START_PEND; + cdev->private->irb.scsw.cmd.stctl = + SCSW_STCTL_STATUS_PEND; cdev->private->flags.fake_irb = 0; if (cdev->handler) cdev->handler(cdev, cdev->private->intparm, @@ -718,13 +658,10 @@ ccw_device_offline(struct ccw_device *cdev) sch = to_subchannel(cdev->dev.parent); if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv) return -ENODEV; - if (cdev->private->state != DEV_STATE_ONLINE) { - if (sch->schib.scsw.actl != 0) - return -EBUSY; - return -EINVAL; - } - if (sch->schib.scsw.actl != 0) + if (scsw_actl(&sch->schib.scsw) != 0) return -EBUSY; + if (cdev->private->state != DEV_STATE_ONLINE) + return -EINVAL; /* Are we doing path grouping? */ if (!cdev->private->options.pgroup) { /* No, set state offline immediately. */ @@ -799,9 +736,9 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event) */ stsch(sch->schid, &sch->schib); - if (sch->schib.scsw.actl != 0 || - (sch->schib.scsw.stctl & SCSW_STCTL_STATUS_PEND) || - (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) { + if (scsw_actl(&sch->schib.scsw) != 0 || + (scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) || + (scsw_stctl(&cdev->private->irb.scsw) & SCSW_STCTL_STATUS_PEND)) { /* * No final status yet or final status not yet delivered * to the device driver. Can't do path verfication now, @@ -823,13 +760,13 @@ static void ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event) { struct irb *irb; + int is_cmd; irb = (struct irb *) __LC_IRB; + is_cmd = !scsw_is_tm(&irb->scsw); /* Check for unsolicited interrupt. */ - if ((irb->scsw.stctl == - (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) - && (!irb->scsw.cc)) { - if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) && + if (!scsw_is_solicited(&irb->scsw)) { + if (is_cmd && (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) && !irb->esw.esw0.erw.cons) { /* Unit check but no sense data. Need basic sense. */ if (ccw_device_do_sense(cdev, irb) != 0) @@ -848,7 +785,7 @@ call_handler_unsol: } /* Accumulate status and find out if a basic sense is needed. */ ccw_device_accumulate_irb(cdev, irb); - if (cdev->private->flags.dosense) { + if (is_cmd && cdev->private->flags.dosense) { if (ccw_device_do_sense(cdev, irb) == 0) { cdev->private->state = DEV_STATE_W4SENSE; } @@ -892,9 +829,9 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event) irb = (struct irb *) __LC_IRB; /* Check for unsolicited interrupt. */ - if (irb->scsw.stctl == - (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { - if (irb->scsw.cc == 1) + if (scsw_stctl(&irb->scsw) == + (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { + if (scsw_cc(&irb->scsw) == 1) /* Basic sense hasn't started. Try again. */ ccw_device_do_sense(cdev, irb); else { @@ -912,7 +849,8 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event) * only deliver the halt/clear interrupt to the device driver as if it * had killed the original request. */ - if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) { + if (scsw_fctl(&irb->scsw) & + (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) { /* Retry Basic Sense if requested. 
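/* Editorial sketch, not part of the patch: struct scsw has become
 * union scsw with command-mode (cmd) and transport-mode (tm) layouts.
 * Code on paths that may see either format uses the scsw_*() accessors,
 * which select the layout at run time; open-coded, an accessor such as
 * scsw_actl() amounts to the following (field names assume the tm
 * variant mirrors actl, as the scsw_actl() helper implies): */
static u32 example_scsw_actl(union scsw *scsw)
{
        if (scsw_is_tm(scsw))
                return scsw->tm.actl;   /* transport-mode layout */
        return scsw->cmd.actl;          /* command-mode layout */
}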
*/ if (cdev->private->flags.intretry) { cdev->private->flags.intretry = 0; @@ -986,12 +924,10 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event) ERR_PTR(-EIO)); } -void device_kill_io(struct subchannel *sch) +void ccw_device_kill_io(struct ccw_device *cdev) { int ret; - struct ccw_device *cdev; - cdev = sch_get_cdev(sch); ret = ccw_device_cancel_halt_clear(cdev); if (ret == -EBUSY) { ccw_device_set_timeout(cdev, 3*HZ); @@ -1021,9 +957,9 @@ ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event) case DEV_EVENT_INTERRUPT: irb = (struct irb *) __LC_IRB; /* Check for unsolicited interrupt. */ - if ((irb->scsw.stctl == + if ((scsw_stctl(&irb->scsw) == (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) && - (!irb->scsw.cc)) + (!scsw_cc(&irb->scsw))) /* FIXME: we should restart stlck here, but this * is extremely unlikely ... */ goto out_wakeup; @@ -1055,17 +991,14 @@ ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event) ccw_device_sense_id_start(cdev); } -void -device_trigger_reprobe(struct subchannel *sch) +void ccw_device_trigger_reprobe(struct ccw_device *cdev) { - struct ccw_device *cdev; + struct subchannel *sch; - cdev = sch_get_cdev(sch); - if (!cdev) - return; if (cdev->private->state != DEV_STATE_DISCONNECTED) return; + sch = to_subchannel(cdev->dev.parent); /* Update some values. */ if (stsch(sch->schid, &sch->schib)) return; @@ -1081,7 +1014,6 @@ device_trigger_reprobe(struct subchannel *sch) sch->schib.pmcw.ena = 0; if ((sch->lpm & (sch->lpm - 1)) != 0) sch->schib.pmcw.mp = 1; - sch->schib.pmcw.intparm = (u32)(addr_t)sch; /* We should also udate ssd info, but this has to wait. */ /* Check if this is another device which appeared on the same sch. */ if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c index cba7020517ed..1bdaa614e34f 100644 --- a/drivers/s390/cio/device_id.c +++ b/drivers/s390/cio/device_id.c @@ -196,7 +196,7 @@ ccw_device_check_sense_id(struct ccw_device *cdev) irb = &cdev->private->irb; /* Check the error cases. */ - if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { + if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { /* Retry Sense ID if requested. */ if (cdev->private->flags.intretry) { cdev->private->flags.intretry = 0; @@ -234,10 +234,10 @@ ccw_device_check_sense_id(struct ccw_device *cdev) irb->ecw[6], irb->ecw[7]); return -EAGAIN; } - if (irb->scsw.cc == 3) { + if (irb->scsw.cmd.cc == 3) { u8 lpm; - lpm = to_io_private(sch)->orb.lpm; + lpm = to_io_private(sch)->orb.cmd.lpm; if ((lpm & sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0) CIO_MSG_EVENT(4, "SenseID : path %02X for device %04x " "on subchannel 0.%x.%04x is " @@ -248,9 +248,9 @@ ccw_device_check_sense_id(struct ccw_device *cdev) } /* Did we get a proper answer ? 
*/ - if (irb->scsw.cc == 0 && cdev->private->senseid.cu_type != 0xFFFF && + if (irb->scsw.cmd.cc == 0 && cdev->private->senseid.cu_type != 0xFFFF && cdev->private->senseid.reserved == 0xFF) { - if (irb->scsw.count < sizeof(struct senseid) - 8) + if (irb->scsw.cmd.count < sizeof(struct senseid) - 8) cdev->private->flags.esid = 1; return 0; /* Success */ } @@ -260,7 +260,7 @@ ccw_device_check_sense_id(struct ccw_device *cdev) "subchannel 0.%x.%04x returns status %02X%02X\n", cdev->private->dev_id.devno, sch->schid.ssid, sch->schid.sch_no, - irb->scsw.dstat, irb->scsw.cstat); + irb->scsw.cmd.dstat, irb->scsw.cmd.cstat); return -EAGAIN; } @@ -277,9 +277,9 @@ ccw_device_sense_id_irq(struct ccw_device *cdev, enum dev_event dev_event) sch = to_subchannel(cdev->dev.parent); irb = (struct irb *) __LC_IRB; /* Retry sense id, if needed. */ - if (irb->scsw.stctl == + if (irb->scsw.cmd.stctl == (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { - if ((irb->scsw.cc == 1) || !irb->scsw.actl) { + if ((irb->scsw.cmd.cc == 1) || !irb->scsw.cmd.actl) { ret = __ccw_device_sense_id_start(cdev); if (ret && ret != -EBUSY) ccw_device_sense_id_done(cdev, ret); diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index f308ad55a6d5..ee1a28310fbb 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c @@ -17,6 +17,7 @@ #include <asm/ccwdev.h> #include <asm/idals.h> #include <asm/chpid.h> +#include <asm/fcx.h> #include "cio.h" #include "cio_debug.h" @@ -179,8 +180,8 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, return -EBUSY; } if (cdev->private->state != DEV_STATE_ONLINE || - ((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) && - !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) || + ((sch->schib.scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) && + !(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS)) || cdev->private->flags.doverify) return -EBUSY; ret = cio_set_options (sch, flags); @@ -379,7 +380,7 @@ int ccw_device_resume(struct ccw_device *cdev) if (cdev->private->state == DEV_STATE_NOT_OPER) return -ENODEV; if (cdev->private->state != DEV_STATE_ONLINE || - !(sch->schib.scsw.actl & SCSW_ACTL_SUSPENDED)) + !(sch->schib.scsw.cmd.actl & SCSW_ACTL_SUSPENDED)) return -EINVAL; return cio_resume(sch); } @@ -404,7 +405,7 @@ ccw_device_call_handler(struct ccw_device *cdev) * - fast notification was requested (primary status) * - unsolicited interrupts */ - stctl = cdev->private->irb.scsw.stctl; + stctl = scsw_stctl(&cdev->private->irb.scsw); ending_status = (stctl & SCSW_STCTL_SEC_STATUS) || (stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) || (stctl == SCSW_STCTL_STATUS_PEND); @@ -528,14 +529,15 @@ ccw_device_stlck(struct ccw_device *cdev) cio_disable_subchannel(sch); //FIXME: return code? goto out_unlock; } - cdev->private->irb.scsw.actl |= SCSW_ACTL_START_PEND; + cdev->private->irb.scsw.cmd.actl |= SCSW_ACTL_START_PEND; spin_unlock_irqrestore(sch->lock, flags); - wait_event(cdev->private->wait_q, cdev->private->irb.scsw.actl == 0); + wait_event(cdev->private->wait_q, + cdev->private->irb.scsw.cmd.actl == 0); spin_lock_irqsave(sch->lock, flags); cio_disable_subchannel(sch); //FIXME: return code? - if ((cdev->private->irb.scsw.dstat != + if ((cdev->private->irb.scsw.cmd.dstat != (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) || - (cdev->private->irb.scsw.cstat != 0)) + (cdev->private->irb.scsw.cmd.cstat != 0)) ret = -EIO; /* Clear irb. 
*/ memset(&cdev->private->irb, 0, sizeof(struct irb)); @@ -568,6 +570,122 @@ void ccw_device_get_id(struct ccw_device *cdev, struct ccw_dev_id *dev_id) } EXPORT_SYMBOL(ccw_device_get_id); +/** + * ccw_device_tm_start_key - perform start function + * @cdev: ccw device on which to perform the start function + * @tcw: transport-command word to be started + * @intparm: user defined parameter to be passed to the interrupt handler + * @lpm: mask of paths to use + * @key: storage key to use for storage access + * + * Start the tcw on the given ccw device. Return zero on success, non-zero + * otherwise. + */ +int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw, + unsigned long intparm, u8 lpm, u8 key) +{ + struct subchannel *sch; + int rc; + + sch = to_subchannel(cdev->dev.parent); + if (cdev->private->state != DEV_STATE_ONLINE) + return -EIO; + /* Adjust requested path mask to exclude varied off paths. */ + if (lpm) { + lpm &= sch->opm; + if (lpm == 0) + return -EACCES; + } + rc = cio_tm_start_key(sch, tcw, lpm, key); + if (rc == 0) + cdev->private->intparm = intparm; + return rc; +} +EXPORT_SYMBOL(ccw_device_tm_start_key); + +/** + * ccw_device_tm_start_timeout_key - perform start function + * @cdev: ccw device on which to perform the start function + * @tcw: transport-command word to be started + * @intparm: user defined parameter to be passed to the interrupt handler + * @lpm: mask of paths to use + * @key: storage key to use for storage access + * @expires: time span in jiffies after which to abort request + * + * Start the tcw on the given ccw device. Return zero on success, non-zero + * otherwise. + */ +int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw, + unsigned long intparm, u8 lpm, u8 key, + int expires) +{ + int ret; + + ccw_device_set_timeout(cdev, expires); + ret = ccw_device_tm_start_key(cdev, tcw, intparm, lpm, key); + if (ret != 0) + ccw_device_set_timeout(cdev, 0); + return ret; +} +EXPORT_SYMBOL(ccw_device_tm_start_timeout_key); + +/** + * ccw_device_tm_start - perform start function + * @cdev: ccw device on which to perform the start function + * @tcw: transport-command word to be started + * @intparm: user defined parameter to be passed to the interrupt handler + * @lpm: mask of paths to use + * + * Start the tcw on the given ccw device. Return zero on success, non-zero + * otherwise. + */ +int ccw_device_tm_start(struct ccw_device *cdev, struct tcw *tcw, + unsigned long intparm, u8 lpm) +{ + return ccw_device_tm_start_key(cdev, tcw, intparm, lpm, + PAGE_DEFAULT_KEY); +} +EXPORT_SYMBOL(ccw_device_tm_start); + +/** + * ccw_device_tm_start_timeout - perform start function + * @cdev: ccw device on which to perform the start function + * @tcw: transport-command word to be started + * @intparm: user defined parameter to be passed to the interrupt handler + * @lpm: mask of paths to use + * @expires: time span in jiffies after which to abort request + * + * Start the tcw on the given ccw device. Return zero on success, non-zero + * otherwise. + */ +int ccw_device_tm_start_timeout(struct ccw_device *cdev, struct tcw *tcw, + unsigned long intparm, u8 lpm, int expires) +{ + return ccw_device_tm_start_timeout_key(cdev, tcw, intparm, lpm, + PAGE_DEFAULT_KEY, expires); +} +EXPORT_SYMBOL(ccw_device_tm_start_timeout); + +/** + * ccw_device_tm_intrg - perform interrogate function + * @cdev: ccw device on which to perform the interrogate function + * + * Perform an interrogate function on the given ccw device.
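+ * The interrogate function retrieves information about a transport-mode I/O operation that is still in progress; the results are delivered through the interrogate tcw associated with the running tcw (see tcw_set_intrg()).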
Return zero on + * success, non-zero otherwise. + */ +int ccw_device_tm_intrg(struct ccw_device *cdev) +{ + struct subchannel *sch = to_subchannel(cdev->dev.parent); + + if (cdev->private->state != DEV_STATE_ONLINE) + return -EIO; + if (!scsw_is_tm(&sch->schib.scsw) || + !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_START_PEND)) + return -EINVAL; + return cio_tm_intrg(sch); +} +EXPORT_SYMBOL(ccw_device_tm_intrg); + // FIXME: these have to go: int diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c index 5cf7be008e98..86bc94eb607f 100644 --- a/drivers/s390/cio/device_pgid.c +++ b/drivers/s390/cio/device_pgid.c @@ -28,13 +28,13 @@ * Helper function called from interrupt context to decide whether an * operation should be tried again. */ -static int __ccw_device_should_retry(struct scsw *scsw) +static int __ccw_device_should_retry(union scsw *scsw) { /* CC is only valid if start function bit is set. */ - if ((scsw->fctl & SCSW_FCTL_START_FUNC) && scsw->cc == 1) + if ((scsw->cmd.fctl & SCSW_FCTL_START_FUNC) && scsw->cmd.cc == 1) return 1; /* No more activity. For sense and set PGID we stubbornly try again. */ - if (!scsw->actl) + if (!scsw->cmd.actl) return 1; return 0; } @@ -125,7 +125,7 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev) sch = to_subchannel(cdev->dev.parent); irb = &cdev->private->irb; - if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { + if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { /* Retry Sense PGID if requested. */ if (cdev->private->flags.intretry) { cdev->private->flags.intretry = 0; @@ -155,10 +155,10 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev) irb->ecw[6], irb->ecw[7]); return -EAGAIN; } - if (irb->scsw.cc == 3) { + if (irb->scsw.cmd.cc == 3) { u8 lpm; - lpm = to_io_private(sch)->orb.lpm; + lpm = to_io_private(sch)->orb.cmd.lpm; CIO_MSG_EVENT(3, "SNID - Device %04x on Subchannel 0.%x.%04x," " lpm %02X, became 'not operational'\n", cdev->private->dev_id.devno, sch->schid.ssid, @@ -188,7 +188,7 @@ ccw_device_sense_pgid_irq(struct ccw_device *cdev, enum dev_event dev_event) irb = (struct irb *) __LC_IRB; - if (irb->scsw.stctl == + if (irb->scsw.cmd.stctl == (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { if (__ccw_device_should_retry(&irb->scsw)) { ret = __ccw_device_sense_pgid_start(cdev); @@ -331,7 +331,7 @@ __ccw_device_check_pgid(struct ccw_device *cdev) sch = to_subchannel(cdev->dev.parent); irb = &cdev->private->irb; - if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { + if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { /* Retry Set PGID if requested. */ if (cdev->private->flags.intretry) { cdev->private->flags.intretry = 0; @@ -355,7 +355,7 @@ __ccw_device_check_pgid(struct ccw_device *cdev) irb->ecw[6], irb->ecw[7]); return -EAGAIN; } - if (irb->scsw.cc == 3) { + if (irb->scsw.cmd.cc == 3) { CIO_MSG_EVENT(3, "SPID - Device %04x on Subchannel 0.%x.%04x," " lpm %02X, became 'not operational'\n", cdev->private->dev_id.devno, sch->schid.ssid, @@ -376,7 +376,7 @@ static int __ccw_device_check_nop(struct ccw_device *cdev) sch = to_subchannel(cdev->dev.parent); irb = &cdev->private->irb; - if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { + if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { /* Retry NOP if requested.
*/ if (cdev->private->flags.intretry) { cdev->private->flags.intretry = 0; @@ -384,7 +384,7 @@ static int __ccw_device_check_nop(struct ccw_device *cdev) } return -ETIME; } - if (irb->scsw.cc == 3) { + if (irb->scsw.cmd.cc == 3) { CIO_MSG_EVENT(3, "NOP - Device %04x on Subchannel 0.%x.%04x," " lpm %02X, became 'not operational'\n", cdev->private->dev_id.devno, sch->schid.ssid, @@ -438,7 +438,7 @@ ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event) irb = (struct irb *) __LC_IRB; - if (irb->scsw.stctl == + if (irb->scsw.cmd.stctl == (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { if (__ccw_device_should_retry(&irb->scsw)) __ccw_device_verify_start(cdev); @@ -544,7 +544,7 @@ ccw_device_disband_irq(struct ccw_device *cdev, enum dev_event dev_event) irb = (struct irb *) __LC_IRB; - if (irb->scsw.stctl == + if (irb->scsw.cmd.stctl == (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { if (__ccw_device_should_retry(&irb->scsw)) __ccw_device_disband_start(cdev); diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c index 4a38993000f2..1b03c5423be2 100644 --- a/drivers/s390/cio/device_status.c +++ b/drivers/s390/cio/device_status.c @@ -29,9 +29,11 @@ static void ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb) { - if (!(irb->scsw.cstat & (SCHN_STAT_CHN_DATA_CHK | - SCHN_STAT_CHN_CTRL_CHK | - SCHN_STAT_INTF_CTRL_CHK))) + char dbf_text[15]; + + if (!scsw_is_valid_cstat(&irb->scsw) || + !(scsw_cstat(&irb->scsw) & (SCHN_STAT_CHN_DATA_CHK | + SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK))) return; CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check " "received" @@ -39,15 +41,10 @@ ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb) ": %02X sch_stat : %02X\n", cdev->private->dev_id.devno, cdev->private->schid.ssid, cdev->private->schid.sch_no, - irb->scsw.dstat, irb->scsw.cstat); - - if (irb->scsw.cc != 3) { - char dbf_text[15]; - - sprintf(dbf_text, "chk%x", cdev->private->schid.sch_no); - CIO_TRACE_EVENT(0, dbf_text); - CIO_HEX_EVENT(0, irb, sizeof (struct irb)); - } + scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw)); + sprintf(dbf_text, "chk%x", cdev->private->schid.sch_no); + CIO_TRACE_EVENT(0, dbf_text); + CIO_HEX_EVENT(0, irb, sizeof(struct irb)); } /* @@ -81,12 +78,12 @@ ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb) * are condition that have to be met for the extended control * bit to have meaning. Sick. */ - cdev->private->irb.scsw.ectl = 0; - if ((irb->scsw.stctl & SCSW_STCTL_ALERT_STATUS) && - !(irb->scsw.stctl & SCSW_STCTL_INTER_STATUS)) - cdev->private->irb.scsw.ectl = irb->scsw.ectl; + cdev->private->irb.scsw.cmd.ectl = 0; + if ((irb->scsw.cmd.stctl & SCSW_STCTL_ALERT_STATUS) && + !(irb->scsw.cmd.stctl & SCSW_STCTL_INTER_STATUS)) + cdev->private->irb.scsw.cmd.ectl = irb->scsw.cmd.ectl; /* Check if extended control word is valid. */ - if (!cdev->private->irb.scsw.ectl) + if (!cdev->private->irb.scsw.cmd.ectl) return; /* Copy concurrent sense / model dependent information. 
*/ memcpy (&cdev->private->irb.ecw, irb->ecw, sizeof (irb->ecw)); @@ -98,11 +95,12 @@ ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb) static int ccw_device_accumulate_esw_valid(struct irb *irb) { - if (!irb->scsw.eswf && irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) + if (!irb->scsw.cmd.eswf && + (irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND)) return 0; - if (irb->scsw.stctl == - (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND) && - !(irb->scsw.actl & SCSW_ACTL_SUSPENDED)) + if (irb->scsw.cmd.stctl == + (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND) && + !(irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED)) return 0; return 1; } @@ -125,7 +123,7 @@ ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb) cdev_irb->esw.esw1.lpum = irb->esw.esw1.lpum; /* Copy subchannel logout information if esw is of format 0. */ - if (irb->scsw.eswf) { + if (irb->scsw.cmd.eswf) { cdev_sublog = &cdev_irb->esw.esw0.sublog; sublog = &irb->esw.esw0.sublog; /* Copy extended status flags. */ @@ -134,7 +132,7 @@ ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb) * Copy fields that have a meaning for channel data check * channel control check and interface control check. */ - if (irb->scsw.cstat & (SCHN_STAT_CHN_DATA_CHK | + if (irb->scsw.cmd.cstat & (SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK)) { /* Copy ancillary report bit. */ @@ -155,7 +153,7 @@ ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb) /* Copy i/o-error alert. */ cdev_sublog->ioerr = sublog->ioerr; /* Copy channel path timeout bit. */ - if (irb->scsw.cstat & SCHN_STAT_INTF_CTRL_CHK) + if (irb->scsw.cmd.cstat & SCHN_STAT_INTF_CTRL_CHK) cdev_irb->esw.esw0.erw.cpt = irb->esw.esw0.erw.cpt; /* Copy failing storage address validity flag. */ cdev_irb->esw.esw0.erw.fsavf = irb->esw.esw0.erw.fsavf; @@ -200,24 +198,24 @@ ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb) * If not, the remaining bits have no meaning and we must ignore them. * The esw is not meaningful either... */ - if (!(irb->scsw.stctl & SCSW_STCTL_STATUS_PEND)) + if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND)) return; /* Check for channel checks and interface control checks. */ ccw_device_msg_control_check(cdev, irb); /* Check for path not operational. */ - if (irb->scsw.pno && irb->scsw.fctl != 0 && - (!(irb->scsw.stctl & SCSW_STCTL_INTER_STATUS) || - (irb->scsw.actl & SCSW_ACTL_SUSPENDED))) + if (scsw_is_valid_pno(&irb->scsw) && scsw_pno(&irb->scsw)) ccw_device_path_notoper(cdev); - + /* No irb accumulation for transport mode irbs. */ + if (scsw_is_tm(&irb->scsw)) { + memcpy(&cdev->private->irb, irb, sizeof(struct irb)); + return; + } /* * Don't accumulate unsolicited interrupts. */ - if ((irb->scsw.stctl == - (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) && - (!irb->scsw.cc) + if (!scsw_is_solicited(&irb->scsw)) return; cdev_irb = &cdev->private->irb; @@ -227,62 +225,63 @@ ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb) * status at the subchannel has been cleared and we must not pass * intermediate accumulated status to the device driver. */ - if (irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) + if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) memset(&cdev->private->irb, 0, sizeof(struct irb)); /* Copy bits which are valid only for the start function. */ - if (irb->scsw.fctl & SCSW_FCTL_START_FUNC) { + if (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) { /* Copy key.
*/ - cdev_irb->scsw.key = irb->scsw.key; + cdev_irb->scsw.cmd.key = irb->scsw.cmd.key; /* Copy suspend control bit. */ - cdev_irb->scsw.sctl = irb->scsw.sctl; + cdev_irb->scsw.cmd.sctl = irb->scsw.cmd.sctl; /* Accumulate deferred condition code. */ - cdev_irb->scsw.cc |= irb->scsw.cc; + cdev_irb->scsw.cmd.cc |= irb->scsw.cmd.cc; /* Copy ccw format bit. */ - cdev_irb->scsw.fmt = irb->scsw.fmt; + cdev_irb->scsw.cmd.fmt = irb->scsw.cmd.fmt; /* Copy prefetch bit. */ - cdev_irb->scsw.pfch = irb->scsw.pfch; + cdev_irb->scsw.cmd.pfch = irb->scsw.cmd.pfch; /* Copy initial-status-interruption-control. */ - cdev_irb->scsw.isic = irb->scsw.isic; + cdev_irb->scsw.cmd.isic = irb->scsw.cmd.isic; /* Copy address limit checking control. */ - cdev_irb->scsw.alcc = irb->scsw.alcc; + cdev_irb->scsw.cmd.alcc = irb->scsw.cmd.alcc; /* Copy suppress suspend bit. */ - cdev_irb->scsw.ssi = irb->scsw.ssi; + cdev_irb->scsw.cmd.ssi = irb->scsw.cmd.ssi; } /* Take care of the extended control bit and extended control word. */ ccw_device_accumulate_ecw(cdev, irb); /* Accumulate function control. */ - cdev_irb->scsw.fctl |= irb->scsw.fctl; + cdev_irb->scsw.cmd.fctl |= irb->scsw.cmd.fctl; /* Copy activity control. */ - cdev_irb->scsw.actl= irb->scsw.actl; + cdev_irb->scsw.cmd.actl = irb->scsw.cmd.actl; /* Accumulate status control. */ - cdev_irb->scsw.stctl |= irb->scsw.stctl; + cdev_irb->scsw.cmd.stctl |= irb->scsw.cmd.stctl; /* * Copy ccw address if it is valid. This is a bit simplified * but should be close enough for all practical purposes. */ - if ((irb->scsw.stctl & SCSW_STCTL_PRIM_STATUS) || - ((irb->scsw.stctl == + if ((irb->scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) || + ((irb->scsw.cmd.stctl == (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND)) && - (irb->scsw.actl & SCSW_ACTL_DEVACT) && - (irb->scsw.actl & SCSW_ACTL_SCHACT)) || - (irb->scsw.actl & SCSW_ACTL_SUSPENDED)) - cdev_irb->scsw.cpa = irb->scsw.cpa; + (irb->scsw.cmd.actl & SCSW_ACTL_DEVACT) && + (irb->scsw.cmd.actl & SCSW_ACTL_SCHACT)) || + (irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED)) + cdev_irb->scsw.cmd.cpa = irb->scsw.cmd.cpa; /* Accumulate device status, but not the device busy flag. */ - cdev_irb->scsw.dstat &= ~DEV_STAT_BUSY; + cdev_irb->scsw.cmd.dstat &= ~DEV_STAT_BUSY; /* dstat is not always valid. */ - if (irb->scsw.stctl & + if (irb->scsw.cmd.stctl & (SCSW_STCTL_PRIM_STATUS | SCSW_STCTL_SEC_STATUS | SCSW_STCTL_INTER_STATUS | SCSW_STCTL_ALERT_STATUS)) - cdev_irb->scsw.dstat |= irb->scsw.dstat; + cdev_irb->scsw.cmd.dstat |= irb->scsw.cmd.dstat; /* Accumulate subchannel status. */ - cdev_irb->scsw.cstat |= irb->scsw.cstat; + cdev_irb->scsw.cmd.cstat |= irb->scsw.cmd.cstat; /* Copy residual count if it is valid. */ - if ((irb->scsw.stctl & SCSW_STCTL_PRIM_STATUS) && - (irb->scsw.cstat & ~(SCHN_STAT_PCI | SCHN_STAT_INCORR_LEN)) == 0) - cdev_irb->scsw.count = irb->scsw.count; + if ((irb->scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) && + (irb->scsw.cmd.cstat & ~(SCHN_STAT_PCI | SCHN_STAT_INCORR_LEN)) + == 0) + cdev_irb->scsw.cmd.count = irb->scsw.cmd.count; /* Take care of bits in the extended status word. */ ccw_device_accumulate_esw(cdev, irb); @@ -299,7 +298,7 @@ ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb) * sense facility available/supported when enabling the * concurrent sense facility. 
*/ - if ((cdev_irb->scsw.dstat & DEV_STAT_UNIT_CHECK) && + if ((cdev_irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) && !(cdev_irb->esw.esw0.erw.cons)) cdev->private->flags.dosense = 1; } @@ -317,7 +316,7 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb) sch = to_subchannel(cdev->dev.parent); /* A sense is required, can we do it now ? */ - if ((irb->scsw.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0) + if (scsw_actl(&irb->scsw) & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) /* * we received a Unit Check but we have no final * status yet, therefore we must delay the SENSE @@ -355,20 +354,18 @@ ccw_device_accumulate_basic_sense(struct ccw_device *cdev, struct irb *irb) * If not, the remaining bits have no meaning and we must ignore them. * The esw is not meaningful either... */ - if (!(irb->scsw.stctl & SCSW_STCTL_STATUS_PEND)) + if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND)) return; /* Check for channel checks and interface control checks. */ ccw_device_msg_control_check(cdev, irb); /* Check for path not operational. */ - if (irb->scsw.pno && irb->scsw.fctl != 0 && - (!(irb->scsw.stctl & SCSW_STCTL_INTER_STATUS) || - (irb->scsw.actl & SCSW_ACTL_SUSPENDED))) + if (scsw_is_valid_pno(&irb->scsw) && scsw_pno(&irb->scsw)) ccw_device_path_notoper(cdev); - if (!(irb->scsw.dstat & DEV_STAT_UNIT_CHECK) && - (irb->scsw.dstat & DEV_STAT_CHN_END)) { + if (!(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) && + (irb->scsw.cmd.dstat & DEV_STAT_CHN_END)) { cdev->private->irb.esw.esw0.erw.cons = 1; cdev->private->flags.dosense = 0; } @@ -386,11 +383,11 @@ int ccw_device_accumulate_and_sense(struct ccw_device *cdev, struct irb *irb) { ccw_device_accumulate_irb(cdev, irb); - if ((irb->scsw.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0) + if ((irb->scsw.cmd.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0) return -EBUSY; /* Check for basic sense. */ if (cdev->private->flags.dosense && - !(irb->scsw.dstat & DEV_STAT_UNIT_CHECK)) { + !(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)) { cdev->private->irb.esw.esw0.erw.cons = 1; cdev->private->flags.dosense = 0; return 0; diff --git a/drivers/s390/cio/fcx.c b/drivers/s390/cio/fcx.c new file mode 100644 index 000000000000..61677dfbdc9b --- /dev/null +++ b/drivers/s390/cio/fcx.c @@ -0,0 +1,350 @@ +/* + * Functions for assembling fcx enabled I/O control blocks. + * + * Copyright IBM Corp. 2008 + * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/string.h> +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/module.h> +#include <asm/fcx.h> +#include "cio.h" + +/** + * tcw_get_intrg - return pointer to associated interrogate tcw + * @tcw: pointer to the original tcw + * + * Return a pointer to the interrogate tcw associated with the specified tcw + * or %NULL if there is no associated interrogate tcw. + */ +struct tcw *tcw_get_intrg(struct tcw *tcw) +{ + return (struct tcw *) ((addr_t) tcw->intrg); +} +EXPORT_SYMBOL(tcw_get_intrg); + +/** + * tcw_get_data - return pointer to input/output data associated with tcw + * @tcw: pointer to the tcw + * + * Return the input or output data address specified in the tcw depending + * on whether the r-bit or the w-bit is set. If neither bit is set, return + * %NULL.
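+ *
+ * Example (an illustrative sketch, not part of this patch; assumes a tcw
+ * whose data address was set up as a tidaw list via
+ * tcw_set_data(tcw, tidaws, 1)):
+ *	struct tidaw *tidaws = tcw_get_data(tcw);
+ *
+ *	if (!tidaws)
+ *		return -EINVAL;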
+ */ +void *tcw_get_data(struct tcw *tcw) +{ + if (tcw->r) + return (void *) ((addr_t) tcw->input); + if (tcw->w) + return (void *) ((addr_t) tcw->output); + return NULL; +} +EXPORT_SYMBOL(tcw_get_data); + +/** + * tcw_get_tccb - return pointer to tccb associated with tcw + * @tcw: pointer to the tcw + * + * Return pointer to the tccb associated with this tcw. + */ +struct tccb *tcw_get_tccb(struct tcw *tcw) +{ + return (struct tccb *) ((addr_t) tcw->tccb); +} +EXPORT_SYMBOL(tcw_get_tccb); + +/** + * tcw_get_tsb - return pointer to tsb associated with tcw + * @tcw: pointer to the tcw + * + * Return pointer to the tsb associated with this tcw. + */ +struct tsb *tcw_get_tsb(struct tcw *tcw) +{ + return (struct tsb *) ((addr_t) tcw->tsb); +} +EXPORT_SYMBOL(tcw_get_tsb); + +/** + * tcw_init - initialize tcw data structure + * @tcw: pointer to the tcw to be initialized + * @r: initial value of the r-bit + * @w: initial value of the w-bit + * + * Initialize all fields of the specified tcw data structure with zero and + * fill in the format, flags, r and w fields. + */ +void tcw_init(struct tcw *tcw, int r, int w) +{ + memset(tcw, 0, sizeof(struct tcw)); + tcw->format = TCW_FORMAT_DEFAULT; + tcw->flags = TCW_FLAGS_TIDAW_FORMAT(TCW_TIDAW_FORMAT_DEFAULT); + if (r) + tcw->r = 1; + if (w) + tcw->w = 1; +} +EXPORT_SYMBOL(tcw_init); + +static inline size_t tca_size(struct tccb *tccb) +{ + return tccb->tcah.tcal - 12; +} + +static u32 calc_dcw_count(struct tccb *tccb) +{ + int offset; + struct dcw *dcw; + u32 count = 0; + size_t size; + + size = tca_size(tccb); + for (offset = 0; offset < size;) { + dcw = (struct dcw *) &tccb->tca[offset]; + count += dcw->count; + if (!(dcw->flags & DCW_FLAGS_CC)) + break; + offset += sizeof(struct dcw) + ALIGN((int) dcw->cd_count, 4); + } + return count; +} + +static u32 calc_cbc_size(struct tidaw *tidaw, int num) +{ + int i; + u32 cbc_data; + u32 cbc_count = 0; + u64 data_count = 0; + + for (i = 0; i < num; i++) { + if (tidaw[i].flags & TIDAW_FLAGS_LAST) + break; + /* TODO: find out if padding applies to total of data + * transferred or data transferred by this tidaw. Assumption: + * applies to total. */ + data_count += tidaw[i].count; + if (tidaw[i].flags & TIDAW_FLAGS_INSERT_CBC) { + cbc_data = 4 + ALIGN(data_count, 4) - data_count; + cbc_count += cbc_data; + data_count += cbc_data; + } + } + return cbc_count; +} + +/** + * tcw_finalize - finalize tcw length fields and tidaw list + * @tcw: pointer to the tcw + * @num_tidaws: the number of tidaws used to address input/output data or zero + * if no tida is used + * + * Calculate the input-/output-count and tccbl field in the tcw, add a + * tcat to the tccb and terminate the data tidaw list if used. + * + * Note: in case input- or output-tida is used, the tidaw-list must be stored + * in contiguous storage (no ttic). The tcal field in the tccb must be + * up-to-date. + */ +void tcw_finalize(struct tcw *tcw, int num_tidaws) +{ + struct tidaw *tidaw; + struct tccb *tccb; + struct tccb_tcat *tcat; + u32 count; + + /* Terminate tidaw list. */ + tidaw = tcw_get_data(tcw); + if (num_tidaws > 0) + tidaw[num_tidaws - 1].flags |= TIDAW_FLAGS_LAST; + /* Add tcat to tccb. */ + tccb = tcw_get_tccb(tcw); + tcat = (struct tccb_tcat *) &tccb->tca[tca_size(tccb)]; + memset(tcat, 0, sizeof(*tcat)); + /* Calculate tcw input/output count and tcat transport count.
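+	 * (count below is the sum of all dcw byte counts; for an output tida
+	 * the check-byte estimate from calc_cbc_size() is added, and the tcat
+	 * transport count becomes the 4-byte-aligned total plus 4.)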
*/ + count = calc_dcw_count(tccb); + if (tcw->w && (tcw->flags & TCW_FLAGS_OUTPUT_TIDA)) + count += calc_cbc_size(tidaw, num_tidaws); + if (tcw->r) + tcw->input_count = count; + else if (tcw->w) + tcw->output_count = count; + tcat->count = ALIGN(count, 4) + 4; + /* Calculate tccbl. */ + tcw->tccbl = (sizeof(struct tccb) + tca_size(tccb) + + sizeof(struct tccb_tcat) - 20) >> 2; +} +EXPORT_SYMBOL(tcw_finalize); + +/** + * tcw_set_intrg - set the interrogate tcw address of a tcw + * @tcw: the tcw address + * @intrg_tcw: the address of the interrogate tcw + * + * Set the address of the interrogate tcw in the specified tcw. + */ +void tcw_set_intrg(struct tcw *tcw, struct tcw *intrg_tcw) +{ + tcw->intrg = (u32) ((addr_t) intrg_tcw); +} +EXPORT_SYMBOL(tcw_set_intrg); + +/** + * tcw_set_data - set data address and tida flag of a tcw + * @tcw: the tcw address + * @data: the data address + * @use_tidal: zero if the data address specifies a contiguous block of data, + * non-zero if it specifies a list of tidaws. + * + * Set the input/output data address of a tcw (depending on the value of the + * r-flag and w-flag). If @use_tidal is non-zero, the corresponding tida flag + * is set as well. + */ +void tcw_set_data(struct tcw *tcw, void *data, int use_tidal) +{ + if (tcw->r) { + tcw->input = (u64) ((addr_t) data); + if (use_tidal) + tcw->flags |= TCW_FLAGS_INPUT_TIDA; + } else if (tcw->w) { + tcw->output = (u64) ((addr_t) data); + if (use_tidal) + tcw->flags |= TCW_FLAGS_OUTPUT_TIDA; + } +} +EXPORT_SYMBOL(tcw_set_data); + +/** + * tcw_set_tccb - set tccb address of a tcw + * @tcw: the tcw address + * @tccb: the tccb address + * + * Set the address of the tccb in the specified tcw. + */ +void tcw_set_tccb(struct tcw *tcw, struct tccb *tccb) +{ + tcw->tccb = (u64) ((addr_t) tccb); +} +EXPORT_SYMBOL(tcw_set_tccb); + +/** + * tcw_set_tsb - set tsb address of a tcw + * @tcw: the tcw address + * @tsb: the tsb address + * + * Set the address of the tsb in the specified tcw. + */ +void tcw_set_tsb(struct tcw *tcw, struct tsb *tsb) +{ + tcw->tsb = (u64) ((addr_t) tsb); +} +EXPORT_SYMBOL(tcw_set_tsb); + +/** + * tccb_init - initialize tccb + * @tccb: the tccb address + * @size: the maximum size of the tccb + * @sac: the service-action-code to be used + * + * Initialize the header of the specified tccb by resetting all values to zero + * and filling in defaults for format, sac and initial tcal fields. + */ +void tccb_init(struct tccb *tccb, size_t size, u32 sac) +{ + memset(tccb, 0, size); + tccb->tcah.format = TCCB_FORMAT_DEFAULT; + tccb->tcah.sac = sac; + tccb->tcah.tcal = 12; +} +EXPORT_SYMBOL(tccb_init); + +/** + * tsb_init - initialize tsb + * @tsb: the tsb address + * + * Initialize the specified tsb by resetting all values to zero. + */ +void tsb_init(struct tsb *tsb) +{ + memset(tsb, 0, sizeof(*tsb)); +} +EXPORT_SYMBOL(tsb_init); + +/** + * tccb_add_dcw - add a dcw to the tccb + * @tccb: the tccb address + * @tccb_size: the maximum tccb size + * @cmd: the dcw command + * @flags: flags for the dcw + * @cd: pointer to control data for this dcw or NULL if none is required + * @cd_count: number of control data bytes for this dcw + * @count: number of data bytes for this dcw + * + * Add a new dcw to the specified tccb by writing the dcw information specified + * by @cmd, @flags, @cd, @cd_count and @count to the tca of the tccb. Return + * a pointer to the newly added dcw on success or -%ENOSPC if the new dcw + * would exceed the available space as defined by @tccb_size.
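+ *
+ * Example (an illustrative sketch; the command byte and counts are
+ * placeholder values, not taken from this patch):
+ *	struct dcw *dcw;
+ *
+ *	dcw = tccb_add_dcw(tccb, TCCB_MAX_SIZE, 0x02, 0, NULL, 0, 4096);
+ *	if (IS_ERR(dcw))
+ *		return PTR_ERR(dcw);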
+ * + * Note: the tcal field of the tccb header will be updated to reflect added + * content. + */ +struct dcw *tccb_add_dcw(struct tccb *tccb, size_t tccb_size, u8 cmd, u8 flags, + void *cd, u8 cd_count, u32 count) +{ + struct dcw *dcw; + int size; + int tca_offset; + + /* Check for space. */ + tca_offset = tca_size(tccb); + size = ALIGN(sizeof(struct dcw) + cd_count, 4); + if (sizeof(struct tccb_tcah) + tca_offset + size + + sizeof(struct tccb_tcat) > tccb_size) + return ERR_PTR(-ENOSPC); + /* Add dcw to tca. */ + dcw = (struct dcw *) &tccb->tca[tca_offset]; + memset(dcw, 0, size); + dcw->cmd = cmd; + dcw->flags = flags; + dcw->count = count; + dcw->cd_count = cd_count; + if (cd) + memcpy(&dcw->cd[0], cd, cd_count); + tccb->tcah.tcal += size; + return dcw; +} +EXPORT_SYMBOL(tccb_add_dcw); + +/** + * tcw_add_tidaw - add a tidaw to a tcw + * @tcw: the tcw address + * @num_tidaws: the current number of tidaws + * @flags: flags for the new tidaw + * @addr: address value for the new tidaw + * @count: count value for the new tidaw + * + * Add a new tidaw to the input/output data tidaw-list of the specified tcw + * (depending on the value of the r-flag and w-flag) and return a pointer to + * the new tidaw. + * + * Note: the tidaw-list is assumed to be contiguous with no ttics. The caller + * must ensure that there is enough space for the new tidaw. The last-tidaw + * flag for the last tidaw in the list will be set by tcw_finalize. + */ +struct tidaw *tcw_add_tidaw(struct tcw *tcw, int num_tidaws, u8 flags, + void *addr, u32 count) +{ + struct tidaw *tidaw; + + /* Add tidaw to tidaw-list. */ + tidaw = ((struct tidaw *) tcw_get_data(tcw)) + num_tidaws; + memset(tidaw, 0, sizeof(struct tidaw)); + tidaw->flags = flags; + tidaw->count = count; + tidaw->addr = (u64) ((addr_t) addr); + return tidaw; +} +EXPORT_SYMBOL(tcw_add_tidaw); diff --git a/drivers/s390/cio/idset.h b/drivers/s390/cio/idset.h index 144466ab8c15..528065cb5021 100644 --- a/drivers/s390/cio/idset.h +++ b/drivers/s390/cio/idset.h @@ -8,7 +8,7 @@ #ifndef S390_IDSET_H #define S390_IDSET_H S390_IDSET_H -#include "schid.h" +#include <asm/schid.h> struct idset; diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h index 8c613160bfce..3f8f1cf69c76 100644 --- a/drivers/s390/cio/io_sch.h +++ b/drivers/s390/cio/io_sch.h @@ -1,12 +1,12 @@ #ifndef S390_IO_SCH_H #define S390_IO_SCH_H -#include "schid.h" +#include <asm/schid.h> /* - * operation request block + * command-mode operation request block */ -struct orb { +struct cmd_orb { u32 intparm; /* interruption parameter */ u32 key : 4; /* flags, like key, suspend control, etc.
*/ u32 spnd : 1; /* suspend control */ @@ -28,8 +28,36 @@ struct orb { u32 cpa; /* channel program address */ } __attribute__ ((packed, aligned(4))); +/* + * transport-mode operation request block + */ +struct tm_orb { + u32 intparm; + u32 key:4; + u32 :9; + u32 b:1; + u32 :2; + u32 lpm:8; + u32 :7; + u32 x:1; + u32 tcw; + u32 prio:8; + u32 :8; + u32 rsvpgm:8; + u32 :8; + u32 :32; + u32 :32; + u32 :32; + u32 :32; +} __attribute__ ((packed, aligned(4))); + +union orb { + struct cmd_orb cmd; + struct tm_orb tm; +} __attribute__ ((packed, aligned(4))); + struct io_subchannel_private { - struct orb orb; /* operation request block */ + union orb orb; /* operation request block */ struct ccw1 sense_ccw; /* static ccw for sense command */ } __attribute__ ((aligned(8))); @@ -95,16 +123,18 @@ struct ccw_device_private { void *cmb_wait; /* deferred cmb enable/disable */ }; -static inline int ssch(struct subchannel_id schid, volatile struct orb *addr) +static inline int ssch(struct subchannel_id schid, volatile union orb *addr) { register struct subchannel_id reg1 asm("1") = schid; - int ccode; + int ccode = -EIO; asm volatile( " ssch 0(%2)\n" - " ipm %0\n" - " srl %0,28" - : "=d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc"); + "0: ipm %0\n" + " srl %0,28\n" + "1:\n" + EX_TABLE(0b, 1b) + : "+d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc"); return ccode; } diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h index 652ea3625f9d..9fa2ac13ac85 100644 --- a/drivers/s390/cio/ioasm.h +++ b/drivers/s390/cio/ioasm.h @@ -2,7 +2,7 @@ #define S390_CIO_IOASM_H #include <asm/chpid.h> -#include "schid.h" +#include <asm/schid.h> /* * TPI info structure diff --git a/drivers/s390/cio/isc.c b/drivers/s390/cio/isc.c new file mode 100644 index 000000000000..c592087be0f1 --- /dev/null +++ b/drivers/s390/cio/isc.c @@ -0,0 +1,68 @@ +/* + * Functions for registration of I/O interruption subclasses on s390. + * + * Copyright IBM Corp. 2008 + * Authors: Sebastian Ott <sebott@linux.vnet.ibm.com> + */ + +#include <linux/spinlock.h> +#include <linux/module.h> +#include <asm/isc.h> + +static unsigned int isc_refs[MAX_ISC + 1]; +static DEFINE_SPINLOCK(isc_ref_lock); + + +/** + * isc_register - register an I/O interruption subclass. + * @isc: I/O interruption subclass to register + * + * The number of users for @isc is increased. If this is the first user to + * register @isc, the corresponding I/O interruption subclass mask is enabled. + * + * Context: + * This function must not be called in interrupt context. + */ +void isc_register(unsigned int isc) +{ + if (isc > MAX_ISC) { + WARN_ON(1); + return; + } + + spin_lock(&isc_ref_lock); + if (isc_refs[isc] == 0) + ctl_set_bit(6, 31 - isc); + isc_refs[isc]++; + spin_unlock(&isc_ref_lock); +} +EXPORT_SYMBOL_GPL(isc_register); + +/** + * isc_unregister - unregister an I/O interruption subclass. + * @isc: I/O interruption subclass to unregister + * + * The number of users for @isc is decreased. If this is the last user to + * unregister @isc, the corresponding I/O interruption subclass mask is + * disabled. + * Note: This function must not be called if isc_register() hasn't been called + * before by the driver for @isc. + * + * Context: + * This function must not be called in interrupt context. 
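+ *
+ * Example (an illustrative sketch of the expected register/unregister
+ * pairing; IO_SCH_ISC is the isc a driver would have registered earlier):
+ *	isc_register(IO_SCH_ISC);
+ *	... enable subchannels for this isc and perform I/O ...
+ *	isc_unregister(IO_SCH_ISC);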
+ */ +void isc_unregister(unsigned int isc) +{ + spin_lock(&isc_ref_lock); + /* check for misuse */ + if (isc > MAX_ISC || isc_refs[isc] == 0) { + WARN_ON(1); + goto out_unlock; + } + if (isc_refs[isc] == 1) + ctl_clear_bit(6, 31 - isc); + isc_refs[isc]--; +out_unlock: + spin_unlock(&isc_ref_lock); +} +EXPORT_SYMBOL_GPL(isc_unregister); diff --git a/drivers/s390/cio/itcw.c b/drivers/s390/cio/itcw.c new file mode 100644 index 000000000000..17da9ab932ed --- /dev/null +++ b/drivers/s390/cio/itcw.c @@ -0,0 +1,327 @@ +/* + * Functions for incremental construction of fcx enabled I/O control blocks. + * + * Copyright IBM Corp. 2008 + * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/string.h> +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/module.h> +#include <asm/fcx.h> +#include <asm/itcw.h> + +/** + * struct itcw - incremental tcw helper data type + * + * This structure serves as a handle for the incremental construction of a + * tcw and associated tccb, tsb, data tidaw-list plus an optional interrogate + * tcw and associated data. The data structures are contained inside a single + * contiguous buffer provided by the user. + * + * The itcw construction functions take care of overall data integrity: + * - reset unused fields to zero + * - fill in required pointers + * - ensure required alignment for data structures + * - prevent data structures from crossing a 4k-byte boundary where required + * - calculate tccb-related length fields + * - optionally provide ready-made interrogate tcw and associated structures + * + * Restrictions apply to the itcws created with these construction functions: + * - tida only supported for data address, not for tccb + * - only contiguous tidaw-lists (no ttic) + * - total number of bytes required per itcw may not exceed 4k bytes + * - either read or write operation (may not work with r=0 and w=0) + * + * Example: + * struct itcw *itcw; + * void *buffer; + * size_t size; + * + * size = itcw_calc_size(1, 2, 0); + * buffer = kmalloc(size, GFP_DMA); + * if (!buffer) + * return -ENOMEM; + * itcw = itcw_init(buffer, size, ITCW_OP_READ, 1, 2, 0); + * if (IS_ERR(itcw)) + * return PTR_ERR(itcw); + * itcw_add_dcw(itcw, 0x2, 0, NULL, 0, 72); + * itcw_add_tidaw(itcw, 0, 0x30000, 20); + * itcw_add_tidaw(itcw, 0, 0x40000, 52); + * itcw_finalize(itcw); + * + */ +struct itcw { + struct tcw *tcw; + struct tcw *intrg_tcw; + int num_tidaws; + int max_tidaws; + int intrg_num_tidaws; + int intrg_max_tidaws; +}; + +/** + * itcw_get_tcw - return pointer to tcw associated with the itcw + * @itcw: address of the itcw + * + * Return pointer to the tcw associated with the itcw. + */ +struct tcw *itcw_get_tcw(struct itcw *itcw) +{ + return itcw->tcw; +} +EXPORT_SYMBOL(itcw_get_tcw); + +/** + * itcw_calc_size - return the size of an itcw with the given parameters + * @intrg: if non-zero, add an interrogate tcw + * @max_tidaws: maximum number of tidaws to be used for data addressing or zero + * if no tida is to be used. + * @intrg_max_tidaws: maximum number of tidaws to be used for data addressing + * by the interrogate tcw, if specified + * + * Calculate and return the number of bytes required to hold an itcw with the + * given parameters and assuming tccbs with maximum size. + * + * Note that the resulting size also contains bytes needed for alignment + * padding as well as padding to ensure that data structures don't cross a + * 4k-boundary where required.
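+ *
+ * Example (an illustrative sketch; sizing a read itcw with two data
+ * tidaws and no interrogate tcw):
+ *	size = itcw_calc_size(0, 2, 0);
+ *	buffer = kmalloc(size, GFP_DMA);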
+ */ +size_t itcw_calc_size(int intrg, int max_tidaws, int intrg_max_tidaws) +{ + size_t len; + + /* Main data. */ + len = sizeof(struct itcw); + len += /* TCW */ sizeof(struct tcw) + /* TCCB */ TCCB_MAX_SIZE + + /* TSB */ sizeof(struct tsb) + + /* TIDAL */ max_tidaws * sizeof(struct tidaw); + /* Interrogate data. */ + if (intrg) { + len += /* TCW */ sizeof(struct tcw) + /* TCCB */ TCCB_MAX_SIZE + + /* TSB */ sizeof(struct tsb) + + /* TIDAL */ intrg_max_tidaws * sizeof(struct tidaw); + } + /* Maximum required alignment padding. */ + len += /* Initial TCW */ 63 + /* Interrogate TCCB */ 7; + /* Maximum padding for structures that may not cross 4k boundary. */ + if ((max_tidaws > 0) || (intrg_max_tidaws > 0)) + len += max(max_tidaws, intrg_max_tidaws) * + sizeof(struct tidaw) - 1; + return len; +} +EXPORT_SYMBOL(itcw_calc_size); + +#define CROSS4K(x, l) (((x) & ~4095) != ((x + l) & ~4095)) + +static inline void *fit_chunk(addr_t *start, addr_t end, size_t len, + int align, int check_4k) +{ + addr_t addr; + + addr = ALIGN(*start, align); + if (check_4k && CROSS4K(addr, len)) { + addr = ALIGN(addr, 4096); + addr = ALIGN(addr, align); + } + if (addr + len > end) + return ERR_PTR(-ENOSPC); + *start = addr + len; + return (void *) addr; +} + +/** + * itcw_init - initialize incremental tcw data structure + * @buffer: address of buffer to use for data structures + * @size: number of bytes in buffer + * @op: %ITCW_OP_READ for a read operation tcw, %ITCW_OP_WRITE for a write + * operation tcw + * @intrg: if non-zero, add and initialize an interrogate tcw + * @max_tidaws: maximum number of tidaws to be used for data addressing or zero + * if no tida is to be used. + * @intrg_max_tidaws: maximum number of tidaws to be used for data addressing + * by the interrogate tcw, if specified + * + * Prepare the specified buffer to be used as an incremental tcw, i.e. a + * helper data structure that can be used to construct a valid tcw by + * successive calls to other helper functions. Note: the buffer needs to be + * located below the 2G address limit. The resulting tcw has the following + * restrictions: + * - no tccb tidal + * - input/output tidal is contiguous (no ttic) + * - total data should not exceed 4k + * - tcw specifies either read or write operation + * + * On success, return pointer to the resulting incremental tcw data structure, + * ERR_PTR otherwise. + */ +struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg, + int max_tidaws, int intrg_max_tidaws) +{ + struct itcw *itcw; + void *chunk; + addr_t start; + addr_t end; + + /* Check for 2G limit. */ + start = (addr_t) buffer; + end = start + size; + if (end > (1 << 31)) + return ERR_PTR(-EINVAL); + memset(buffer, 0, size); + /* ITCW. */ + chunk = fit_chunk(&start, end, sizeof(struct itcw), 1, 0); + if (IS_ERR(chunk)) + return chunk; + itcw = chunk; + itcw->max_tidaws = max_tidaws; + itcw->intrg_max_tidaws = intrg_max_tidaws; + /* Main TCW. */ + chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0); + if (IS_ERR(chunk)) + return chunk; + itcw->tcw = chunk; + tcw_init(itcw->tcw, (op == ITCW_OP_READ) ? 1 : 0, + (op == ITCW_OP_WRITE) ? 1 : 0); + /* Interrogate TCW. */ + if (intrg) { + chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0); + if (IS_ERR(chunk)) + return chunk; + itcw->intrg_tcw = chunk; + tcw_init(itcw->intrg_tcw, 1, 0); + tcw_set_intrg(itcw->tcw, itcw->intrg_tcw); + } + /* Data TIDAL. 
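+	 * (Placed via fit_chunk() with 16-byte alignment and, because the
+	 * check_4k argument is set, guaranteed not to cross a 4k boundary.)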
*/ + if (max_tidaws > 0) { + chunk = fit_chunk(&start, end, sizeof(struct tidaw) * + max_tidaws, 16, 1); + if (IS_ERR(chunk)) + return chunk; + tcw_set_data(itcw->tcw, chunk, 1); + } + /* Interrogate data TIDAL. */ + if (intrg && (intrg_max_tidaws > 0)) { + chunk = fit_chunk(&start, end, sizeof(struct tidaw) * + intrg_max_tidaws, 16, 1); + if (IS_ERR(chunk)) + return chunk; + tcw_set_data(itcw->intrg_tcw, chunk, 1); + } + /* TSB. */ + chunk = fit_chunk(&start, end, sizeof(struct tsb), 8, 0); + if (IS_ERR(chunk)) + return chunk; + tsb_init(chunk); + tcw_set_tsb(itcw->tcw, chunk); + /* Interrogate TSB. */ + if (intrg) { + chunk = fit_chunk(&start, end, sizeof(struct tsb), 8, 0); + if (IS_ERR(chunk)) + return chunk; + tsb_init(chunk); + tcw_set_tsb(itcw->intrg_tcw, chunk); + } + /* TCCB. */ + chunk = fit_chunk(&start, end, TCCB_MAX_SIZE, 8, 0); + if (IS_ERR(chunk)) + return chunk; + tccb_init(chunk, TCCB_MAX_SIZE, TCCB_SAC_DEFAULT); + tcw_set_tccb(itcw->tcw, chunk); + /* Interrogate TCCB. */ + if (intrg) { + chunk = fit_chunk(&start, end, TCCB_MAX_SIZE, 8, 0); + if (IS_ERR(chunk)) + return chunk; + tccb_init(chunk, TCCB_MAX_SIZE, TCCB_SAC_INTRG); + tcw_set_tccb(itcw->intrg_tcw, chunk); + tccb_add_dcw(chunk, TCCB_MAX_SIZE, DCW_CMD_INTRG, 0, NULL, + sizeof(struct dcw_intrg_data), 0); + tcw_finalize(itcw->intrg_tcw, 0); + } + return itcw; +} +EXPORT_SYMBOL(itcw_init); + +/** + * itcw_add_dcw - add a dcw to the itcw + * @itcw: address of the itcw + * @cmd: the dcw command + * @flags: flags for the dcw + * @cd: address of control data for this dcw or NULL if none is required + * @cd_count: number of control data bytes for this dcw + * @count: number of data bytes for this dcw + * + * Add a new dcw to the specified itcw by writing the dcw information specified + * by @cmd, @flags, @cd, @cd_count and @count to the tca of the tccb. Return + * a pointer to the newly added dcw on success or -%ENOSPC if the new dcw + * would exceed the available space. + * + * Note: the tcal field of the tccb header will be updated to reflect added + * content. + */ +struct dcw *itcw_add_dcw(struct itcw *itcw, u8 cmd, u8 flags, void *cd, + u8 cd_count, u32 count) +{ + return tccb_add_dcw(tcw_get_tccb(itcw->tcw), TCCB_MAX_SIZE, cmd, + flags, cd, cd_count, count); +} +EXPORT_SYMBOL(itcw_add_dcw); + +/** + * itcw_add_tidaw - add a tidaw to the itcw + * @itcw: address of the itcw + * @flags: flags for the new tidaw + * @addr: address value for the new tidaw + * @count: count value for the new tidaw + * + * Add a new tidaw to the input/output data tidaw-list of the specified itcw + * (depending on the value of the r-flag and w-flag). Return a pointer to + * the new tidaw on success or -%ENOSPC if the new tidaw would exceed the + * available space. + * + * Note: the tidaw-list is assumed to be contiguous with no ttics. The + * last-tidaw flag for the last tidaw in the list will be set by itcw_finalize. + */ +struct tidaw *itcw_add_tidaw(struct itcw *itcw, u8 flags, void *addr, u32 count) +{ + if (itcw->num_tidaws >= itcw->max_tidaws) + return ERR_PTR(-ENOSPC); + return tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++, flags, addr, count); +} +EXPORT_SYMBOL(itcw_add_tidaw); + +/** + * itcw_set_data - set data address and tida flag of the itcw + * @itcw: address of the itcw + * @addr: the data address + * @use_tidal: zero if the data address specifies a contiguous block of data, + * non-zero if it specifies a list of tidaws. + * + * Set the input/output data address of the itcw (depending on the value of the + * r-flag and w-flag).
If @use_tidal is non-zero, the corresponding tida flag + * is set as well. + */ +void itcw_set_data(struct itcw *itcw, void *addr, int use_tidal) +{ + tcw_set_data(itcw->tcw, addr, use_tidal); +} +EXPORT_SYMBOL(itcw_set_data); + +/** + * itcw_finalize - calculate length and count fields of the itcw + * @itcw: address of the itcw + * + * Calculate tcw input-/output-count and tccbl fields and add a tcat to the + * tccb. In case input- or output-tida is used, the tidaw-list must be stored + * in contiguous storage (no ttic). The tcal field in the tccb must be + * up-to-date. + */ +void itcw_finalize(struct itcw *itcw) +{ + tcw_finalize(itcw->tcw, itcw->num_tidaws); +} +EXPORT_SYMBOL(itcw_finalize); diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c deleted file mode 100644 index 445cf364e461..000000000000 --- a/drivers/s390/cio/qdio.c +++ /dev/null @@ -1,3934 +0,0 @@ -/* - * - * linux/drivers/s390/cio/qdio.c - * - * Linux for S/390 QDIO base support, Hipersocket base support - * version 2 - * - * Copyright 2000,2002 IBM Corporation - * Author(s): Utz Bacher <utz.bacher@de.ibm.com> - * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com> - * - * Restriction: only 63 iqdio subchannels would have its own indicator, - * after that, subsequent subchannels share one indicator - * - * - * - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */ - -#include <linux/module.h> -#include <linux/init.h> -#include <linux/delay.h> -#include <linux/slab.h> -#include <linux/kernel.h> -#include <linux/proc_fs.h> -#include <linux/timer.h> -#include <linux/mempool.h> -#include <linux/semaphore.h> - -#include <asm/ccwdev.h> -#include <asm/io.h> -#include <asm/atomic.h> -#include <asm/timex.h> - -#include <asm/debug.h> -#include <asm/s390_rdev.h> -#include <asm/qdio.h> -#include <asm/airq.h> - -#include "cio.h" -#include "css.h" -#include "device.h" -#include "qdio.h" -#include "ioasm.h" -#include "chsc.h" - -/****************** MODULE PARAMETER VARIABLES ********************/ -MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>"); -MODULE_DESCRIPTION("QDIO base support version 2, " \ - "Copyright 2000 IBM Corporation"); -MODULE_LICENSE("GPL"); - -/******************** HERE WE GO ***********************************/ - -static const char version[] = "QDIO base support version 2"; - -static int qdio_performance_stats = 0; -static int proc_perf_file_registration; -static struct qdio_perf_stats perf_stats; - -static int hydra_thinints; -static int is_passthrough = 0; -static int omit_svs; - -static int indicator_used[INDICATORS_PER_CACHELINE]; -static __u32 * volatile indicators; -static __u32 volatile spare_indicator; -static atomic_t spare_indicator_usecount; -#define QDIO_MEMPOOL_SCSSC_ELEMENTS 2 -static mempool_t *qdio_mempool_scssc; -static struct kmem_cache *qdio_q_cache; - -static debug_info_t *qdio_dbf_setup; -static debug_info_t *qdio_dbf_sbal; -static debug_info_t *qdio_dbf_trace; -static debug_info_t *qdio_dbf_sense; -#ifdef CONFIG_QDIO_DEBUG -static debug_info_t *qdio_dbf_slsb_out; -static debug_info_t *qdio_dbf_slsb_in; -#endif /* CONFIG_QDIO_DEBUG */ - -/* iQDIO stuff: */ -static volatile struct qdio_q *tiq_list=NULL; /* volatile as it could change - during a while loop */ -static DEFINE_SPINLOCK(ttiq_list_lock); -static void *tiqdio_ind; -static void tiqdio_tl(unsigned long); -static DECLARE_TASKLET(tiqdio_tasklet,tiqdio_tl,0); - -/* not a macro, as one of the arguments is atomic_read */ -static inline int -qdio_min(int a,int b) -{ - if (a<b) - return a; - else - return b; -} - -/***************** SCRUBBER HELPER ROUTINES **********************/ -#ifdef CONFIG_64BIT -static inline void qdio_perf_stat_inc(atomic64_t *count) -{ - if (qdio_performance_stats) - atomic64_inc(count); -} - -static inline void qdio_perf_stat_dec(atomic64_t *count) -{ - if (qdio_performance_stats) - atomic64_dec(count); -} -#else /* CONFIG_64BIT */ -static inline void qdio_perf_stat_inc(atomic_t *count) -{ - if (qdio_performance_stats) - atomic_inc(count); -} - -static inline void qdio_perf_stat_dec(atomic_t *count) -{ - if (qdio_performance_stats) - atomic_dec(count); -} -#endif /* CONFIG_64BIT */ - -static inline __u64 -qdio_get_micros(void) -{ - return (get_clock() >> 12); /* time>>12 is microseconds */ -} - -/* - * unfortunately, we can't just xchg the values; in do_QDIO we want to reserve - * the q in any case, so that we'll not be interrupted when we are in - * qdio_mark_tiq... 
shouldn't have a really bad impact, as reserving almost - * ever works (last famous words) - */ -static inline int -qdio_reserve_q(struct qdio_q *q) -{ - return atomic_add_return(1,&q->use_count) - 1; -} - -static inline void -qdio_release_q(struct qdio_q *q) -{ - atomic_dec(&q->use_count); -} - -/*check ccq */ -static int -qdio_check_ccq(struct qdio_q *q, unsigned int ccq) -{ - char dbf_text[15]; - - if (ccq == 0 || ccq == 32) - return 0; - if (ccq == 96 || ccq == 97) - return 1; - /*notify devices immediately*/ - sprintf(dbf_text,"%d", ccq); - QDIO_DBF_TEXT2(1,trace,dbf_text); - return -EIO; -} -/* EQBS: extract buffer states */ -static int -qdio_do_eqbs(struct qdio_q *q, unsigned char *state, - unsigned int *start, unsigned int *cnt) -{ - struct qdio_irq *irq; - unsigned int tmp_cnt, q_no, ccq; - int rc ; - char dbf_text[15]; - - ccq = 0; - tmp_cnt = *cnt; - irq = (struct qdio_irq*)q->irq_ptr; - q_no = q->q_no; - if(!q->is_input_q) - q_no += irq->no_input_qs; -again: - ccq = do_eqbs(irq->sch_token, state, q_no, start, cnt); - rc = qdio_check_ccq(q, ccq); - if ((ccq == 96) && (tmp_cnt != *cnt)) - rc = 0; - if (rc == 1) { - QDIO_DBF_TEXT5(1,trace,"eqAGAIN"); - goto again; - } - if (rc < 0) { - QDIO_DBF_TEXT2(1,trace,"eqberr"); - sprintf(dbf_text,"%2x,%2x,%d,%d",tmp_cnt, *cnt, ccq, q_no); - QDIO_DBF_TEXT2(1,trace,dbf_text); - q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION| - QDIO_STATUS_LOOK_FOR_ERROR, - 0, 0, 0, -1, -1, q->int_parm); - return 0; - } - return (tmp_cnt - *cnt); -} - -/* SQBS: set buffer states */ -static int -qdio_do_sqbs(struct qdio_q *q, unsigned char state, - unsigned int *start, unsigned int *cnt) -{ - struct qdio_irq *irq; - unsigned int tmp_cnt, q_no, ccq; - int rc; - char dbf_text[15]; - - ccq = 0; - tmp_cnt = *cnt; - irq = (struct qdio_irq*)q->irq_ptr; - q_no = q->q_no; - if(!q->is_input_q) - q_no += irq->no_input_qs; -again: - ccq = do_sqbs(irq->sch_token, state, q_no, start, cnt); - rc = qdio_check_ccq(q, ccq); - if (rc == 1) { - QDIO_DBF_TEXT5(1,trace,"sqAGAIN"); - goto again; - } - if (rc < 0) { - QDIO_DBF_TEXT3(1,trace,"sqberr"); - sprintf(dbf_text,"%2x,%2x",tmp_cnt,*cnt); - QDIO_DBF_TEXT3(1,trace,dbf_text); - sprintf(dbf_text,"%d,%d",ccq,q_no); - QDIO_DBF_TEXT3(1,trace,dbf_text); - q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION| - QDIO_STATUS_LOOK_FOR_ERROR, - 0, 0, 0, -1, -1, q->int_parm); - return 0; - } - return (tmp_cnt - *cnt); -} - -static inline int -qdio_set_slsb(struct qdio_q *q, unsigned int *bufno, - unsigned char state, unsigned int *count) -{ - volatile char *slsb; - struct qdio_irq *irq; - - irq = (struct qdio_irq*)q->irq_ptr; - if (!irq->is_qebsm) { - slsb = (char *)&q->slsb.acc.val[(*bufno)]; - xchg(slsb, state); - return 1; - } - return qdio_do_sqbs(q, state, bufno, count); -} - -#ifdef CONFIG_QDIO_DEBUG -static inline void -qdio_trace_slsb(struct qdio_q *q) -{ - if (q->queue_type==QDIO_TRACE_QTYPE) { - if (q->is_input_q) - QDIO_DBF_HEX2(0,slsb_in,&q->slsb, - QDIO_MAX_BUFFERS_PER_Q); - else - QDIO_DBF_HEX2(0,slsb_out,&q->slsb, - QDIO_MAX_BUFFERS_PER_Q); - } -} -#endif - -static inline int -set_slsb(struct qdio_q *q, unsigned int *bufno, - unsigned char state, unsigned int *count) -{ - int rc; -#ifdef CONFIG_QDIO_DEBUG - qdio_trace_slsb(q); -#endif - rc = qdio_set_slsb(q, bufno, state, count); -#ifdef CONFIG_QDIO_DEBUG - qdio_trace_slsb(q); -#endif - return rc; -} -static inline int -qdio_siga_sync(struct qdio_q *q, unsigned int gpr2, - unsigned int gpr3) -{ - int cc; - - QDIO_DBF_TEXT4(0,trace,"sigasync"); - 
QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - qdio_perf_stat_inc(&perf_stats.siga_syncs); - - cc = do_siga_sync(q->schid, gpr2, gpr3); - if (cc) - QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*)); - - return cc; -} - -static inline int -qdio_siga_sync_q(struct qdio_q *q) -{ - if (q->is_input_q) - return qdio_siga_sync(q, 0, q->mask); - return qdio_siga_sync(q, q->mask, 0); -} - -static int -__do_siga_output(struct qdio_q *q, unsigned int *busy_bit) -{ - struct qdio_irq *irq; - unsigned int fc = 0; - unsigned long schid; - - irq = (struct qdio_irq *) q->irq_ptr; - if (!irq->is_qebsm) - schid = *((u32 *)&q->schid); - else { - schid = irq->sch_token; - fc |= 0x80; - } - return do_siga_output(schid, q->mask, busy_bit, fc); -} - -/* - * returns QDIO_SIGA_ERROR_ACCESS_EXCEPTION as cc, when SIGA returns - * an access exception - */ -static int -qdio_siga_output(struct qdio_q *q) -{ - int cc; - __u32 busy_bit; - __u64 start_time=0; - - qdio_perf_stat_inc(&perf_stats.siga_outs); - - QDIO_DBF_TEXT4(0,trace,"sigaout"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - for (;;) { - cc = __do_siga_output(q, &busy_bit); -//QDIO_PRINT_ERR("cc=%x, busy=%x\n",cc,busy_bit); - if ((cc==2) && (busy_bit) && (q->is_iqdio_q)) { - if (!start_time) - start_time=NOW; - if ((NOW-start_time)>QDIO_BUSY_BIT_PATIENCE) - break; - } else - break; - } - - if ((cc==2) && (busy_bit)) - cc |= QDIO_SIGA_ERROR_B_BIT_SET; - - if (cc) - QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*)); - - return cc; -} - -static int -qdio_siga_input(struct qdio_q *q) -{ - int cc; - - QDIO_DBF_TEXT4(0,trace,"sigain"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - qdio_perf_stat_inc(&perf_stats.siga_ins); - - cc = do_siga_input(q->schid, q->mask); - - if (cc) - QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*)); - - return cc; -} - -/* locked by the locks in qdio_activate and qdio_cleanup */ -static __u32 * -qdio_get_indicator(void) -{ - int i; - - for (i = 0; i < INDICATORS_PER_CACHELINE; i++) - if (!indicator_used[i]) { - indicator_used[i]=1; - return indicators+i; - } - atomic_inc(&spare_indicator_usecount); - return (__u32 * volatile) &spare_indicator; -} - -/* locked by the locks in qdio_activate and qdio_cleanup */ -static void -qdio_put_indicator(__u32 *addr) -{ - int i; - - if ( (addr) && (addr!=&spare_indicator) ) { - i=addr-indicators; - indicator_used[i]=0; - } - if (addr == &spare_indicator) - atomic_dec(&spare_indicator_usecount); -} - -static inline void -tiqdio_clear_summary_bit(__u32 *location) -{ - QDIO_DBF_TEXT5(0,trace,"clrsummb"); - QDIO_DBF_HEX5(0,trace,&location,sizeof(void*)); - - xchg(location,0); -} - -static inline void -tiqdio_set_summary_bit(__u32 *location) -{ - QDIO_DBF_TEXT5(0,trace,"setsummb"); - QDIO_DBF_HEX5(0,trace,&location,sizeof(void*)); - - xchg(location,-1); -} - -static inline void -tiqdio_sched_tl(void) -{ - tasklet_hi_schedule(&tiqdio_tasklet); -} - -static void -qdio_mark_tiq(struct qdio_q *q) -{ - unsigned long flags; - - QDIO_DBF_TEXT4(0,trace,"mark iq"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - spin_lock_irqsave(&ttiq_list_lock,flags); - if (unlikely(atomic_read(&q->is_in_shutdown))) - goto out_unlock; - - if (!q->is_input_q) - goto out_unlock; - - if ((q->list_prev) || (q->list_next)) - goto out_unlock; - - if (!tiq_list) { - tiq_list=q; - q->list_prev=q; - q->list_next=q; - } else { - q->list_next=tiq_list; - q->list_prev=tiq_list->list_prev; - tiq_list->list_prev->list_next=q; - tiq_list->list_prev=q; - } - spin_unlock_irqrestore(&ttiq_list_lock,flags); - - tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind); - 
tiqdio_sched_tl(); - return; -out_unlock: - spin_unlock_irqrestore(&ttiq_list_lock,flags); - return; -} - -static inline void -qdio_mark_q(struct qdio_q *q) -{ - QDIO_DBF_TEXT4(0,trace,"mark q"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - if (unlikely(atomic_read(&q->is_in_shutdown))) - return; - - tasklet_schedule(&q->tasklet); -} - -static int -qdio_stop_polling(struct qdio_q *q) -{ -#ifdef QDIO_USE_PROCESSING_STATE - unsigned int tmp, gsf, count = 1; - unsigned char state = 0; - struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr; - - if (!atomic_xchg(&q->polling,0)) - return 1; - - QDIO_DBF_TEXT4(0,trace,"stoppoll"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - /* show the card that we are not polling anymore */ - if (!q->is_input_q) - return 1; - - tmp = gsf = GET_SAVED_FRONTIER(q); - tmp = ((tmp + QDIO_MAX_BUFFERS_PER_Q-1) & (QDIO_MAX_BUFFERS_PER_Q-1) ); - set_slsb(q, &tmp, SLSB_P_INPUT_NOT_INIT, &count); - - /* - * we don't issue this SYNC_MEMORY, as we trust Rick T and - * moreover will not use the PROCESSING state under VM, so - * q->polling was 0 anyway - */ - /*SYNC_MEMORY;*/ - if (irq->is_qebsm) { - count = 1; - qdio_do_eqbs(q, &state, &gsf, &count); - } else - state = q->slsb.acc.val[gsf]; - if (state != SLSB_P_INPUT_PRIMED) - return 1; - /* - * set our summary bit again, as otherwise there is a - * small window we can miss between resetting it and - * checking for PRIMED state - */ - if (q->is_thinint_q) - tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind); - return 0; - -#else /* QDIO_USE_PROCESSING_STATE */ - return 1; -#endif /* QDIO_USE_PROCESSING_STATE */ -} - -/* - * see the comment in do_QDIO and before qdio_reserve_q about the - * sophisticated locking outside of unmark_q, so that we don't need to - * disable the interrupts :-) -*/ -static void -qdio_unmark_q(struct qdio_q *q) -{ - unsigned long flags; - - QDIO_DBF_TEXT4(0,trace,"unmark q"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - if ((!q->list_prev)||(!q->list_next)) - return; - - if ((q->is_thinint_q)&&(q->is_input_q)) { - /* iQDIO */ - spin_lock_irqsave(&ttiq_list_lock,flags); - /* in case cleanup has done this already and simultanously - * qdio_unmark_q is called from the interrupt handler, we've - * got to check this in this specific case again */ - if ((!q->list_prev)||(!q->list_next)) - goto out; - if (q->list_next==q) { - /* q was the only interesting q */ - tiq_list=NULL; - q->list_next=NULL; - q->list_prev=NULL; - } else { - q->list_next->list_prev=q->list_prev; - q->list_prev->list_next=q->list_next; - tiq_list=q->list_next; - q->list_next=NULL; - q->list_prev=NULL; - } -out: - spin_unlock_irqrestore(&ttiq_list_lock,flags); - } -} - -static inline unsigned long -tiqdio_clear_global_summary(void) -{ - unsigned long time; - - QDIO_DBF_TEXT5(0,trace,"clrglobl"); - - time = do_clear_global_summary(); - - QDIO_DBF_HEX5(0,trace,&time,sizeof(unsigned long)); - - return time; -} - - -/************************* OUTBOUND ROUTINES *******************************/ -static int -qdio_qebsm_get_outbound_buffer_frontier(struct qdio_q *q) -{ - struct qdio_irq *irq; - unsigned char state; - unsigned int cnt, count, ftc; - - irq = (struct qdio_irq *) q->irq_ptr; - if ((!q->is_iqdio_q) && (!q->hydra_gives_outbound_pcis)) - SYNC_MEMORY; - - ftc = q->first_to_check; - count = qdio_min(atomic_read(&q->number_of_buffers_used), - (QDIO_MAX_BUFFERS_PER_Q-1)); - if (count == 0) - return q->first_to_check; - cnt = qdio_do_eqbs(q, &state, &ftc, &count); - if (cnt == 0) - return q->first_to_check; - switch (state) { - case 
SLSB_P_OUTPUT_ERROR: - QDIO_DBF_TEXT3(0,trace,"outperr"); - atomic_sub(cnt , &q->number_of_buffers_used); - if (q->qdio_error) - q->error_status_flags |= - QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR; - q->qdio_error = SLSB_P_OUTPUT_ERROR; - q->error_status_flags |= QDIO_STATUS_LOOK_FOR_ERROR; - q->first_to_check = ftc; - break; - case SLSB_P_OUTPUT_EMPTY: - QDIO_DBF_TEXT5(0,trace,"outpempt"); - atomic_sub(cnt, &q->number_of_buffers_used); - q->first_to_check = ftc; - break; - case SLSB_CU_OUTPUT_PRIMED: - /* all buffers primed */ - QDIO_DBF_TEXT5(0,trace,"outpprim"); - break; - default: - break; - } - QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int)); - return q->first_to_check; -} - -static int -qdio_qebsm_get_inbound_buffer_frontier(struct qdio_q *q) -{ - struct qdio_irq *irq; - unsigned char state; - int tmp, ftc, count, cnt; - char dbf_text[15]; - - - irq = (struct qdio_irq *) q->irq_ptr; - ftc = q->first_to_check; - count = qdio_min(atomic_read(&q->number_of_buffers_used), - (QDIO_MAX_BUFFERS_PER_Q-1)); - if (count == 0) - return q->first_to_check; - cnt = qdio_do_eqbs(q, &state, &ftc, &count); - if (cnt == 0) - return q->first_to_check; - switch (state) { - case SLSB_P_INPUT_ERROR : -#ifdef CONFIG_QDIO_DEBUG - QDIO_DBF_TEXT3(1,trace,"inperr"); - sprintf(dbf_text,"%2x,%2x",ftc,count); - QDIO_DBF_TEXT3(1,trace,dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - if (q->qdio_error) - q->error_status_flags |= - QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR; - q->qdio_error = SLSB_P_INPUT_ERROR; - q->error_status_flags |= QDIO_STATUS_LOOK_FOR_ERROR; - atomic_sub(cnt, &q->number_of_buffers_used); - q->first_to_check = ftc; - break; - case SLSB_P_INPUT_PRIMED : - QDIO_DBF_TEXT3(0,trace,"inptprim"); - sprintf(dbf_text,"%2x,%2x",ftc,count); - QDIO_DBF_TEXT3(1,trace,dbf_text); - tmp = 0; - ftc = q->first_to_check; -#ifdef QDIO_USE_PROCESSING_STATE - if (cnt > 1) { - cnt -= 1; - tmp = set_slsb(q, &ftc, SLSB_P_INPUT_NOT_INIT, &cnt); - if (!tmp) - break; - } - cnt = 1; - tmp += set_slsb(q, &ftc, - SLSB_P_INPUT_PROCESSING, &cnt); - atomic_set(&q->polling, 1); -#else - tmp = set_slsb(q, &ftc, SLSB_P_INPUT_NOT_INIT, &cnt); -#endif - atomic_sub(tmp, &q->number_of_buffers_used); - q->first_to_check = ftc; - break; - case SLSB_CU_INPUT_EMPTY: - case SLSB_P_INPUT_NOT_INIT: - case SLSB_P_INPUT_PROCESSING: - QDIO_DBF_TEXT5(0,trace,"inpnipro"); - break; - default: - break; - } - QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int)); - return q->first_to_check; -} - -static int -qdio_get_outbound_buffer_frontier(struct qdio_q *q) -{ - struct qdio_irq *irq; - volatile char *slsb; - unsigned int count = 1; - int first_not_to_check, f, f_mod_no; - char dbf_text[15]; - - QDIO_DBF_TEXT4(0,trace,"getobfro"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - irq = (struct qdio_irq *) q->irq_ptr; - if (irq->is_qebsm) - return qdio_qebsm_get_outbound_buffer_frontier(q); - - slsb=&q->slsb.acc.val[0]; - f_mod_no=f=q->first_to_check; - /* - * f points to already processed elements, so f+no_used is correct... - * ... 
but: we don't check 128 buffers, as otherwise - * qdio_has_outbound_q_moved would return 0 - */ - first_not_to_check=f+qdio_min(atomic_read(&q->number_of_buffers_used), - (QDIO_MAX_BUFFERS_PER_Q-1)); - - if (((!q->is_iqdio_q) && (!q->hydra_gives_outbound_pcis)) || - (q->queue_type == QDIO_IQDIO_QFMT_ASYNCH)) - SYNC_MEMORY; - -check_next: - if (f==first_not_to_check) - goto out; - - switch(slsb[f_mod_no]) { - - /* the adapter has not fetched the output yet */ - case SLSB_CU_OUTPUT_PRIMED: - QDIO_DBF_TEXT5(0,trace,"outpprim"); - break; - - /* the adapter got it */ - case SLSB_P_OUTPUT_EMPTY: - atomic_dec(&q->number_of_buffers_used); - f++; - f_mod_no=f&(QDIO_MAX_BUFFERS_PER_Q-1); - QDIO_DBF_TEXT5(0,trace,"outpempt"); - goto check_next; - - case SLSB_P_OUTPUT_ERROR: - QDIO_DBF_TEXT3(0,trace,"outperr"); - sprintf(dbf_text,"%x-%x-%x",f_mod_no, - q->sbal[f_mod_no]->element[14].sbalf.value, - q->sbal[f_mod_no]->element[15].sbalf.value); - QDIO_DBF_TEXT3(1,trace,dbf_text); - QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256); - - /* kind of process the buffer */ - set_slsb(q, &f_mod_no, SLSB_P_OUTPUT_NOT_INIT, &count); - - /* - * we increment the frontier, as this buffer - * was processed obviously - */ - atomic_dec(&q->number_of_buffers_used); - f_mod_no=(f_mod_no+1)&(QDIO_MAX_BUFFERS_PER_Q-1); - - if (q->qdio_error) - q->error_status_flags|= - QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR; - q->qdio_error=SLSB_P_OUTPUT_ERROR; - q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR; - - break; - - /* no new buffers */ - default: - QDIO_DBF_TEXT5(0,trace,"outpni"); - } -out: - return (q->first_to_check=f_mod_no); -} - -/* all buffers are processed */ -static int -qdio_is_outbound_q_done(struct qdio_q *q) -{ - int no_used; -#ifdef CONFIG_QDIO_DEBUG - char dbf_text[15]; -#endif - - no_used=atomic_read(&q->number_of_buffers_used); - -#ifdef CONFIG_QDIO_DEBUG - if (no_used) { - sprintf(dbf_text,"oqisnt%02x",no_used); - QDIO_DBF_TEXT4(0,trace,dbf_text); - } else { - QDIO_DBF_TEXT4(0,trace,"oqisdone"); - } - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); -#endif /* CONFIG_QDIO_DEBUG */ - return (no_used==0); -} - -static int -qdio_has_outbound_q_moved(struct qdio_q *q) -{ - int i; - - i=qdio_get_outbound_buffer_frontier(q); - - if ( (i!=GET_SAVED_FRONTIER(q)) || - (q->error_status_flags&QDIO_STATUS_LOOK_FOR_ERROR) ) { - SAVE_FRONTIER(q,i); - QDIO_DBF_TEXT4(0,trace,"oqhasmvd"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - return 1; - } else { - QDIO_DBF_TEXT4(0,trace,"oqhsntmv"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - return 0; - } -} - -static void -qdio_kick_outbound_q(struct qdio_q *q) -{ - int result; -#ifdef CONFIG_QDIO_DEBUG - char dbf_text[15]; - - QDIO_DBF_TEXT4(0,trace,"kickoutq"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); -#endif /* CONFIG_QDIO_DEBUG */ - - if (!q->siga_out) - return; - - /* here's the story with cc=2 and busy bit set (thanks, Rick): - * VM's CP could present us cc=2 and busy bit set on SIGA-write - * during reconfiguration of their Guest LAN (only in HIPERS mode, - * QDIO mode is asynchronous -- cc=2 and busy bit there will take - * the queues down immediately; and not being under VM we have a - * problem on cc=2 and busy bit set right away). - * - * Therefore qdio_siga_output will try for a short time constantly, - * if such a condition occurs. If it doesn't change, it will - * increase the busy_siga_counter and save the timestamp, and - * schedule the queue for later processing (via mark_q, using the - * queue tasklet). __qdio_outbound_processing will check out the - * counter. 
If non-zero, it will call qdio_kick_outbound_q as often - * as the value of the counter. This will attempt further SIGA - * instructions. For each successful SIGA, the counter is - * decreased, for failing SIGAs the counter remains the same, after - * all. - * After some time of no movement, qdio_kick_outbound_q will - * finally fail and reflect corresponding error codes to call - * the upper layer module and have it take the queues down. - * - * Note that this is a change from the original HiperSockets design - * (saying cc=2 and busy bit means take the queues down), but in - * these days Guest LAN didn't exist... excessive cc=2 with busy bit - * conditions will still take the queues down, but the threshold is - * higher due to the Guest LAN environment. - */ - - - result=qdio_siga_output(q); - - switch (result) { - case 0: - /* went smooth this time, reset timestamp */ -#ifdef CONFIG_QDIO_DEBUG - QDIO_DBF_TEXT3(0,trace,"cc2reslv"); - sprintf(dbf_text,"%4x%2x%2x",q->schid.sch_no,q->q_no, - atomic_read(&q->busy_siga_counter)); - QDIO_DBF_TEXT3(0,trace,dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - q->timing.busy_start=0; - break; - case (2|QDIO_SIGA_ERROR_B_BIT_SET): - /* cc=2 and busy bit: */ - atomic_inc(&q->busy_siga_counter); - - /* if the last siga was successful, save - * timestamp here */ - if (!q->timing.busy_start) - q->timing.busy_start=NOW; - - /* if we're in time, don't touch error_status_flags - * and siga_error */ - if (NOW-q->timing.busy_start<QDIO_BUSY_BIT_GIVE_UP) { - qdio_mark_q(q); - break; - } - QDIO_DBF_TEXT2(0,trace,"cc2REPRT"); -#ifdef CONFIG_QDIO_DEBUG - sprintf(dbf_text,"%4x%2x%2x",q->schid.sch_no,q->q_no, - atomic_read(&q->busy_siga_counter)); - QDIO_DBF_TEXT3(0,trace,dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - /* else fallthrough and report error */ - default: - /* for plain cc=1, 2 or 3: */ - if (q->siga_error) - q->error_status_flags|= - QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR; - q->error_status_flags|= - QDIO_STATUS_LOOK_FOR_ERROR; - q->siga_error=result; - } -} - -static void -qdio_kick_outbound_handler(struct qdio_q *q) -{ - int start, end, real_end, count; -#ifdef CONFIG_QDIO_DEBUG - char dbf_text[15]; -#endif - - start = q->first_element_to_kick; - /* last_move_ftc was just updated */ - real_end = GET_SAVED_FRONTIER(q); - end = (real_end+QDIO_MAX_BUFFERS_PER_Q-1)& - (QDIO_MAX_BUFFERS_PER_Q-1); - count = (end+QDIO_MAX_BUFFERS_PER_Q+1-start)& - (QDIO_MAX_BUFFERS_PER_Q-1); - -#ifdef CONFIG_QDIO_DEBUG - QDIO_DBF_TEXT4(0,trace,"kickouth"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - sprintf(dbf_text,"s=%2xc=%2x",start,count); - QDIO_DBF_TEXT4(0,trace,dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - - if (q->state==QDIO_IRQ_STATE_ACTIVE) - q->handler(q->cdev,QDIO_STATUS_OUTBOUND_INT| - q->error_status_flags, - q->qdio_error,q->siga_error,q->q_no,start,count, - q->int_parm); - - /* for the next time: */ - q->first_element_to_kick=real_end; - q->qdio_error=0; - q->siga_error=0; - q->error_status_flags=0; -} - -static void -__qdio_outbound_processing(struct qdio_q *q) -{ - int siga_attempts; - - QDIO_DBF_TEXT4(0,trace,"qoutproc"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - if (unlikely(qdio_reserve_q(q))) { - qdio_release_q(q); - qdio_perf_stat_inc(&perf_stats.outbound_tl_runs_resched); - /* as we're sissies, we'll check next time */ - if (likely(!atomic_read(&q->is_in_shutdown))) { - qdio_mark_q(q); - QDIO_DBF_TEXT4(0,trace,"busy,agn"); - } - return; - } - qdio_perf_stat_inc(&perf_stats.outbound_tl_runs); - qdio_perf_stat_inc(&perf_stats.tl_runs); - - /* see 
comment in qdio_kick_outbound_q */ - siga_attempts=atomic_read(&q->busy_siga_counter); - while (siga_attempts) { - atomic_dec(&q->busy_siga_counter); - qdio_kick_outbound_q(q); - siga_attempts--; - } - - if (qdio_has_outbound_q_moved(q)) - qdio_kick_outbound_handler(q); - - if (q->queue_type == QDIO_ZFCP_QFMT) { - if ((!q->hydra_gives_outbound_pcis) && - (!qdio_is_outbound_q_done(q))) - qdio_mark_q(q); - } - else if (((!q->is_iqdio_q) && (!q->is_pci_out)) || - (q->queue_type == QDIO_IQDIO_QFMT_ASYNCH)) { - /* - * make sure buffer switch from PRIMED to EMPTY is noticed - * and outbound_handler is called - */ - if (qdio_is_outbound_q_done(q)) { - del_timer(&q->timer); - } else { - if (!timer_pending(&q->timer)) - mod_timer(&q->timer, jiffies + - QDIO_FORCE_CHECK_TIMEOUT); - } - } - - qdio_release_q(q); -} - -static void -qdio_outbound_processing(unsigned long q) -{ - __qdio_outbound_processing((struct qdio_q *) q); -} - -/************************* INBOUND ROUTINES *******************************/ - - -static int -qdio_get_inbound_buffer_frontier(struct qdio_q *q) -{ - struct qdio_irq *irq; - int f,f_mod_no; - volatile char *slsb; - unsigned int count = 1; - int first_not_to_check; -#ifdef CONFIG_QDIO_DEBUG - char dbf_text[15]; -#endif /* CONFIG_QDIO_DEBUG */ -#ifdef QDIO_USE_PROCESSING_STATE - int last_position=-1; -#endif /* QDIO_USE_PROCESSING_STATE */ - - QDIO_DBF_TEXT4(0,trace,"getibfro"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - irq = (struct qdio_irq *) q->irq_ptr; - if (irq->is_qebsm) - return qdio_qebsm_get_inbound_buffer_frontier(q); - - slsb=&q->slsb.acc.val[0]; - f_mod_no=f=q->first_to_check; - /* - * we don't check 128 buffers, as otherwise qdio_has_inbound_q_moved - * would return 0 - */ - first_not_to_check=f+qdio_min(atomic_read(&q->number_of_buffers_used), - (QDIO_MAX_BUFFERS_PER_Q-1)); - - /* - * we don't use this one, as a PCI or we after a thin interrupt - * will sync the queues - */ - /* SYNC_MEMORY;*/ - -check_next: - f_mod_no=f&(QDIO_MAX_BUFFERS_PER_Q-1); - if (f==first_not_to_check) - goto out; - switch (slsb[f_mod_no]) { - - /* CU_EMPTY means frontier is reached */ - case SLSB_CU_INPUT_EMPTY: - QDIO_DBF_TEXT5(0,trace,"inptempt"); - break; - - /* P_PRIMED means set slsb to P_PROCESSING and move on */ - case SLSB_P_INPUT_PRIMED: - QDIO_DBF_TEXT5(0,trace,"inptprim"); - -#ifdef QDIO_USE_PROCESSING_STATE - /* - * as soon as running under VM, polling the input queues will - * kill VM in terms of CP overhead - */ - if (q->siga_sync) { - set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count); - } else { - /* set the previous buffer to NOT_INIT. The current - * buffer will be set to PROCESSING at the end of - * this function to avoid further interrupts. */ - if (last_position>=0) - set_slsb(q, &last_position, - SLSB_P_INPUT_NOT_INIT, &count); - atomic_set(&q->polling,1); - last_position=f_mod_no; - } -#else /* QDIO_USE_PROCESSING_STATE */ - set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count); -#endif /* QDIO_USE_PROCESSING_STATE */ - /* - * not needed, as the inbound queue will be synced on the next - * siga-r, resp. 
tiqdio_is_inbound_q_done will do the siga-s - */ - /*SYNC_MEMORY;*/ - f++; - atomic_dec(&q->number_of_buffers_used); - goto check_next; - - case SLSB_P_INPUT_NOT_INIT: - case SLSB_P_INPUT_PROCESSING: - QDIO_DBF_TEXT5(0,trace,"inpnipro"); - break; - - /* P_ERROR means frontier is reached, break and report error */ - case SLSB_P_INPUT_ERROR: -#ifdef CONFIG_QDIO_DEBUG - sprintf(dbf_text,"inperr%2x",f_mod_no); - QDIO_DBF_TEXT3(1,trace,dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256); - - /* kind of process the buffer */ - set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count); - - if (q->qdio_error) - q->error_status_flags|= - QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR; - q->qdio_error=SLSB_P_INPUT_ERROR; - q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR; - - /* we increment the frontier, as this buffer - * was obviously processed */ - f_mod_no=(f_mod_no+1)&(QDIO_MAX_BUFFERS_PER_Q-1); - atomic_dec(&q->number_of_buffers_used); - -#ifdef QDIO_USE_PROCESSING_STATE - last_position=-1; -#endif /* QDIO_USE_PROCESSING_STATE */ - - break; - - /* everything else means frontier not changed (HALTED or so) */ - default: - break; - } -out: - q->first_to_check=f_mod_no; - -#ifdef QDIO_USE_PROCESSING_STATE - if (last_position>=0) - set_slsb(q, &last_position, SLSB_P_INPUT_PROCESSING, &count); -#endif /* QDIO_USE_PROCESSING_STATE */ - - QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int)); - - return q->first_to_check; -} - -static int -qdio_has_inbound_q_moved(struct qdio_q *q) -{ - int i; - - i=qdio_get_inbound_buffer_frontier(q); - if ( (i!=GET_SAVED_FRONTIER(q)) || - (q->error_status_flags&QDIO_STATUS_LOOK_FOR_ERROR) ) { - SAVE_FRONTIER(q,i); - if ((!q->siga_sync)&&(!q->hydra_gives_outbound_pcis)) - SAVE_TIMESTAMP(q); - - QDIO_DBF_TEXT4(0,trace,"inhasmvd"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - return 1; - } else { - QDIO_DBF_TEXT4(0,trace,"inhsntmv"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - return 0; - } -} - -/* means no more buffers are to be filled */ -static int -tiqdio_is_inbound_q_done(struct qdio_q *q) -{ - int no_used; - unsigned int start_buf, count; - unsigned char state = 0; - struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr; - -#ifdef CONFIG_QDIO_DEBUG - char dbf_text[15]; -#endif - - no_used=atomic_read(&q->number_of_buffers_used); - - /* propagate the change from 82 to 80 through VM */ - SYNC_MEMORY; - -#ifdef CONFIG_QDIO_DEBUG - if (no_used) { - sprintf(dbf_text,"iqisnt%02x",no_used); - QDIO_DBF_TEXT4(0,trace,dbf_text); - } else { - QDIO_DBF_TEXT4(0,trace,"iniqisdo"); - } - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); -#endif /* CONFIG_QDIO_DEBUG */ - - if (!no_used) - return 1; - if (irq->is_qebsm) { - count = 1; - start_buf = q->first_to_check; - qdio_do_eqbs(q, &state, &start_buf, &count); - } else - state = q->slsb.acc.val[q->first_to_check]; - if (state != SLSB_P_INPUT_PRIMED) - /* - * nothing more to do, if the next buffer is not PRIMED. - * note that we did a SYNC_MEMORY before, so there has - * been a synchronization. - * we will return 0 below, as there is nothing to do - * (stop_polling is not necessary, as we have not been - * using the PROCESSING state) - */ - return 0; - - /* - * ok, the next input buffer is primed. that means that the device - * state change indicator and the adapter local summary are set, so - * we will find it next time. - * we will return 0 below, as there is nothing to do, except scheduling - * ourselves for the next time.
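A note on the index arithmetic that all of these scan routines share: first_to_check only ever advances, and every wrap is done with a bitwise AND against QDIO_MAX_BUFFERS_PER_Q-1, which is only correct because the queue depth (128) is a power of two. A minimal standalone sketch of the same ring arithmetic, with hypothetical names:

#include <stdio.h>

#define QUEUE_DEPTH 128			/* mirrors QDIO_MAX_BUFFERS_PER_Q */

/* successor of a buffer index, as in f_mod_no=(f_mod_no+1)&(QUEUE_DEPTH-1) */
static int ring_next(int idx)
{
	return (idx + 1) & (QUEUE_DEPTH - 1);
}

/* number of buffers scanned from start up to, but not including, end */
static int ring_distance(int start, int end)
{
	return (end + QUEUE_DEPTH - start) & (QUEUE_DEPTH - 1);
}

int main(void)
{
	printf("%d\n", ring_next(127));		/* wraps around to 0 */
	printf("%d\n", ring_distance(120, 5));	/* 13 buffers, across the wrap */
	return 0;
}

The same distance formula, shifted by one, is how qdio_kick_outbound_handler turns the saved and current frontiers into the count it reports to the upper layer.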
- */ - tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind); - tiqdio_sched_tl(); - return 0; -} - -static int -qdio_is_inbound_q_done(struct qdio_q *q) -{ - int no_used; - unsigned int start_buf, count; - unsigned char state = 0; - struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr; - -#ifdef CONFIG_QDIO_DEBUG - char dbf_text[15]; -#endif - - no_used=atomic_read(&q->number_of_buffers_used); - - /* - * we need that one for synchronization with the adapter, as it - * does a kind of PCI avoidance - */ - SYNC_MEMORY; - - if (!no_used) { - QDIO_DBF_TEXT4(0,trace,"inqisdnA"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - return 1; - } - if (irq->is_qebsm) { - count = 1; - start_buf = q->first_to_check; - qdio_do_eqbs(q, &state, &start_buf, &count); - } else - state = q->slsb.acc.val[q->first_to_check]; - if (state == SLSB_P_INPUT_PRIMED) { - /* we got something to do */ - QDIO_DBF_TEXT4(0,trace,"inqisntA"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - return 0; - } - - /* on VM, we don't poll, so the q is always done here */ - if (q->siga_sync) - return 1; - if (q->hydra_gives_outbound_pcis) - return 1; - - /* - * at this point we know, that inbound first_to_check - * has (probably) not moved (see qdio_inbound_processing) - */ - if (NOW>GET_SAVED_TIMESTAMP(q)+q->timing.threshold) { -#ifdef CONFIG_QDIO_DEBUG - QDIO_DBF_TEXT4(0,trace,"inqisdon"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - sprintf(dbf_text,"pf%02xcn%02x",q->first_to_check,no_used); - QDIO_DBF_TEXT4(0,trace,dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - return 1; - } else { -#ifdef CONFIG_QDIO_DEBUG - QDIO_DBF_TEXT4(0,trace,"inqisntd"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - sprintf(dbf_text,"pf%02xcn%02x",q->first_to_check,no_used); - QDIO_DBF_TEXT4(0,trace,dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - return 0; - } -} - -static void -qdio_kick_inbound_handler(struct qdio_q *q) -{ - int count, start, end, real_end, i; -#ifdef CONFIG_QDIO_DEBUG - char dbf_text[15]; -#endif - - QDIO_DBF_TEXT4(0,trace,"kickinh"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - start=q->first_element_to_kick; - real_end=q->first_to_check; - end=(real_end+QDIO_MAX_BUFFERS_PER_Q-1)&(QDIO_MAX_BUFFERS_PER_Q-1); - - i=start; - count=0; - while (1) { - count++; - if (i==end) - break; - i=(i+1)&(QDIO_MAX_BUFFERS_PER_Q-1); - } - -#ifdef CONFIG_QDIO_DEBUG - sprintf(dbf_text,"s=%2xc=%2x",start,count); - QDIO_DBF_TEXT4(0,trace,dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - - if (likely(q->state==QDIO_IRQ_STATE_ACTIVE)) - q->handler(q->cdev, - QDIO_STATUS_INBOUND_INT|q->error_status_flags, - q->qdio_error,q->siga_error,q->q_no,start,count, - q->int_parm); - - /* for the next time: */ - q->first_element_to_kick=real_end; - q->qdio_error=0; - q->siga_error=0; - q->error_status_flags=0; - - qdio_perf_stat_inc(&perf_stats.inbound_cnt); -} - -static void -__tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set) -{ - struct qdio_irq *irq_ptr; - struct qdio_q *oq; - int i; - - QDIO_DBF_TEXT4(0,trace,"iqinproc"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - /* - * we first want to reserve the q, so that we know, that we don't - * interrupt ourselves and call qdio_unmark_q, as is_in_shutdown might - * be set - */ - if (unlikely(qdio_reserve_q(q))) { - qdio_release_q(q); - qdio_perf_stat_inc(&perf_stats.inbound_thin_tl_runs_resched); - /* - * as we might just be about to stop polling, we make - * sure that we check again at least once more - */ - tiqdio_sched_tl(); - return; - } - qdio_perf_stat_inc(&perf_stats.inbound_thin_tl_runs); - if 
(unlikely(atomic_read(&q->is_in_shutdown))) { - qdio_unmark_q(q); - goto out; - } - - /* - * we reset spare_ind_was_set, when the queue does not use the - * spare indicator - */ - if (spare_ind_was_set) - spare_ind_was_set = (q->dev_st_chg_ind == &spare_indicator); - - if (!(*(q->dev_st_chg_ind)) && !spare_ind_was_set) - goto out; - /* - * q->dev_st_chg_ind is the indicator, be it shared or not. - * only clear it, if indicator is non-shared - */ - if (q->dev_st_chg_ind != &spare_indicator) - tiqdio_clear_summary_bit((__u32*)q->dev_st_chg_ind); - - if (q->hydra_gives_outbound_pcis) { - if (!q->siga_sync_done_on_thinints) { - SYNC_MEMORY_ALL; - } else if (!q->siga_sync_done_on_outb_tis) { - SYNC_MEMORY_ALL_OUTB; - } - } else { - SYNC_MEMORY; - } - /* - * maybe we have to do work on our outbound queues... at least - * we have to check the outbound-int-capable thinint-capable - * queues - */ - if (q->hydra_gives_outbound_pcis) { - irq_ptr = (struct qdio_irq*)q->irq_ptr; - for (i=0;i<irq_ptr->no_output_qs;i++) { - oq = irq_ptr->output_qs[i]; - if (!qdio_is_outbound_q_done(oq)) { - qdio_perf_stat_dec(&perf_stats.tl_runs); - __qdio_outbound_processing(oq); - } - } - } - - if (!qdio_has_inbound_q_moved(q)) - goto out; - - qdio_kick_inbound_handler(q); - if (tiqdio_is_inbound_q_done(q)) - if (!qdio_stop_polling(q)) { - /* - * we set the flags to get into the stuff next time, - * see also comment in qdio_stop_polling - */ - tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind); - tiqdio_sched_tl(); - } -out: - qdio_release_q(q); -} - -static void -tiqdio_inbound_processing(unsigned long q) -{ - __tiqdio_inbound_processing((struct qdio_q *) q, - atomic_read(&spare_indicator_usecount)); -} - -static void -__qdio_inbound_processing(struct qdio_q *q) -{ - int q_laps=0; - - QDIO_DBF_TEXT4(0,trace,"qinproc"); - QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); - - if (unlikely(qdio_reserve_q(q))) { - qdio_release_q(q); - qdio_perf_stat_inc(&perf_stats.inbound_tl_runs_resched); - /* as we're sissies, we'll check next time */ - if (likely(!atomic_read(&q->is_in_shutdown))) { - qdio_mark_q(q); - QDIO_DBF_TEXT4(0,trace,"busy,agn"); - } - return; - } - qdio_perf_stat_inc(&perf_stats.inbound_tl_runs); - qdio_perf_stat_inc(&perf_stats.tl_runs); - -again: - if (qdio_has_inbound_q_moved(q)) { - qdio_kick_inbound_handler(q); - if (!qdio_stop_polling(q)) { - q_laps++; - if (q_laps<QDIO_Q_LAPS) - goto again; - } - qdio_mark_q(q); - } else { - if (!qdio_is_inbound_q_done(q)) - /* means poll time is not yet over */ - qdio_mark_q(q); - } - - qdio_release_q(q); -} - -static void -qdio_inbound_processing(unsigned long q) -{ - __qdio_inbound_processing((struct qdio_q *) q); -} - -/************************* MAIN ROUTINES *******************************/ - -#ifdef QDIO_USE_PROCESSING_STATE -static int -tiqdio_reset_processing_state(struct qdio_q *q, int q_laps) -{ - if (!q) { - tiqdio_sched_tl(); - return 0; - } - - /* - * under VM, we have not used the PROCESSING state, so no - * need to stop polling - */ - if (q->siga_sync) - return 2; - - if (unlikely(qdio_reserve_q(q))) { - qdio_release_q(q); - qdio_perf_stat_inc(&perf_stats.inbound_thin_tl_runs_resched); - /* - * as we might just be about to stop polling, we make - * sure that we check again at least once more - */ - - /* - * sanity -- we'd get here without setting the - * dev st chg ind - */ - tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind); - tiqdio_sched_tl(); - return 0; - } - if (qdio_stop_polling(q)) { - qdio_release_q(q); - return 2; - } - if (q_laps<QDIO_Q_LAPS-1) { - 
qdio_release_q(q); - return 3; - } - /* - * we set the flags to get into the stuff - * next time, see also comment in qdio_stop_polling - */ - tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind); - tiqdio_sched_tl(); - qdio_release_q(q); - return 1; - -} -#endif /* QDIO_USE_PROCESSING_STATE */ - -static void -tiqdio_inbound_checks(void) -{ - struct qdio_q *q; - int spare_ind_was_set=0; -#ifdef QDIO_USE_PROCESSING_STATE - int q_laps=0; -#endif /* QDIO_USE_PROCESSING_STATE */ - - QDIO_DBF_TEXT4(0,trace,"iqdinbck"); - QDIO_DBF_TEXT5(0,trace,"iqlocsum"); - -#ifdef QDIO_USE_PROCESSING_STATE -again: -#endif /* QDIO_USE_PROCESSING_STATE */ - - /* when the spare indicator is used and set, save that and clear it */ - if ((atomic_read(&spare_indicator_usecount)) && spare_indicator) { - spare_ind_was_set = 1; - tiqdio_clear_summary_bit((__u32*)&spare_indicator); - } - - q=(struct qdio_q*)tiq_list; - do { - if (!q) - break; - __tiqdio_inbound_processing(q, spare_ind_was_set); - q=(struct qdio_q*)q->list_next; - } while (q!=(struct qdio_q*)tiq_list); - -#ifdef QDIO_USE_PROCESSING_STATE - q=(struct qdio_q*)tiq_list; - do { - int ret; - - ret = tiqdio_reset_processing_state(q, q_laps); - switch (ret) { - case 0: - return; - case 1: - q_laps++; - case 2: - q = (struct qdio_q*)q->list_next; - break; - default: - q_laps++; - goto again; - } - } while (q!=(struct qdio_q*)tiq_list); -#endif /* QDIO_USE_PROCESSING_STATE */ -} - -static void -tiqdio_tl(unsigned long data) -{ - QDIO_DBF_TEXT4(0,trace,"iqdio_tl"); - - qdio_perf_stat_inc(&perf_stats.tl_runs); - - tiqdio_inbound_checks(); -} - -/********************* GENERAL HELPER_ROUTINES ***********************/ - -static void -qdio_release_irq_memory(struct qdio_irq *irq_ptr) -{ - int i; - struct qdio_q *q; - - for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) { - q = irq_ptr->input_qs[i]; - if (q) { - free_page((unsigned long) q->slib); - kmem_cache_free(qdio_q_cache, q); - } - q = irq_ptr->output_qs[i]; - if (q) { - free_page((unsigned long) q->slib); - kmem_cache_free(qdio_q_cache, q); - } - } - free_page((unsigned long) irq_ptr->qdr); - free_page((unsigned long) irq_ptr); -} - -static void -qdio_set_impl_params(struct qdio_irq *irq_ptr, - unsigned int qib_param_field_format, - /* pointer to 128 bytes or NULL, if no param field */ - unsigned char *qib_param_field, - /* pointer to no_queues*128 words of data or NULL */ - unsigned int no_input_qs, - unsigned int no_output_qs, - unsigned long *input_slib_elements, - unsigned long *output_slib_elements) -{ - int i,j; - - if (!irq_ptr) - return; - - irq_ptr->qib.pfmt=qib_param_field_format; - if (qib_param_field) - memcpy(irq_ptr->qib.parm,qib_param_field, - QDIO_MAX_BUFFERS_PER_Q); - - if (input_slib_elements) - for (i=0;i<no_input_qs;i++) { - for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) - irq_ptr->input_qs[i]->slib->slibe[j].parms= - input_slib_elements[ - i*QDIO_MAX_BUFFERS_PER_Q+j]; - } - if (output_slib_elements) - for (i=0;i<no_output_qs;i++) { - for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) - irq_ptr->output_qs[i]->slib->slibe[j].parms= - output_slib_elements[ - i*QDIO_MAX_BUFFERS_PER_Q+j]; - } -} - -static int -qdio_alloc_qs(struct qdio_irq *irq_ptr, - int no_input_qs, int no_output_qs) -{ - int i; - struct qdio_q *q; - - for (i = 0; i < no_input_qs; i++) { - q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL); - if (!q) - return -ENOMEM; - memset(q, 0, sizeof(*q)); - - q->slib = (struct slib *) __get_free_page(GFP_KERNEL); - if (!q->slib) { - kmem_cache_free(qdio_q_cache, q); - return -ENOMEM; - } - irq_ptr->input_qs[i]=q; - } 
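Each queue takes two allocations: the qdio_q object from the qdio_q_cache slab and one page for its SLIB. The loop above frees the half-constructed queue as soon as the second allocation fails, while queues that were already completed are left for qdio_release_irq_memory to unwind. A userspace model of that pairwise unwind, names hypothetical:

#include <stdlib.h>

struct queue {
	void *slib;	/* stand-in for the per-queue page */
};

/* allocate a queue plus its page; on failure, leave nothing behind */
static struct queue *queue_alloc(void)
{
	struct queue *q = calloc(1, sizeof(*q));

	if (!q)
		return NULL;
	q->slib = aligned_alloc(4096, 4096);	/* models __get_free_page() */
	if (!q->slib) {
		free(q);	/* undo the first allocation before failing */
		return NULL;
	}
	return q;
}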
- - for (i = 0; i < no_output_qs; i++) { - q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL); - if (!q) - return -ENOMEM; - memset(q, 0, sizeof(*q)); - - q->slib = (struct slib *) __get_free_page(GFP_KERNEL); - if (!q->slib) { - kmem_cache_free(qdio_q_cache, q); - return -ENOMEM; - } - irq_ptr->output_qs[i]=q; - } - return 0; -} - -static void -qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev, - int no_input_qs, int no_output_qs, - qdio_handler_t *input_handler, - qdio_handler_t *output_handler, - unsigned long int_parm,int q_format, - unsigned long flags, - void **inbound_sbals_array, - void **outbound_sbals_array) -{ - struct qdio_q *q; - int i,j; - char dbf_text[20]; /* see qdio_initialize */ - void *ptr; - int available; - - sprintf(dbf_text,"qfqs%4x",cdev->private->schid.sch_no); - QDIO_DBF_TEXT0(0,setup,dbf_text); - for (i=0;i<no_input_qs;i++) { - q=irq_ptr->input_qs[i]; - - memset(q,0,((char*)&q->slib)-((char*)q)); - sprintf(dbf_text,"in-q%4x",i); - QDIO_DBF_TEXT0(0,setup,dbf_text); - QDIO_DBF_HEX0(0,setup,&q,sizeof(void*)); - - memset(q->slib,0,PAGE_SIZE); - q->sl=(struct sl*)(((char*)q->slib)+PAGE_SIZE/2); - - available=0; - - for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) - q->sbal[j]=*(inbound_sbals_array++); - - q->queue_type=q_format; - q->int_parm=int_parm; - q->schid = irq_ptr->schid; - q->irq_ptr = irq_ptr; - q->cdev = cdev; - q->mask=1<<(31-i); - q->q_no=i; - q->is_input_q=1; - q->first_to_check=0; - q->last_move_ftc=0; - q->handler=input_handler; - q->dev_st_chg_ind=irq_ptr->dev_st_chg_ind; - - /* q->is_thinint_q isn't valid at this time, but - * irq_ptr->is_thinint_irq is - */ - if (irq_ptr->is_thinint_irq) - tasklet_init(&q->tasklet, tiqdio_inbound_processing, - (unsigned long) q); - else - tasklet_init(&q->tasklet, qdio_inbound_processing, - (unsigned long) q); - - /* actually this is not used for inbound queues. yet. 
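One detail of qdio_fill_qs that is easy to read past: the SLIB and the storage list share a single page, with q->sl simply planted at the page's midpoint, which works because each structure fits in half a page. A standalone sketch of that layout, struct sizes illustrative only:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096

struct slib { char body[1024]; };		/* sizes illustrative only */
struct sl { unsigned long element[128]; };	/* one entry per SBAL */

int main(void)
{
	struct slib *slib = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
	struct sl *sl;

	if (!slib)
		return 1;
	/* SLIB in the lower half of the page, SL in the upper half */
	sl = (struct sl *)((char *)slib + PAGE_SIZE / 2);
	printf("%p %p\n", (void *)slib, (void *)sl);
	free(slib);
	return 0;
}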
*/ - atomic_set(&q->busy_siga_counter,0); - q->timing.busy_start=0; - -/* for (j=0;j<QDIO_STATS_NUMBER;j++) - q->timing.last_transfer_times[j]=(qdio_get_micros()/ - QDIO_STATS_NUMBER)*j; - q->timing.last_transfer_index=QDIO_STATS_NUMBER-1; -*/ - - /* fill in slib */ - if (i>0) irq_ptr->input_qs[i-1]->slib->nsliba= - (unsigned long)(q->slib); - q->slib->sla=(unsigned long)(q->sl); - q->slib->slsba=(unsigned long)(&q->slsb.acc.val[0]); - - /* fill in sl */ - for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) - q->sl->element[j].sbal=(unsigned long)(q->sbal[j]); - - QDIO_DBF_TEXT2(0,setup,"sl-sb-b0"); - ptr=(void*)q->sl; - QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*)); - ptr=(void*)&q->slsb; - QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*)); - ptr=(void*)q->sbal[0]; - QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*)); - - /* fill in slsb */ - if (!irq_ptr->is_qebsm) { - unsigned int count = 1; - for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) - set_slsb(q, &j, SLSB_P_INPUT_NOT_INIT, &count); - } - } - - for (i=0;i<no_output_qs;i++) { - q=irq_ptr->output_qs[i]; - memset(q,0,((char*)&q->slib)-((char*)q)); - - sprintf(dbf_text,"outq%4x",i); - QDIO_DBF_TEXT0(0,setup,dbf_text); - QDIO_DBF_HEX0(0,setup,&q,sizeof(void*)); - - memset(q->slib,0,PAGE_SIZE); - q->sl=(struct sl*)(((char*)q->slib)+PAGE_SIZE/2); - - available=0; - - for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) - q->sbal[j]=*(outbound_sbals_array++); - - q->queue_type=q_format; - if ((q->queue_type == QDIO_IQDIO_QFMT) && - (no_output_qs > 1) && - (i == no_output_qs-1)) - q->queue_type = QDIO_IQDIO_QFMT_ASYNCH; - q->int_parm=int_parm; - q->is_input_q=0; - q->is_pci_out = 0; - q->schid = irq_ptr->schid; - q->cdev = cdev; - q->irq_ptr = irq_ptr; - q->mask=1<<(31-i); - q->q_no=i; - q->first_to_check=0; - q->last_move_ftc=0; - q->handler=output_handler; - - tasklet_init(&q->tasklet, qdio_outbound_processing, - (unsigned long) q); - setup_timer(&q->timer, qdio_outbound_processing, - (unsigned long) q); - - atomic_set(&q->busy_siga_counter,0); - q->timing.busy_start=0; - - /* fill in slib */ - if (i>0) irq_ptr->output_qs[i-1]->slib->nsliba= - (unsigned long)(q->slib); - q->slib->sla=(unsigned long)(q->sl); - q->slib->slsba=(unsigned long)(&q->slsb.acc.val[0]); - - /* fill in sl */ - for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) - q->sl->element[j].sbal=(unsigned long)(q->sbal[j]); - - QDIO_DBF_TEXT2(0,setup,"sl-sb-b0"); - ptr=(void*)q->sl; - QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*)); - ptr=(void*)&q->slsb; - QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*)); - ptr=(void*)q->sbal[0]; - QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*)); - - /* fill in slsb */ - if (!irq_ptr->is_qebsm) { - unsigned int count = 1; - for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) - set_slsb(q, &j, SLSB_P_OUTPUT_NOT_INIT, &count); - } - } -} - -static void -qdio_fill_thresholds(struct qdio_irq *irq_ptr, - unsigned int no_input_qs, - unsigned int no_output_qs, - unsigned int min_input_threshold, - unsigned int max_input_threshold, - unsigned int min_output_threshold, - unsigned int max_output_threshold) -{ - int i; - struct qdio_q *q; - - for (i=0;i<no_input_qs;i++) { - q=irq_ptr->input_qs[i]; - q->timing.threshold=max_input_threshold; -/* for (j=0;j<QDIO_STATS_CLASSES;j++) { - q->threshold_classes[j].threshold= - min_input_threshold+ - (max_input_threshold-min_input_threshold)/ - QDIO_STATS_CLASSES; - } - qdio_use_thresholds(q,QDIO_STATS_CLASSES/2);*/ - } - for (i=0;i<no_output_qs;i++) { - q=irq_ptr->output_qs[i]; - q->timing.threshold=max_output_threshold; -/* for (j=0;j<QDIO_STATS_CLASSES;j++) { - 
q->threshold_classes[j].threshold= - min_output_threshold+ - (max_output_threshold-min_output_threshold)/ - QDIO_STATS_CLASSES; - } - qdio_use_thresholds(q,QDIO_STATS_CLASSES/2);*/ - } -} - -static void tiqdio_thinint_handler(void *ind, void *drv_data) -{ - QDIO_DBF_TEXT4(0,trace,"thin_int"); - - qdio_perf_stat_inc(&perf_stats.thinints); - - /* SVS only when needed: - * issue SVS to benefit from iqdio interrupt avoidance - * (SVS clears AISOI)*/ - if (!omit_svs) - tiqdio_clear_global_summary(); - - tiqdio_inbound_checks(); -} - -static void -qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state) -{ - int i; -#ifdef CONFIG_QDIO_DEBUG - char dbf_text[15]; - - QDIO_DBF_TEXT5(0,trace,"newstate"); - sprintf(dbf_text,"%4x%4x",irq_ptr->schid.sch_no,state); - QDIO_DBF_TEXT5(0,trace,dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - - irq_ptr->state=state; - for (i=0;i<irq_ptr->no_input_qs;i++) - irq_ptr->input_qs[i]->state=state; - for (i=0;i<irq_ptr->no_output_qs;i++) - irq_ptr->output_qs[i]->state=state; - mb(); -} - -static void -qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb) -{ - char dbf_text[15]; - - if (irb->esw.esw0.erw.cons) { - sprintf(dbf_text,"sens%4x",schid.sch_no); - QDIO_DBF_TEXT2(1,trace,dbf_text); - QDIO_DBF_HEX0(0,sense,irb,QDIO_DBF_SENSE_LEN); - - QDIO_PRINT_WARN("sense data available on qdio channel.\n"); - QDIO_HEXDUMP16(WARN,"irb: ",irb); - QDIO_HEXDUMP16(WARN,"sense data: ",irb->ecw); - } - -} - -static void -qdio_handle_pci(struct qdio_irq *irq_ptr) -{ - int i; - struct qdio_q *q; - - qdio_perf_stat_inc(&perf_stats.pcis); - for (i=0;i<irq_ptr->no_input_qs;i++) { - q=irq_ptr->input_qs[i]; - if (q->is_input_q&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT) - qdio_mark_q(q); - else { - qdio_perf_stat_dec(&perf_stats.tl_runs); - __qdio_inbound_processing(q); - } - } - if (!irq_ptr->hydra_gives_outbound_pcis) - return; - for (i=0;i<irq_ptr->no_output_qs;i++) { - q=irq_ptr->output_qs[i]; - if (qdio_is_outbound_q_done(q)) - continue; - qdio_perf_stat_dec(&perf_stats.tl_runs); - if (!irq_ptr->sync_done_on_outb_pcis) - SYNC_MEMORY; - __qdio_outbound_processing(q); - } -} - -static void qdio_establish_handle_irq(struct ccw_device*, int, int); - -static void -qdio_handle_activate_check(struct ccw_device *cdev, unsigned long intparm, - int cstat, int dstat) -{ - struct qdio_irq *irq_ptr; - struct qdio_q *q; - char dbf_text[15]; - - irq_ptr = cdev->private->qdio_data; - - QDIO_DBF_TEXT2(1, trace, "ick2"); - sprintf(dbf_text,"%s", cdev->dev.bus_id); - QDIO_DBF_TEXT2(1,trace,dbf_text); - QDIO_DBF_HEX2(0,trace,&intparm,sizeof(int)); - QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int)); - QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int)); - QDIO_PRINT_ERR("received check condition on activate " \ - "queues on device %s (cs=x%x, ds=x%x).\n", - cdev->dev.bus_id, cstat, dstat); - if (irq_ptr->no_input_qs) { - q=irq_ptr->input_qs[0]; - } else if (irq_ptr->no_output_qs) { - q=irq_ptr->output_qs[0]; - } else { - QDIO_PRINT_ERR("oops... 
no queue registered for device %s!?\n", - cdev->dev.bus_id); - goto omit_handler_call; - } - q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION| - QDIO_STATUS_LOOK_FOR_ERROR, - 0,0,0,-1,-1,q->int_parm); -omit_handler_call: - qdio_set_state(irq_ptr,QDIO_IRQ_STATE_STOPPED); - -} - -static void -qdio_call_shutdown(struct work_struct *work) -{ - struct ccw_device_private *priv; - struct ccw_device *cdev; - - priv = container_of(work, struct ccw_device_private, kick_work); - cdev = priv->cdev; - qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); - put_device(&cdev->dev); -} - -static void -qdio_timeout_handler(struct ccw_device *cdev) -{ - struct qdio_irq *irq_ptr; - char dbf_text[15]; - - QDIO_DBF_TEXT2(0, trace, "qtoh"); - sprintf(dbf_text, "%s", cdev->dev.bus_id); - QDIO_DBF_TEXT2(0, trace, dbf_text); - - irq_ptr = cdev->private->qdio_data; - sprintf(dbf_text, "state:%d", irq_ptr->state); - QDIO_DBF_TEXT2(0, trace, dbf_text); - - switch (irq_ptr->state) { - case QDIO_IRQ_STATE_INACTIVE: - QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: timed out\n", - irq_ptr->schid.ssid, irq_ptr->schid.sch_no); - QDIO_DBF_TEXT2(1,setup,"eq:timeo"); - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); - break; - case QDIO_IRQ_STATE_CLEANUP: - QDIO_PRINT_INFO("Did not get interrupt on cleanup, " - "irq=0.%x.%x.\n", - irq_ptr->schid.ssid, irq_ptr->schid.sch_no); - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); - break; - case QDIO_IRQ_STATE_ESTABLISHED: - case QDIO_IRQ_STATE_ACTIVE: - /* I/O has been terminated by common I/O layer. */ - QDIO_PRINT_INFO("Queues on irq 0.%x.%04x killed by cio.\n", - irq_ptr->schid.ssid, irq_ptr->schid.sch_no); - QDIO_DBF_TEXT2(1, trace, "cio:term"); - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); - if (get_device(&cdev->dev)) { - /* Can't call shutdown from interrupt context. */ - PREPARE_WORK(&cdev->private->kick_work, - qdio_call_shutdown); - queue_work(ccw_device_work, &cdev->private->kick_work); - } - break; - default: - BUG(); - } - ccw_device_set_timeout(cdev, 0); - wake_up(&cdev->private->wait_q); -} - -static void -qdio_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) -{ - struct qdio_irq *irq_ptr; - int cstat,dstat; - char dbf_text[15]; - -#ifdef CONFIG_QDIO_DEBUG - QDIO_DBF_TEXT4(0, trace, "qint"); - sprintf(dbf_text, "%s", cdev->dev.bus_id); - QDIO_DBF_TEXT4(0, trace, dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - - if (!intparm) { - QDIO_PRINT_ERR("got unsolicited interrupt in qdio " \ - "handler, device %s\n", cdev->dev.bus_id); - return; - } - - irq_ptr = cdev->private->qdio_data; - if (!irq_ptr) { - QDIO_DBF_TEXT2(1, trace, "uint"); - sprintf(dbf_text,"%s", cdev->dev.bus_id); - QDIO_DBF_TEXT2(1,trace,dbf_text); - QDIO_PRINT_ERR("received interrupt on unused device %s!\n", - cdev->dev.bus_id); - return; - } - - if (IS_ERR(irb)) { - /* Currently running i/o is in error. 
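The IS_ERR/PTR_ERR handling here is the standard kernel idiom: cio hands the interrupt handler either a real irb pointer or an errno encoded into the pointer value itself, so a single argument carries both cases. A userspace replica of the <linux/err.h> machinery this relies on:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095	/* errno values occupy the top page of the address space */

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *irb = ERR_PTR(-ETIMEDOUT);	/* what cio passes on a timeout */

	if (IS_ERR(irb))
		printf("no irb, error %ld\n", PTR_ERR(irb));
	return 0;
}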
*/ - switch (PTR_ERR(irb)) { - case -EIO: - QDIO_PRINT_ERR("i/o error on device %s\n", - cdev->dev.bus_id); - return; - case -ETIMEDOUT: - qdio_timeout_handler(cdev); - return; - default: - QDIO_PRINT_ERR("unknown error state %ld on device %s\n", - PTR_ERR(irb), cdev->dev.bus_id); - return; - } - } - - qdio_irq_check_sense(irq_ptr->schid, irb); - -#ifdef CONFIG_QDIO_DEBUG - sprintf(dbf_text, "state:%d", irq_ptr->state); - QDIO_DBF_TEXT4(0, trace, dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - - cstat = irb->scsw.cstat; - dstat = irb->scsw.dstat; - - switch (irq_ptr->state) { - case QDIO_IRQ_STATE_INACTIVE: - qdio_establish_handle_irq(cdev, cstat, dstat); - break; - - case QDIO_IRQ_STATE_CLEANUP: - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); - break; - - case QDIO_IRQ_STATE_ESTABLISHED: - case QDIO_IRQ_STATE_ACTIVE: - if (cstat & SCHN_STAT_PCI) { - qdio_handle_pci(irq_ptr); - break; - } - - if ((cstat&~SCHN_STAT_PCI)||dstat) { - qdio_handle_activate_check(cdev, intparm, cstat, dstat); - break; - } - default: - QDIO_PRINT_ERR("got interrupt for queues in state %d on " \ - "device %s?!\n", - irq_ptr->state, cdev->dev.bus_id); - } - wake_up(&cdev->private->wait_q); - -} - -int -qdio_synchronize(struct ccw_device *cdev, unsigned int flags, - unsigned int queue_number) -{ - int cc = 0; - struct qdio_q *q; - struct qdio_irq *irq_ptr; - void *ptr; -#ifdef CONFIG_QDIO_DEBUG - char dbf_text[15]="SyncXXXX"; -#endif - - irq_ptr = cdev->private->qdio_data; - if (!irq_ptr) - return -ENODEV; - -#ifdef CONFIG_QDIO_DEBUG - *((int*)(&dbf_text[4])) = irq_ptr->schid.sch_no; - QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN); - *((int*)(&dbf_text[0]))=flags; - *((int*)(&dbf_text[4]))=queue_number; - QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN); -#endif /* CONFIG_QDIO_DEBUG */ - - if (flags&QDIO_FLAG_SYNC_INPUT) { - q=irq_ptr->input_qs[queue_number]; - if (!q) - return -EINVAL; - if (!(irq_ptr->is_qebsm)) - cc = do_siga_sync(q->schid, 0, q->mask); - } else if (flags&QDIO_FLAG_SYNC_OUTPUT) { - q=irq_ptr->output_qs[queue_number]; - if (!q) - return -EINVAL; - if (!(irq_ptr->is_qebsm)) - cc = do_siga_sync(q->schid, q->mask, 0); - } else - return -EINVAL; - - ptr=&cc; - if (cc) - QDIO_DBF_HEX3(0,trace,&ptr,sizeof(int)); - - return cc; -} - -static int -qdio_get_ssqd_information(struct subchannel_id *schid, - struct qdio_chsc_ssqd **ssqd_area) -{ - int result; - - QDIO_DBF_TEXT0(0, setup, "getssqd"); - *ssqd_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC); - if (!ssqd_area) { - QDIO_PRINT_WARN("Could not get memory for chsc on sch x%x.\n", - schid->sch_no); - return -ENOMEM; - } - - (*ssqd_area)->request = (struct chsc_header) { - .length = 0x0010, - .code = 0x0024, - }; - (*ssqd_area)->first_sch = schid->sch_no; - (*ssqd_area)->last_sch = schid->sch_no; - (*ssqd_area)->ssid = schid->ssid; - result = chsc(*ssqd_area); - - if (result) { - QDIO_PRINT_WARN("CHSC returned cc %i on sch 0.%x.%x.\n", - result, schid->ssid, schid->sch_no); - goto out; - } - - if ((*ssqd_area)->response.code != QDIO_CHSC_RESPONSE_CODE_OK) { - QDIO_PRINT_WARN("CHSC response is 0x%x on sch 0.%x.%x.\n", - (*ssqd_area)->response.code, - schid->ssid, schid->sch_no); - goto out; - } - if (!((*ssqd_area)->flags & CHSC_FLAG_QDIO_CAPABILITY) || - !((*ssqd_area)->flags & CHSC_FLAG_VALIDITY) || - ((*ssqd_area)->sch != schid->sch_no)) { - QDIO_PRINT_WARN("huh? problems checking out sch 0.%x.%x... 
" \ - "using all SIGAs.\n", - schid->ssid, schid->sch_no); - goto out; - } - return 0; -out: - return -EINVAL; -} - -int -qdio_get_ssqd_pct(struct ccw_device *cdev) -{ - struct qdio_chsc_ssqd *ssqd_area; - struct subchannel_id schid; - char dbf_text[15]; - int rc; - int pct = 0; - - QDIO_DBF_TEXT0(0, setup, "getpct"); - schid = ccw_device_get_subchannel_id(cdev); - rc = qdio_get_ssqd_information(&schid, &ssqd_area); - if (!rc) - pct = (int)ssqd_area->pct; - if (rc != -ENOMEM) - mempool_free(ssqd_area, qdio_mempool_scssc); - sprintf(dbf_text, "pct: %d", pct); - QDIO_DBF_TEXT2(0, setup, dbf_text); - return pct; -} -EXPORT_SYMBOL(qdio_get_ssqd_pct); - -static void -qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned long token) -{ - struct qdio_q *q; - int i; - unsigned int count, start_buf; - char dbf_text[15]; - - /*check if QEBSM is disabled */ - if (!(irq_ptr->is_qebsm) || !(irq_ptr->qdioac & 0x01)) { - irq_ptr->is_qebsm = 0; - irq_ptr->sch_token = 0; - irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM; - QDIO_DBF_TEXT0(0,setup,"noV=V"); - return; - } - irq_ptr->sch_token = token; - /*input queue*/ - for (i = 0; i < irq_ptr->no_input_qs;i++) { - q = irq_ptr->input_qs[i]; - count = QDIO_MAX_BUFFERS_PER_Q; - start_buf = 0; - set_slsb(q, &start_buf, SLSB_P_INPUT_NOT_INIT, &count); - } - sprintf(dbf_text,"V=V:%2x",irq_ptr->is_qebsm); - QDIO_DBF_TEXT0(0,setup,dbf_text); - sprintf(dbf_text,"%8lx",irq_ptr->sch_token); - QDIO_DBF_TEXT0(0,setup,dbf_text); - /*output queue*/ - for (i = 0; i < irq_ptr->no_output_qs; i++) { - q = irq_ptr->output_qs[i]; - count = QDIO_MAX_BUFFERS_PER_Q; - start_buf = 0; - set_slsb(q, &start_buf, SLSB_P_OUTPUT_NOT_INIT, &count); - } -} - -static void -qdio_get_ssqd_siga(struct qdio_irq *irq_ptr) -{ - int rc; - struct qdio_chsc_ssqd *ssqd_area; - - QDIO_DBF_TEXT0(0,setup,"getssqd"); - irq_ptr->qdioac = 0; - rc = qdio_get_ssqd_information(&irq_ptr->schid, &ssqd_area); - if (rc) { - QDIO_PRINT_WARN("using all SIGAs for sch x%x.n", - irq_ptr->schid.sch_no); - irq_ptr->qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY | - CHSC_FLAG_SIGA_OUTPUT_NECESSARY | - CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */ - irq_ptr->is_qebsm = 0; - } else - irq_ptr->qdioac = ssqd_area->qdioac1; - - qdio_check_subchannel_qebsm(irq_ptr, ssqd_area->sch_token); - if (rc != -ENOMEM) - mempool_free(ssqd_area, qdio_mempool_scssc); -} - -static unsigned int -tiqdio_check_chsc_availability(void) -{ - char dbf_text[15]; - - if (!css_characteristics_avail) - return -EIO; - - /* Check for bit 41. */ - if (!css_general_characteristics.aif) { - QDIO_PRINT_WARN("Adapter interruption facility not " \ - "installed.\n"); - return -ENOENT; - } - - /* Check for bits 107 and 108. */ - if (!css_chsc_characteristics.scssc || - !css_chsc_characteristics.scsscf) { - QDIO_PRINT_WARN("Set Chan Subsys. Char. & Fast-CHSCs " \ - "not available.\n"); - return -ENOENT; - } - - /* Check for OSA/FCP thin interrupts (bit 67). */ - hydra_thinints = css_general_characteristics.aif_osa; - sprintf(dbf_text,"hydrati%1x", hydra_thinints); - QDIO_DBF_TEXT0(0,setup,dbf_text); - -#ifdef CONFIG_64BIT - /* Check for QEBSM support in general (bit 58). */ - is_passthrough = css_general_characteristics.qebsm; -#endif - sprintf(dbf_text,"cssQBS:%1x", is_passthrough); - QDIO_DBF_TEXT0(0,setup,dbf_text); - - /* Check for aif time delay disablement fac (bit 56). 
If installed, - * omit svs even under lpar (good point by rick again) */ - omit_svs = css_general_characteristics.aif_tdd; - sprintf(dbf_text,"omitsvs%1x", omit_svs); - QDIO_DBF_TEXT0(0,setup,dbf_text); - return 0; -} - - -static unsigned int -tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero) -{ - unsigned long real_addr_local_summary_bit; - unsigned long real_addr_dev_st_chg_ind; - void *ptr; - char dbf_text[15]; - - unsigned int resp_code; - int result; - - struct { - struct chsc_header request; - u16 operation_code; - u16 reserved1; - u32 reserved2; - u32 reserved3; - u64 summary_indicator_addr; - u64 subchannel_indicator_addr; - u32 ks:4; - u32 kc:4; - u32 reserved4:21; - u32 isc:3; - u32 word_with_d_bit; - /* set to 0x10000000 to enable - * time delay disablement facility */ - u32 reserved5; - struct subchannel_id schid; - u32 reserved6[1004]; - struct chsc_header response; - u32 reserved7; - } *scssc_area; - - if (!irq_ptr->is_thinint_irq) - return -ENODEV; - - if (reset_to_zero) { - real_addr_local_summary_bit=0; - real_addr_dev_st_chg_ind=0; - } else { - real_addr_local_summary_bit= - virt_to_phys((volatile void *)tiqdio_ind); - real_addr_dev_st_chg_ind= - virt_to_phys((volatile void *)irq_ptr->dev_st_chg_ind); - } - - scssc_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC); - if (!scssc_area) { - QDIO_PRINT_WARN("No memory for setting indicators on " \ - "subchannel 0.%x.%x.\n", - irq_ptr->schid.ssid, irq_ptr->schid.sch_no); - return -ENOMEM; - } - scssc_area->request = (struct chsc_header) { - .length = 0x0fe0, - .code = 0x0021, - }; - scssc_area->operation_code = 0; - - scssc_area->summary_indicator_addr = real_addr_local_summary_bit; - scssc_area->subchannel_indicator_addr = real_addr_dev_st_chg_ind; - scssc_area->ks = QDIO_STORAGE_KEY; - scssc_area->kc = QDIO_STORAGE_KEY; - scssc_area->isc = TIQDIO_THININT_ISC; - scssc_area->schid = irq_ptr->schid; - /* enables the time delay disablement facility. Don't care - * whether it is really there (i.e. 
we haven't checked for - * it) */ - if (css_general_characteristics.aif_tdd) - scssc_area->word_with_d_bit = 0x10000000; - else - QDIO_PRINT_WARN("Time delay disablement facility " \ - "not available\n"); - - result = chsc(scssc_area); - if (result) { - QDIO_PRINT_WARN("could not set indicators on irq 0.%x.%x, " \ - "cc=%i.\n", - irq_ptr->schid.ssid, irq_ptr->schid.sch_no,result); - result = -EIO; - goto out; - } - - resp_code = scssc_area->response.code; - if (resp_code!=QDIO_CHSC_RESPONSE_CODE_OK) { - QDIO_PRINT_WARN("response upon setting indicators " \ - "is 0x%x.\n",resp_code); - sprintf(dbf_text,"sidR%4x",resp_code); - QDIO_DBF_TEXT1(0,trace,dbf_text); - QDIO_DBF_TEXT1(0,setup,dbf_text); - ptr=&scssc_area->response; - QDIO_DBF_HEX2(1,setup,&ptr,QDIO_DBF_SETUP_LEN); - result = -EIO; - goto out; - } - - QDIO_DBF_TEXT2(0,setup,"setscind"); - QDIO_DBF_HEX2(0,setup,&real_addr_local_summary_bit, - sizeof(unsigned long)); - QDIO_DBF_HEX2(0,setup,&real_addr_dev_st_chg_ind,sizeof(unsigned long)); - result = 0; -out: - mempool_free(scssc_area, qdio_mempool_scssc); - return result; - -} - -static unsigned int -tiqdio_set_delay_target(struct qdio_irq *irq_ptr, unsigned long delay_target) -{ - unsigned int resp_code; - int result; - void *ptr; - char dbf_text[15]; - - struct { - struct chsc_header request; - u16 operation_code; - u16 reserved1; - u32 reserved2; - u32 reserved3; - u32 reserved4[2]; - u32 delay_target; - u32 reserved5[1009]; - struct chsc_header response; - u32 reserved6; - } *scsscf_area; - - if (!irq_ptr->is_thinint_irq) - return -ENODEV; - - scsscf_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC); - if (!scsscf_area) { - QDIO_PRINT_WARN("No memory for setting delay target on " \ - "subchannel 0.%x.%x.\n", - irq_ptr->schid.ssid, irq_ptr->schid.sch_no); - return -ENOMEM; - } - scsscf_area->request = (struct chsc_header) { - .length = 0x0fe0, - .code = 0x1027, - }; - - scsscf_area->delay_target = delay_target<<16; - - result=chsc(scsscf_area); - if (result) { - QDIO_PRINT_WARN("could not set delay target on irq 0.%x.%x, " \ - "cc=%i. Continuing.\n", - irq_ptr->schid.ssid, irq_ptr->schid.sch_no, - result); - result = -EIO; - goto out; - } - - resp_code = scsscf_area->response.code; - if (resp_code!=QDIO_CHSC_RESPONSE_CODE_OK) { - QDIO_PRINT_WARN("response upon setting delay target " \ - "is 0x%x. 
Continuing.\n",resp_code); - sprintf(dbf_text,"sdtR%4x",resp_code); - QDIO_DBF_TEXT1(0,trace,dbf_text); - QDIO_DBF_TEXT1(0,setup,dbf_text); - ptr=&scsscf_area->response; - QDIO_DBF_HEX2(1,trace,&ptr,QDIO_DBF_TRACE_LEN); - } - QDIO_DBF_TEXT2(0,trace,"delytrgt"); - QDIO_DBF_HEX2(0,trace,&delay_target,sizeof(unsigned long)); - result = 0; /* not critical */ -out: - mempool_free(scsscf_area, qdio_mempool_scssc); - return result; -} - -int -qdio_cleanup(struct ccw_device *cdev, int how) -{ - struct qdio_irq *irq_ptr; - char dbf_text[15]; - int rc; - - irq_ptr = cdev->private->qdio_data; - if (!irq_ptr) - return -ENODEV; - - sprintf(dbf_text,"qcln%4x",irq_ptr->schid.sch_no); - QDIO_DBF_TEXT1(0,trace,dbf_text); - QDIO_DBF_TEXT0(0,setup,dbf_text); - - rc = qdio_shutdown(cdev, how); - if ((rc == 0) || (rc == -EINPROGRESS)) - rc = qdio_free(cdev); - return rc; -} - -int -qdio_shutdown(struct ccw_device *cdev, int how) -{ - struct qdio_irq *irq_ptr; - int i; - int result = 0; - int rc; - unsigned long flags; - int timeout; - char dbf_text[15]; - - irq_ptr = cdev->private->qdio_data; - if (!irq_ptr) - return -ENODEV; - - down(&irq_ptr->setting_up_sema); - - sprintf(dbf_text,"qsqs%4x",irq_ptr->schid.sch_no); - QDIO_DBF_TEXT1(0,trace,dbf_text); - QDIO_DBF_TEXT0(0,setup,dbf_text); - - /* mark all qs as uninteresting */ - for (i=0;i<irq_ptr->no_input_qs;i++) - atomic_set(&irq_ptr->input_qs[i]->is_in_shutdown,1); - - for (i=0;i<irq_ptr->no_output_qs;i++) - atomic_set(&irq_ptr->output_qs[i]->is_in_shutdown,1); - - tasklet_kill(&tiqdio_tasklet); - - for (i=0;i<irq_ptr->no_input_qs;i++) { - qdio_unmark_q(irq_ptr->input_qs[i]); - tasklet_kill(&irq_ptr->input_qs[i]->tasklet); - wait_event_interruptible_timeout(cdev->private->wait_q, - !atomic_read(&irq_ptr-> - input_qs[i]-> - use_count), - QDIO_NO_USE_COUNT_TIMEOUT); - if (atomic_read(&irq_ptr->input_qs[i]->use_count)) - result=-EINPROGRESS; - } - - for (i=0;i<irq_ptr->no_output_qs;i++) { - tasklet_kill(&irq_ptr->output_qs[i]->tasklet); - del_timer(&irq_ptr->output_qs[i]->timer); - wait_event_interruptible_timeout(cdev->private->wait_q, - !atomic_read(&irq_ptr-> - output_qs[i]-> - use_count), - QDIO_NO_USE_COUNT_TIMEOUT); - if (atomic_read(&irq_ptr->output_qs[i]->use_count)) - result=-EINPROGRESS; - } - - /* cleanup subchannel */ - spin_lock_irqsave(get_ccwdev_lock(cdev),flags); - if (how&QDIO_FLAG_CLEANUP_USING_CLEAR) { - rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP); - timeout=QDIO_CLEANUP_CLEAR_TIMEOUT; - } else if (how&QDIO_FLAG_CLEANUP_USING_HALT) { - rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP); - timeout=QDIO_CLEANUP_HALT_TIMEOUT; - } else { /* default behaviour */ - rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP); - timeout=QDIO_CLEANUP_HALT_TIMEOUT; - } - if (rc == -ENODEV) { - /* No need to wait for a device that is no longer present. */ - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); - spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); - } else if (((void *)cdev->handler != (void *)qdio_handler) && rc == 0) { - /* - * Whoever put another handler there has to cope with the - * interrupt themselves. Might happen if qdio_shutdown was - * called on already shutdown queues, but this shouldn't have - * bad side effects.
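The teardown above runs in a fixed order: mark every queue is_in_shutdown so no tasklet re-arms itself, kill the tasklets and timers, wait for each queue's use_count to drain, and only then clear or halt the subchannel. The waiting step is a generic drain-with-timeout; condensed into one helper built on the same kernel primitives, as a sketch rather than the original code:

/* wait up to 'timeout' jiffies for all users of a queue to finish;
 * report -EINPROGRESS, as qdio_shutdown does, if stragglers remain */
static int drain_use_count(wait_queue_head_t *wq, atomic_t *use_count,
			   long timeout)
{
	wait_event_interruptible_timeout(*wq, !atomic_read(use_count),
					 timeout);
	return atomic_read(use_count) ? -EINPROGRESS : 0;
}

Note that a queue failing to drain does not abort the shutdown; the -EINPROGRESS result is only propagated upward, and qdio_cleanup still goes on to call qdio_free in that case.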
- */ - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); - spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); - } else if (rc == 0) { - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP); - ccw_device_set_timeout(cdev, timeout); - spin_unlock_irqrestore(get_ccwdev_lock(cdev),flags); - - wait_event(cdev->private->wait_q, - irq_ptr->state == QDIO_IRQ_STATE_INACTIVE || - irq_ptr->state == QDIO_IRQ_STATE_ERR); - } else { - QDIO_PRINT_INFO("ccw_device_{halt,clear} returned %d for " - "device %s\n", result, cdev->dev.bus_id); - spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); - result = rc; - goto out; - } - if (irq_ptr->is_thinint_irq) { - qdio_put_indicator((__u32*)irq_ptr->dev_st_chg_ind); - tiqdio_set_subchannel_ind(irq_ptr,1); - /* reset adapter interrupt indicators */ - } - - /* exchange int handlers, if necessary */ - if ((void*)cdev->handler == (void*)qdio_handler) - cdev->handler=irq_ptr->original_int_handler; - - /* Ignore errors. */ - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); - ccw_device_set_timeout(cdev, 0); -out: - up(&irq_ptr->setting_up_sema); - return result; -} - -int -qdio_free(struct ccw_device *cdev) -{ - struct qdio_irq *irq_ptr; - char dbf_text[15]; - - irq_ptr = cdev->private->qdio_data; - if (!irq_ptr) - return -ENODEV; - - down(&irq_ptr->setting_up_sema); - - sprintf(dbf_text,"qfqs%4x",irq_ptr->schid.sch_no); - QDIO_DBF_TEXT1(0,trace,dbf_text); - QDIO_DBF_TEXT0(0,setup,dbf_text); - - cdev->private->qdio_data = NULL; - - up(&irq_ptr->setting_up_sema); - - qdio_release_irq_memory(irq_ptr); - module_put(THIS_MODULE); - return 0; -} - -static void -qdio_allocate_do_dbf(struct qdio_initialize *init_data) -{ - char dbf_text[20]; /* if a printf printed out more than 8 chars */ - - sprintf(dbf_text,"qfmt:%x",init_data->q_format); - QDIO_DBF_TEXT0(0,setup,dbf_text); - QDIO_DBF_HEX0(0,setup,init_data->adapter_name,8); - sprintf(dbf_text,"qpff%4x",init_data->qib_param_field_format); - QDIO_DBF_TEXT0(0,setup,dbf_text); - QDIO_DBF_HEX0(0,setup,&init_data->qib_param_field,sizeof(char*)); - QDIO_DBF_HEX0(0,setup,&init_data->input_slib_elements,sizeof(long*)); - QDIO_DBF_HEX0(0,setup,&init_data->output_slib_elements,sizeof(long*)); - sprintf(dbf_text,"miit%4x",init_data->min_input_threshold); - QDIO_DBF_TEXT0(0,setup,dbf_text); - sprintf(dbf_text,"mait%4x",init_data->max_input_threshold); - QDIO_DBF_TEXT0(0,setup,dbf_text); - sprintf(dbf_text,"miot%4x",init_data->min_output_threshold); - QDIO_DBF_TEXT0(0,setup,dbf_text); - sprintf(dbf_text,"maot%4x",init_data->max_output_threshold); - QDIO_DBF_TEXT0(0,setup,dbf_text); - sprintf(dbf_text,"niq:%4x",init_data->no_input_qs); - QDIO_DBF_TEXT0(0,setup,dbf_text); - sprintf(dbf_text,"noq:%4x",init_data->no_output_qs); - QDIO_DBF_TEXT0(0,setup,dbf_text); - QDIO_DBF_HEX0(0,setup,&init_data->input_handler,sizeof(void*)); - QDIO_DBF_HEX0(0,setup,&init_data->output_handler,sizeof(void*)); - QDIO_DBF_HEX0(0,setup,&init_data->int_parm,sizeof(long)); - QDIO_DBF_HEX0(0,setup,&init_data->flags,sizeof(long)); - QDIO_DBF_HEX0(0,setup,&init_data->input_sbal_addr_array,sizeof(void*)); - QDIO_DBF_HEX0(0,setup,&init_data->output_sbal_addr_array,sizeof(void*)); -} - -static void -qdio_allocate_fill_input_desc(struct qdio_irq *irq_ptr, int i, int iqfmt) -{ - irq_ptr->input_qs[i]->is_iqdio_q = iqfmt; - irq_ptr->input_qs[i]->is_thinint_q = irq_ptr->is_thinint_irq; - - irq_ptr->qdr->qdf0[i].sliba=(unsigned long)(irq_ptr->input_qs[i]->slib); - - irq_ptr->qdr->qdf0[i].sla=(unsigned long)(irq_ptr->input_qs[i]->sl); - - irq_ptr->qdr->qdf0[i].slsba= - 
(unsigned long)(&irq_ptr->input_qs[i]->slsb.acc.val[0]); - - irq_ptr->qdr->qdf0[i].akey=QDIO_STORAGE_KEY; - irq_ptr->qdr->qdf0[i].bkey=QDIO_STORAGE_KEY; - irq_ptr->qdr->qdf0[i].ckey=QDIO_STORAGE_KEY; - irq_ptr->qdr->qdf0[i].dkey=QDIO_STORAGE_KEY; -} - -static void -qdio_allocate_fill_output_desc(struct qdio_irq *irq_ptr, int i, - int j, int iqfmt) -{ - irq_ptr->output_qs[i]->is_iqdio_q = iqfmt; - irq_ptr->output_qs[i]->is_thinint_q = irq_ptr->is_thinint_irq; - - irq_ptr->qdr->qdf0[i+j].sliba=(unsigned long)(irq_ptr->output_qs[i]->slib); - - irq_ptr->qdr->qdf0[i+j].sla=(unsigned long)(irq_ptr->output_qs[i]->sl); - - irq_ptr->qdr->qdf0[i+j].slsba= - (unsigned long)(&irq_ptr->output_qs[i]->slsb.acc.val[0]); - - irq_ptr->qdr->qdf0[i+j].akey=QDIO_STORAGE_KEY; - irq_ptr->qdr->qdf0[i+j].bkey=QDIO_STORAGE_KEY; - irq_ptr->qdr->qdf0[i+j].ckey=QDIO_STORAGE_KEY; - irq_ptr->qdr->qdf0[i+j].dkey=QDIO_STORAGE_KEY; -} - - -static void -qdio_initialize_set_siga_flags_input(struct qdio_irq *irq_ptr) -{ - int i; - - for (i=0;i<irq_ptr->no_input_qs;i++) { - irq_ptr->input_qs[i]->siga_sync= - irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY; - irq_ptr->input_qs[i]->siga_in= - irq_ptr->qdioac&CHSC_FLAG_SIGA_INPUT_NECESSARY; - irq_ptr->input_qs[i]->siga_out= - irq_ptr->qdioac&CHSC_FLAG_SIGA_OUTPUT_NECESSARY; - irq_ptr->input_qs[i]->siga_sync_done_on_thinints= - irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS; - irq_ptr->input_qs[i]->hydra_gives_outbound_pcis= - irq_ptr->hydra_gives_outbound_pcis; - irq_ptr->input_qs[i]->siga_sync_done_on_outb_tis= - ((irq_ptr->qdioac& - (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS| - CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS))== - (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS| - CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS)); - - } -} - -static void -qdio_initialize_set_siga_flags_output(struct qdio_irq *irq_ptr) -{ - int i; - - for (i=0;i<irq_ptr->no_output_qs;i++) { - irq_ptr->output_qs[i]->siga_sync= - irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY; - irq_ptr->output_qs[i]->siga_in= - irq_ptr->qdioac&CHSC_FLAG_SIGA_INPUT_NECESSARY; - irq_ptr->output_qs[i]->siga_out= - irq_ptr->qdioac&CHSC_FLAG_SIGA_OUTPUT_NECESSARY; - irq_ptr->output_qs[i]->siga_sync_done_on_thinints= - irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS; - irq_ptr->output_qs[i]->hydra_gives_outbound_pcis= - irq_ptr->hydra_gives_outbound_pcis; - irq_ptr->output_qs[i]->siga_sync_done_on_outb_tis= - ((irq_ptr->qdioac& - (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS| - CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS))== - (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS| - CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS)); - - } -} - -static int -qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat, - int dstat) -{ - char dbf_text[15]; - struct qdio_irq *irq_ptr; - - irq_ptr = cdev->private->qdio_data; - - if (cstat || (dstat & ~(DEV_STAT_CHN_END|DEV_STAT_DEV_END))) { - sprintf(dbf_text,"ick1%4x",irq_ptr->schid.sch_no); - QDIO_DBF_TEXT2(1,trace,dbf_text); - QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int)); - QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int)); - QDIO_PRINT_ERR("received check condition on establish " \ - "queues on irq 0.%x.%x (cs=x%x, ds=x%x).\n", - irq_ptr->schid.ssid, irq_ptr->schid.sch_no, - cstat,dstat); - qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ERR); - } - - if (!(dstat & DEV_STAT_DEV_END)) { - QDIO_DBF_TEXT2(1,setup,"eq:no de"); - QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat)); - QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat)); - QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: didn't get " - "device end: dstat=%02x, cstat=%02x\n", - irq_ptr->schid.ssid, 
irq_ptr->schid.sch_no, - dstat, cstat); - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); - return 1; - } - - if (dstat & ~(DEV_STAT_CHN_END|DEV_STAT_DEV_END)) { - QDIO_DBF_TEXT2(1,setup,"eq:badio"); - QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat)); - QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat)); - QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: got " - "the following devstat: dstat=%02x, " - "cstat=%02x\n", irq_ptr->schid.ssid, - irq_ptr->schid.sch_no, dstat, cstat); - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); - return 1; - } - return 0; -} - -static void -qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, int dstat) -{ - struct qdio_irq *irq_ptr; - char dbf_text[15]; - - irq_ptr = cdev->private->qdio_data; - - sprintf(dbf_text,"qehi%4x",cdev->private->schid.sch_no); - QDIO_DBF_TEXT0(0,setup,dbf_text); - QDIO_DBF_TEXT0(0,trace,dbf_text); - - if (qdio_establish_irq_check_for_errors(cdev, cstat, dstat)) { - ccw_device_set_timeout(cdev, 0); - return; - } - - qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ESTABLISHED); - ccw_device_set_timeout(cdev, 0); -} - -int -qdio_initialize(struct qdio_initialize *init_data) -{ - int rc; - char dbf_text[15]; - - sprintf(dbf_text,"qini%4x",init_data->cdev->private->schid.sch_no); - QDIO_DBF_TEXT0(0,setup,dbf_text); - QDIO_DBF_TEXT0(0,trace,dbf_text); - - rc = qdio_allocate(init_data); - if (rc == 0) { - rc = qdio_establish(init_data); - if (rc != 0) - qdio_free(init_data->cdev); - } - - return rc; -} - - -int -qdio_allocate(struct qdio_initialize *init_data) -{ - struct qdio_irq *irq_ptr; - char dbf_text[15]; - - sprintf(dbf_text,"qalc%4x",init_data->cdev->private->schid.sch_no); - QDIO_DBF_TEXT0(0,setup,dbf_text); - QDIO_DBF_TEXT0(0,trace,dbf_text); - if ( (init_data->no_input_qs>QDIO_MAX_QUEUES_PER_IRQ) || - (init_data->no_output_qs>QDIO_MAX_QUEUES_PER_IRQ) || - ((init_data->no_input_qs) && (!init_data->input_handler)) || - ((init_data->no_output_qs) && (!init_data->output_handler)) ) - return -EINVAL; - - if (!init_data->input_sbal_addr_array) - return -EINVAL; - - if (!init_data->output_sbal_addr_array) - return -EINVAL; - - qdio_allocate_do_dbf(init_data); - - /* create irq */ - irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); - - QDIO_DBF_TEXT0(0,setup,"irq_ptr:"); - QDIO_DBF_HEX0(0,setup,&irq_ptr,sizeof(void*)); - - if (!irq_ptr) { - QDIO_PRINT_ERR("allocation of irq_ptr failed!\n"); - return -ENOMEM; - } - - init_MUTEX(&irq_ptr->setting_up_sema); - - /* QDR must be in DMA area since CCW data address is only 32 bit */ - irq_ptr->qdr = (struct qdr *) __get_free_page(GFP_KERNEL | GFP_DMA); - if (!(irq_ptr->qdr)) { - free_page((unsigned long) irq_ptr); - QDIO_PRINT_ERR("allocation of irq_ptr->qdr failed!\n"); - return -ENOMEM; - } - QDIO_DBF_TEXT0(0,setup,"qdr:"); - QDIO_DBF_HEX0(0,setup,&irq_ptr->qdr,sizeof(void*)); - - if (qdio_alloc_qs(irq_ptr, - init_data->no_input_qs, - init_data->no_output_qs)) { - QDIO_PRINT_ERR("queue allocation failed!\n"); - qdio_release_irq_memory(irq_ptr); - return -ENOMEM; - } - - init_data->cdev->private->qdio_data = irq_ptr; - - qdio_set_state(irq_ptr,QDIO_IRQ_STATE_INACTIVE); - - return 0; -} - -static int qdio_fill_irq(struct qdio_initialize *init_data) -{ - int i; - char dbf_text[15]; - struct ciw *ciw; - int is_iqdio; - struct qdio_irq *irq_ptr; - - irq_ptr = init_data->cdev->private->qdio_data; - - memset(irq_ptr,0,((char*)&irq_ptr->qdr)-((char*)irq_ptr)); - - /* wipes qib.ac, required by ar7063 */ - memset(irq_ptr->qdr,0,sizeof(struct qdr)); - - irq_ptr->int_parm=init_data->int_parm; - - 
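/*
 * [Editorial note -- sketch, not part of this commit.]
 * Two points in the removed allocation path above:
 *
 * 1. qdio_initialize() is qdio_allocate() plus qdio_establish() with
 *    rollback -- if the establish step fails, the fresh allocation is
 *    freed again so the caller never leaks the irq_ptr page:
 *
 *	rc = qdio_allocate(init_data);
 *	if (rc == 0) {
 *		rc = qdio_establish(init_data);
 *		if (rc != 0)
 *			qdio_free(init_data->cdev);	// roll back
 *	}
 *
 * 2. The QDR page is allocated GFP_KERNEL | GFP_DMA because the CCW
 *    that hands it to the adapter carries only a 32-bit data address
 *    (see the QDIO_GET_ADDR() cast in the removed qdio.h below), so
 *    the page must live in low memory.
 */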
irq_ptr->schid = ccw_device_get_subchannel_id(init_data->cdev); - irq_ptr->no_input_qs=init_data->no_input_qs; - irq_ptr->no_output_qs=init_data->no_output_qs; - - if (init_data->q_format==QDIO_IQDIO_QFMT) { - irq_ptr->is_iqdio_irq=1; - irq_ptr->is_thinint_irq=1; - } else { - irq_ptr->is_iqdio_irq=0; - irq_ptr->is_thinint_irq=hydra_thinints; - } - sprintf(dbf_text,"is_i_t%1x%1x", - irq_ptr->is_iqdio_irq,irq_ptr->is_thinint_irq); - QDIO_DBF_TEXT2(0,setup,dbf_text); - - if (irq_ptr->is_thinint_irq) { - irq_ptr->dev_st_chg_ind = qdio_get_indicator(); - QDIO_DBF_HEX1(0,setup,&irq_ptr->dev_st_chg_ind,sizeof(void*)); - if (!irq_ptr->dev_st_chg_ind) { - QDIO_PRINT_WARN("no indicator location available " \ - "for irq 0.%x.%x\n", - irq_ptr->schid.ssid, irq_ptr->schid.sch_no); - qdio_release_irq_memory(irq_ptr); - return -ENOBUFS; - } - } - - /* defaults */ - irq_ptr->equeue.cmd=DEFAULT_ESTABLISH_QS_CMD; - irq_ptr->equeue.count=DEFAULT_ESTABLISH_QS_COUNT; - irq_ptr->aqueue.cmd=DEFAULT_ACTIVATE_QS_CMD; - irq_ptr->aqueue.count=DEFAULT_ACTIVATE_QS_COUNT; - - qdio_fill_qs(irq_ptr, init_data->cdev, - init_data->no_input_qs, - init_data->no_output_qs, - init_data->input_handler, - init_data->output_handler,init_data->int_parm, - init_data->q_format,init_data->flags, - init_data->input_sbal_addr_array, - init_data->output_sbal_addr_array); - - if (!try_module_get(THIS_MODULE)) { - QDIO_PRINT_CRIT("try_module_get() failed!\n"); - qdio_release_irq_memory(irq_ptr); - return -EINVAL; - } - - qdio_fill_thresholds(irq_ptr,init_data->no_input_qs, - init_data->no_output_qs, - init_data->min_input_threshold, - init_data->max_input_threshold, - init_data->min_output_threshold, - init_data->max_output_threshold); - - /* fill in qdr */ - irq_ptr->qdr->qfmt=init_data->q_format; - irq_ptr->qdr->iqdcnt=init_data->no_input_qs; - irq_ptr->qdr->oqdcnt=init_data->no_output_qs; - irq_ptr->qdr->iqdsz=sizeof(struct qdesfmt0)/4; /* size in words */ - irq_ptr->qdr->oqdsz=sizeof(struct qdesfmt0)/4; - - irq_ptr->qdr->qiba=(unsigned long)&irq_ptr->qib; - irq_ptr->qdr->qkey=QDIO_STORAGE_KEY; - - /* fill in qib */ - irq_ptr->is_qebsm = is_passthrough; - if (irq_ptr->is_qebsm) - irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM; - - irq_ptr->qib.qfmt=init_data->q_format; - if (init_data->no_input_qs) - irq_ptr->qib.isliba=(unsigned long)(irq_ptr->input_qs[0]->slib); - if (init_data->no_output_qs) - irq_ptr->qib.osliba=(unsigned long)(irq_ptr->output_qs[0]->slib); - memcpy(irq_ptr->qib.ebcnam,init_data->adapter_name,8); - - qdio_set_impl_params(irq_ptr,init_data->qib_param_field_format, - init_data->qib_param_field, - init_data->no_input_qs, - init_data->no_output_qs, - init_data->input_slib_elements, - init_data->output_slib_elements); - - /* first input descriptors, then output descriptors */ - is_iqdio = (init_data->q_format == QDIO_IQDIO_QFMT) ? 1 : 0; - for (i=0;i<init_data->no_input_qs;i++) - qdio_allocate_fill_input_desc(irq_ptr, i, is_iqdio); - - for (i=0;i<init_data->no_output_qs;i++) - qdio_allocate_fill_output_desc(irq_ptr, i, - init_data->no_input_qs, - is_iqdio); - - /* qdr, qib, sls, slsbs, slibs, sbales filled. */ - - /* get qdio commands */ - ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE); - if (!ciw) { - QDIO_DBF_TEXT2(1,setup,"no eq"); - QDIO_PRINT_INFO("No equeue CIW found for QDIO commands. 
" - "Trying to use default.\n"); - } else - irq_ptr->equeue = *ciw; - ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE); - if (!ciw) { - QDIO_DBF_TEXT2(1,setup,"no aq"); - QDIO_PRINT_INFO("No aqueue CIW found for QDIO commands. " - "Trying to use default.\n"); - } else - irq_ptr->aqueue = *ciw; - - /* Set new interrupt handler. */ - irq_ptr->original_int_handler = init_data->cdev->handler; - init_data->cdev->handler = qdio_handler; - - return 0; -} - -int -qdio_establish(struct qdio_initialize *init_data) -{ - struct qdio_irq *irq_ptr; - unsigned long saveflags; - int result, result2; - struct ccw_device *cdev; - char dbf_text[20]; - - cdev=init_data->cdev; - irq_ptr = cdev->private->qdio_data; - if (!irq_ptr) - return -EINVAL; - - if (cdev->private->state != DEV_STATE_ONLINE) - return -EINVAL; - - down(&irq_ptr->setting_up_sema); - - qdio_fill_irq(init_data); - - /* the thinint CHSC stuff */ - if (irq_ptr->is_thinint_irq) { - - result = tiqdio_set_subchannel_ind(irq_ptr,0); - if (result) { - up(&irq_ptr->setting_up_sema); - qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); - return result; - } - tiqdio_set_delay_target(irq_ptr,TIQDIO_DELAY_TARGET); - } - - sprintf(dbf_text,"qest%4x",cdev->private->schid.sch_no); - QDIO_DBF_TEXT0(0,setup,dbf_text); - QDIO_DBF_TEXT0(0,trace,dbf_text); - - /* establish q */ - irq_ptr->ccw.cmd_code=irq_ptr->equeue.cmd; - irq_ptr->ccw.flags=CCW_FLAG_SLI; - irq_ptr->ccw.count=irq_ptr->equeue.count; - irq_ptr->ccw.cda=QDIO_GET_ADDR(irq_ptr->qdr); - - spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags); - - ccw_device_set_options_mask(cdev, 0); - result = ccw_device_start(cdev, &irq_ptr->ccw, - QDIO_DOING_ESTABLISH, 0, 0); - if (result) { - result2 = ccw_device_start(cdev, &irq_ptr->ccw, - QDIO_DOING_ESTABLISH, 0, 0); - sprintf(dbf_text,"eq:io%4x",result); - QDIO_DBF_TEXT2(1,setup,dbf_text); - if (result2) { - sprintf(dbf_text,"eq:io%4x",result); - QDIO_DBF_TEXT2(1,setup,dbf_text); - } - QDIO_PRINT_WARN("establish queues on irq 0.%x.%04x: do_IO " \ - "returned %i, next try returned %i\n", - irq_ptr->schid.ssid, irq_ptr->schid.sch_no, - result, result2); - result=result2; - if (result) - ccw_device_set_timeout(cdev, 0); - } - - spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags); - - if (result) { - up(&irq_ptr->setting_up_sema); - qdio_shutdown(cdev,QDIO_FLAG_CLEANUP_USING_CLEAR); - return result; - } - - wait_event_interruptible_timeout(cdev->private->wait_q, - irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED || - irq_ptr->state == QDIO_IRQ_STATE_ERR, - QDIO_ESTABLISH_TIMEOUT); - - if (irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED) - result = 0; - else { - up(&irq_ptr->setting_up_sema); - qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); - return -EIO; - } - - qdio_get_ssqd_siga(irq_ptr); - /* if this gets set once, we're running under VM and can omit SVSes */ - if (irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY) - omit_svs=1; - - sprintf(dbf_text,"qdioac%2x",irq_ptr->qdioac); - QDIO_DBF_TEXT2(0,setup,dbf_text); - - sprintf(dbf_text,"qib ac%2x",irq_ptr->qib.ac); - QDIO_DBF_TEXT2(0,setup,dbf_text); - - irq_ptr->hydra_gives_outbound_pcis= - irq_ptr->qib.ac&QIB_AC_OUTBOUND_PCI_SUPPORTED; - irq_ptr->sync_done_on_outb_pcis= - irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS; - - qdio_initialize_set_siga_flags_input(irq_ptr); - qdio_initialize_set_siga_flags_output(irq_ptr); - - up(&irq_ptr->setting_up_sema); - - return result; - -} - -int -qdio_activate(struct ccw_device *cdev, int flags) -{ - struct qdio_irq *irq_ptr; - int i,result=0,result2; - 
unsigned long saveflags; - char dbf_text[20]; /* see qdio_initialize */ - - irq_ptr = cdev->private->qdio_data; - if (!irq_ptr) - return -ENODEV; - - if (cdev->private->state != DEV_STATE_ONLINE) - return -EINVAL; - - down(&irq_ptr->setting_up_sema); - if (irq_ptr->state==QDIO_IRQ_STATE_INACTIVE) { - result=-EBUSY; - goto out; - } - - sprintf(dbf_text,"qact%4x", irq_ptr->schid.sch_no); - QDIO_DBF_TEXT2(0,setup,dbf_text); - QDIO_DBF_TEXT2(0,trace,dbf_text); - - /* activate q */ - irq_ptr->ccw.cmd_code=irq_ptr->aqueue.cmd; - irq_ptr->ccw.flags=CCW_FLAG_SLI; - irq_ptr->ccw.count=irq_ptr->aqueue.count; - irq_ptr->ccw.cda=QDIO_GET_ADDR(0); - - spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags); - - ccw_device_set_timeout(cdev, 0); - ccw_device_set_options(cdev, CCWDEV_REPORT_ALL); - result=ccw_device_start(cdev,&irq_ptr->ccw,QDIO_DOING_ACTIVATE, - 0, DOIO_DENY_PREFETCH); - if (result) { - result2=ccw_device_start(cdev,&irq_ptr->ccw, - QDIO_DOING_ACTIVATE,0,0); - sprintf(dbf_text,"aq:io%4x",result); - QDIO_DBF_TEXT2(1,setup,dbf_text); - if (result2) { - sprintf(dbf_text,"aq:io%4x",result); - QDIO_DBF_TEXT2(1,setup,dbf_text); - } - QDIO_PRINT_WARN("activate queues on irq 0.%x.%04x: do_IO " \ - "returned %i, next try returned %i\n", - irq_ptr->schid.ssid, irq_ptr->schid.sch_no, - result, result2); - result=result2; - } - - spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags); - if (result) - goto out; - - for (i=0;i<irq_ptr->no_input_qs;i++) { - if (irq_ptr->is_thinint_irq) { - /* - * that way we know, that, if we will get interrupted - * by tiqdio_inbound_processing, qdio_unmark_q will - * not be called - */ - qdio_reserve_q(irq_ptr->input_qs[i]); - qdio_mark_tiq(irq_ptr->input_qs[i]); - qdio_release_q(irq_ptr->input_qs[i]); - } - } - - if (flags&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT) { - for (i=0;i<irq_ptr->no_input_qs;i++) { - irq_ptr->input_qs[i]->is_input_q|= - QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT; - } - } - - msleep(QDIO_ACTIVATE_TIMEOUT); - switch (irq_ptr->state) { - case QDIO_IRQ_STATE_STOPPED: - case QDIO_IRQ_STATE_ERR: - up(&irq_ptr->setting_up_sema); - qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); - down(&irq_ptr->setting_up_sema); - result = -EIO; - break; - default: - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE); - result = 0; - } - out: - up(&irq_ptr->setting_up_sema); - - return result; -} - -/* buffers filled forwards again to make Rick happy */ -static void -qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx, - unsigned int count, struct qdio_buffer *buffers) -{ - struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr; - int tmp = 0; - - qidx &= (QDIO_MAX_BUFFERS_PER_Q - 1); - if (irq->is_qebsm) { - while (count) { - tmp = set_slsb(q, &qidx, SLSB_CU_INPUT_EMPTY, &count); - if (!tmp) - return; - } - return; - } - for (;;) { - set_slsb(q, &qidx, SLSB_CU_INPUT_EMPTY, &count); - count--; - if (!count) break; - qidx = (qidx + 1) & (QDIO_MAX_BUFFERS_PER_Q - 1); - } -} - -static void -qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx, - unsigned int count, struct qdio_buffer *buffers) -{ - struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr; - int tmp = 0; - - qidx &= (QDIO_MAX_BUFFERS_PER_Q - 1); - if (irq->is_qebsm) { - while (count) { - tmp = set_slsb(q, &qidx, SLSB_CU_OUTPUT_PRIMED, &count); - if (!tmp) - return; - } - return; - } - - for (;;) { - set_slsb(q, &qidx, SLSB_CU_OUTPUT_PRIMED, &count); - count--; - if (!count) break; - qidx = (qidx + 1) & (QDIO_MAX_BUFFERS_PER_Q - 1); - } -} - -static void -do_qdio_handle_inbound(struct qdio_q *q, unsigned 
int callflags, - unsigned int qidx, unsigned int count, - struct qdio_buffer *buffers) -{ - int used_elements; - - /* This is the inbound handling of queues */ - used_elements=atomic_add_return(count, &q->number_of_buffers_used) - count; - - qdio_do_qdio_fill_input(q,qidx,count,buffers); - - if ((used_elements+count==QDIO_MAX_BUFFERS_PER_Q)&& - (callflags&QDIO_FLAG_UNDER_INTERRUPT)) - atomic_xchg(&q->polling,0); - - if (used_elements) - return; - if (callflags&QDIO_FLAG_DONT_SIGA) - return; - if (q->siga_in) { - int result; - - result=qdio_siga_input(q); - if (result) { - if (q->siga_error) - q->error_status_flags|= - QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR; - q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR; - q->siga_error=result; - } - } - - qdio_mark_q(q); -} - -static void -do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags, - unsigned int qidx, unsigned int count, - struct qdio_buffer *buffers) -{ - int used_elements; - unsigned int cnt, start_buf; - unsigned char state = 0; - struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr; - - /* This is the outbound handling of queues */ - qdio_do_qdio_fill_output(q,qidx,count,buffers); - - used_elements=atomic_add_return(count, &q->number_of_buffers_used) - count; - - if (callflags&QDIO_FLAG_DONT_SIGA) { - qdio_perf_stat_inc(&perf_stats.outbound_cnt); - return; - } - if (callflags & QDIO_FLAG_PCI_OUT) - q->is_pci_out = 1; - else - q->is_pci_out = 0; - if (q->is_iqdio_q) { - /* one siga for every sbal */ - while (count--) - qdio_kick_outbound_q(q); - - __qdio_outbound_processing(q); - } else { - /* under VM, we do a SIGA sync unconditionally */ - SYNC_MEMORY; - else { - /* - * w/o shadow queues (else branch of - * SYNC_MEMORY :-/ ), we try to - * fast-requeue buffers - */ - if (irq->is_qebsm) { - cnt = 1; - start_buf = ((qidx+QDIO_MAX_BUFFERS_PER_Q-1) & - (QDIO_MAX_BUFFERS_PER_Q-1)); - qdio_do_eqbs(q, &state, &start_buf, &cnt); - } else - state = q->slsb.acc.val[(qidx+QDIO_MAX_BUFFERS_PER_Q-1) - &(QDIO_MAX_BUFFERS_PER_Q-1) ]; - if (state != SLSB_CU_OUTPUT_PRIMED) { - qdio_kick_outbound_q(q); - } else { - QDIO_DBF_TEXT3(0,trace, "fast-req"); - qdio_perf_stat_inc(&perf_stats.fast_reqs); - } - } - /* - * only marking the q could take too long, - * the upper layer module could do a lot of - * traffic in that time - */ - __qdio_outbound_processing(q); - } - - qdio_perf_stat_inc(&perf_stats.outbound_cnt); -} - -/* count must be 1 in iqdio */ -int -do_QDIO(struct ccw_device *cdev,unsigned int callflags, - unsigned int queue_number, unsigned int qidx, - unsigned int count,struct qdio_buffer *buffers) -{ - struct qdio_irq *irq_ptr; -#ifdef CONFIG_QDIO_DEBUG - char dbf_text[20]; - - sprintf(dbf_text,"doQD%04x",cdev->private->schid.sch_no); - QDIO_DBF_TEXT3(0,trace,dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - - if ( (qidx>QDIO_MAX_BUFFERS_PER_Q) || - (count>QDIO_MAX_BUFFERS_PER_Q) || - (queue_number>QDIO_MAX_QUEUES_PER_IRQ) ) - return -EINVAL; - - if (count==0) - return 0; - - irq_ptr = cdev->private->qdio_data; - if (!irq_ptr) - return -ENODEV; - -#ifdef CONFIG_QDIO_DEBUG - if (callflags&QDIO_FLAG_SYNC_INPUT) - QDIO_DBF_HEX3(0,trace,&irq_ptr->input_qs[queue_number], - sizeof(void*)); - else - QDIO_DBF_HEX3(0,trace,&irq_ptr->output_qs[queue_number], - sizeof(void*)); - sprintf(dbf_text,"flag%04x",callflags); - QDIO_DBF_TEXT3(0,trace,dbf_text); - sprintf(dbf_text,"qi%02xct%02x",qidx,count); - QDIO_DBF_TEXT3(0,trace,dbf_text); -#endif /* CONFIG_QDIO_DEBUG */ - - if (irq_ptr->state!=QDIO_IRQ_STATE_ACTIVE) - return -EBUSY; - - if 
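/*
 * [Editorial note -- not part of this commit.]
 * The "SYNC_MEMORY; else" construct in do_qdio_handle_outbound() above
 * is not a typo: the removed qdio.h defines
 *
 *	#define SYNC_MEMORY if (unlikely(q->siga_sync)) qdio_siga_sync_q(q)
 *
 * so the macro expands to an if statement and the bare "else" attaches
 * to that hidden if -- the old code even apologizes for this in the
 * adjacent comment ("else branch of SYNC_MEMORY :-/"). The rewritten
 * driver drops the macro in favour of explicit need_siga_sync() tests.
 */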
(callflags&QDIO_FLAG_SYNC_INPUT) - do_qdio_handle_inbound(irq_ptr->input_qs[queue_number], - callflags, qidx, count, buffers); - else if (callflags&QDIO_FLAG_SYNC_OUTPUT) - do_qdio_handle_outbound(irq_ptr->output_qs[queue_number], - callflags, qidx, count, buffers); - else { - QDIO_DBF_TEXT3(1,trace,"doQD:inv"); - return -EINVAL; - } - return 0; -} - -static int -qdio_perf_procfile_read(char *buffer, char **buffer_location, off_t offset, - int buffer_length, int *eof, void *data) -{ - int c=0; - - /* we are always called with buffer_length=4k, so we all - deliver on the first read */ - if (offset>0) - return 0; - -#define _OUTP_IT(x...) c+=sprintf(buffer+c,x) -#ifdef CONFIG_64BIT - _OUTP_IT("Number of tasklet runs (total) : %li\n", - (long)atomic64_read(&perf_stats.tl_runs)); - _OUTP_IT("Inbound tasklet runs tried/retried : %li/%li\n", - (long)atomic64_read(&perf_stats.inbound_tl_runs), - (long)atomic64_read(&perf_stats.inbound_tl_runs_resched)); - _OUTP_IT("Inbound-thin tasklet runs tried/retried : %li/%li\n", - (long)atomic64_read(&perf_stats.inbound_thin_tl_runs), - (long)atomic64_read(&perf_stats.inbound_thin_tl_runs_resched)); - _OUTP_IT("Outbound tasklet runs tried/retried : %li/%li\n", - (long)atomic64_read(&perf_stats.outbound_tl_runs), - (long)atomic64_read(&perf_stats.outbound_tl_runs_resched)); - _OUTP_IT("\n"); - _OUTP_IT("Number of SIGA sync's issued : %li\n", - (long)atomic64_read(&perf_stats.siga_syncs)); - _OUTP_IT("Number of SIGA in's issued : %li\n", - (long)atomic64_read(&perf_stats.siga_ins)); - _OUTP_IT("Number of SIGA out's issued : %li\n", - (long)atomic64_read(&perf_stats.siga_outs)); - _OUTP_IT("Number of PCIs caught : %li\n", - (long)atomic64_read(&perf_stats.pcis)); - _OUTP_IT("Number of adapter interrupts caught : %li\n", - (long)atomic64_read(&perf_stats.thinints)); - _OUTP_IT("Number of fast requeues (outg. SBALs w/o SIGA) : %li\n", - (long)atomic64_read(&perf_stats.fast_reqs)); - _OUTP_IT("\n"); - _OUTP_IT("Number of inbound transfers : %li\n", - (long)atomic64_read(&perf_stats.inbound_cnt)); - _OUTP_IT("Number of do_QDIOs outbound : %li\n", - (long)atomic64_read(&perf_stats.outbound_cnt)); -#else /* CONFIG_64BIT */ - _OUTP_IT("Number of tasklet runs (total) : %i\n", - atomic_read(&perf_stats.tl_runs)); - _OUTP_IT("Inbound tasklet runs tried/retried : %i/%i\n", - atomic_read(&perf_stats.inbound_tl_runs), - atomic_read(&perf_stats.inbound_tl_runs_resched)); - _OUTP_IT("Inbound-thin tasklet runs tried/retried : %i/%i\n", - atomic_read(&perf_stats.inbound_thin_tl_runs), - atomic_read(&perf_stats.inbound_thin_tl_runs_resched)); - _OUTP_IT("Outbound tasklet runs tried/retried : %i/%i\n", - atomic_read(&perf_stats.outbound_tl_runs), - atomic_read(&perf_stats.outbound_tl_runs_resched)); - _OUTP_IT("\n"); - _OUTP_IT("Number of SIGA sync's issued : %i\n", - atomic_read(&perf_stats.siga_syncs)); - _OUTP_IT("Number of SIGA in's issued : %i\n", - atomic_read(&perf_stats.siga_ins)); - _OUTP_IT("Number of SIGA out's issued : %i\n", - atomic_read(&perf_stats.siga_outs)); - _OUTP_IT("Number of PCIs caught : %i\n", - atomic_read(&perf_stats.pcis)); - _OUTP_IT("Number of adapter interrupts caught : %i\n", - atomic_read(&perf_stats.thinints)); - _OUTP_IT("Number of fast requeues (outg. 
SBALs w/o SIGA) : %i\n", - atomic_read(&perf_stats.fast_reqs)); - _OUTP_IT("\n"); - _OUTP_IT("Number of inbound transfers : %i\n", - atomic_read(&perf_stats.inbound_cnt)); - _OUTP_IT("Number of do_QDIOs outbound : %i\n", - atomic_read(&perf_stats.outbound_cnt)); -#endif /* CONFIG_64BIT */ - _OUTP_IT("\n"); - - return c; -} - -static struct proc_dir_entry *qdio_perf_proc_file; - -static void -qdio_add_procfs_entry(void) -{ - proc_perf_file_registration=0; - qdio_perf_proc_file=create_proc_entry(QDIO_PERF, - S_IFREG|0444,NULL); - if (qdio_perf_proc_file) { - qdio_perf_proc_file->read_proc=&qdio_perf_procfile_read; - } else proc_perf_file_registration=-1; - - if (proc_perf_file_registration) - QDIO_PRINT_WARN("was not able to register perf. " \ - "proc-file (%i).\n", - proc_perf_file_registration); -} - -static void -qdio_remove_procfs_entry(void) -{ - if (!proc_perf_file_registration) /* means if it went ok earlier */ - remove_proc_entry(QDIO_PERF,NULL); -} - -/** - * attributes in sysfs - *****************************************************************************/ - -static ssize_t -qdio_performance_stats_show(struct bus_type *bus, char *buf) -{ - return sprintf(buf, "%i\n", qdio_performance_stats ? 1 : 0); -} - -static ssize_t -qdio_performance_stats_store(struct bus_type *bus, const char *buf, size_t count) -{ - unsigned long i; - int ret; - - ret = strict_strtoul(buf, 16, &i); - if (!ret && ((i == 0) || (i == 1))) { - if (i == qdio_performance_stats) - return count; - qdio_performance_stats = i; - if (i==0) { - /* reset perf. stat. info */ -#ifdef CONFIG_64BIT - atomic64_set(&perf_stats.tl_runs, 0); - atomic64_set(&perf_stats.outbound_tl_runs, 0); - atomic64_set(&perf_stats.inbound_tl_runs, 0); - atomic64_set(&perf_stats.inbound_tl_runs_resched, 0); - atomic64_set(&perf_stats.inbound_thin_tl_runs, 0); - atomic64_set(&perf_stats.inbound_thin_tl_runs_resched, - 0); - atomic64_set(&perf_stats.siga_outs, 0); - atomic64_set(&perf_stats.siga_ins, 0); - atomic64_set(&perf_stats.siga_syncs, 0); - atomic64_set(&perf_stats.pcis, 0); - atomic64_set(&perf_stats.thinints, 0); - atomic64_set(&perf_stats.fast_reqs, 0); - atomic64_set(&perf_stats.outbound_cnt, 0); - atomic64_set(&perf_stats.inbound_cnt, 0); -#else /* CONFIG_64BIT */ - atomic_set(&perf_stats.tl_runs, 0); - atomic_set(&perf_stats.outbound_tl_runs, 0); - atomic_set(&perf_stats.inbound_tl_runs, 0); - atomic_set(&perf_stats.inbound_tl_runs_resched, 0); - atomic_set(&perf_stats.inbound_thin_tl_runs, 0); - atomic_set(&perf_stats.inbound_thin_tl_runs_resched, 0); - atomic_set(&perf_stats.siga_outs, 0); - atomic_set(&perf_stats.siga_ins, 0); - atomic_set(&perf_stats.siga_syncs, 0); - atomic_set(&perf_stats.pcis, 0); - atomic_set(&perf_stats.thinints, 0); - atomic_set(&perf_stats.fast_reqs, 0); - atomic_set(&perf_stats.outbound_cnt, 0); - atomic_set(&perf_stats.inbound_cnt, 0); -#endif /* CONFIG_64BIT */ - } - } else { - QDIO_PRINT_ERR("QDIO performance_stats: write 0 or 1 to this file!\n"); - return -EINVAL; - } - return count; -} - -static BUS_ATTR(qdio_performance_stats, 0644, qdio_performance_stats_show, - qdio_performance_stats_store); - -static void -tiqdio_register_thinints(void) -{ - char dbf_text[20]; - - tiqdio_ind = - s390_register_adapter_interrupt(&tiqdio_thinint_handler, NULL); - if (IS_ERR(tiqdio_ind)) { - sprintf(dbf_text, "regthn%lx", PTR_ERR(tiqdio_ind)); - QDIO_DBF_TEXT0(0,setup,dbf_text); - QDIO_PRINT_ERR("failed to register adapter handler " \ - "(rc=%li).\nAdapter interrupts might " \ - "not work. 
Continuing.\n", - PTR_ERR(tiqdio_ind)); - tiqdio_ind = NULL; - } -} - -static void -tiqdio_unregister_thinints(void) -{ - if (tiqdio_ind) - s390_unregister_adapter_interrupt(tiqdio_ind); -} - -static int -qdio_get_qdio_memory(void) -{ - int i; - indicator_used[0]=1; - - for (i=1;i<INDICATORS_PER_CACHELINE;i++) - indicator_used[i]=0; - indicators = kzalloc(sizeof(__u32)*(INDICATORS_PER_CACHELINE), - GFP_KERNEL); - if (!indicators) - return -ENOMEM; - return 0; -} - -static void -qdio_release_qdio_memory(void) -{ - kfree(indicators); -} - -static void -qdio_unregister_dbf_views(void) -{ - if (qdio_dbf_setup) - debug_unregister(qdio_dbf_setup); - if (qdio_dbf_sbal) - debug_unregister(qdio_dbf_sbal); - if (qdio_dbf_sense) - debug_unregister(qdio_dbf_sense); - if (qdio_dbf_trace) - debug_unregister(qdio_dbf_trace); -#ifdef CONFIG_QDIO_DEBUG - if (qdio_dbf_slsb_out) - debug_unregister(qdio_dbf_slsb_out); - if (qdio_dbf_slsb_in) - debug_unregister(qdio_dbf_slsb_in); -#endif /* CONFIG_QDIO_DEBUG */ -} - -static int -qdio_register_dbf_views(void) -{ - qdio_dbf_setup=debug_register(QDIO_DBF_SETUP_NAME, - QDIO_DBF_SETUP_PAGES, - QDIO_DBF_SETUP_NR_AREAS, - QDIO_DBF_SETUP_LEN); - if (!qdio_dbf_setup) - goto oom; - debug_register_view(qdio_dbf_setup,&debug_hex_ascii_view); - debug_set_level(qdio_dbf_setup,QDIO_DBF_SETUP_LEVEL); - - qdio_dbf_sbal=debug_register(QDIO_DBF_SBAL_NAME, - QDIO_DBF_SBAL_PAGES, - QDIO_DBF_SBAL_NR_AREAS, - QDIO_DBF_SBAL_LEN); - if (!qdio_dbf_sbal) - goto oom; - - debug_register_view(qdio_dbf_sbal,&debug_hex_ascii_view); - debug_set_level(qdio_dbf_sbal,QDIO_DBF_SBAL_LEVEL); - - qdio_dbf_sense=debug_register(QDIO_DBF_SENSE_NAME, - QDIO_DBF_SENSE_PAGES, - QDIO_DBF_SENSE_NR_AREAS, - QDIO_DBF_SENSE_LEN); - if (!qdio_dbf_sense) - goto oom; - - debug_register_view(qdio_dbf_sense,&debug_hex_ascii_view); - debug_set_level(qdio_dbf_sense,QDIO_DBF_SENSE_LEVEL); - - qdio_dbf_trace=debug_register(QDIO_DBF_TRACE_NAME, - QDIO_DBF_TRACE_PAGES, - QDIO_DBF_TRACE_NR_AREAS, - QDIO_DBF_TRACE_LEN); - if (!qdio_dbf_trace) - goto oom; - - debug_register_view(qdio_dbf_trace,&debug_hex_ascii_view); - debug_set_level(qdio_dbf_trace,QDIO_DBF_TRACE_LEVEL); - -#ifdef CONFIG_QDIO_DEBUG - qdio_dbf_slsb_out=debug_register(QDIO_DBF_SLSB_OUT_NAME, - QDIO_DBF_SLSB_OUT_PAGES, - QDIO_DBF_SLSB_OUT_NR_AREAS, - QDIO_DBF_SLSB_OUT_LEN); - if (!qdio_dbf_slsb_out) - goto oom; - debug_register_view(qdio_dbf_slsb_out,&debug_hex_ascii_view); - debug_set_level(qdio_dbf_slsb_out,QDIO_DBF_SLSB_OUT_LEVEL); - - qdio_dbf_slsb_in=debug_register(QDIO_DBF_SLSB_IN_NAME, - QDIO_DBF_SLSB_IN_PAGES, - QDIO_DBF_SLSB_IN_NR_AREAS, - QDIO_DBF_SLSB_IN_LEN); - if (!qdio_dbf_slsb_in) - goto oom; - debug_register_view(qdio_dbf_slsb_in,&debug_hex_ascii_view); - debug_set_level(qdio_dbf_slsb_in,QDIO_DBF_SLSB_IN_LEVEL); -#endif /* CONFIG_QDIO_DEBUG */ - return 0; -oom: - QDIO_PRINT_ERR("not enough memory for dbf.\n"); - qdio_unregister_dbf_views(); - return -ENOMEM; -} - -static void *qdio_mempool_alloc(gfp_t gfp_mask, void *size) -{ - return (void *) get_zeroed_page(gfp_mask|GFP_DMA); -} - -static void qdio_mempool_free(void *element, void *size) -{ - free_page((unsigned long) element); -} - -static int __init -init_QDIO(void) -{ - int res; - void *ptr; - - printk("qdio: loading %s\n",version); - - res=qdio_get_qdio_memory(); - if (res) - return res; - - qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q), - 256, 0, NULL); - if (!qdio_q_cache) { - qdio_release_qdio_memory(); - return -ENOMEM; - } - - res = qdio_register_dbf_views(); - if 
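/*
 * [Editorial note -- not part of this commit.]
 * The removed init_QDIO() here and cleanup_QDIO() just below follow
 * the usual module rule: tear down in the reverse order of setup
 * (indicator memory, q cache, dbf views, sysfs attribute, procfs
 * entry, mempool, thin interrupts), so each cleanup step only ever
 * touches resources whose setup already succeeded.
 */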
(res) { - kmem_cache_destroy(qdio_q_cache); - qdio_release_qdio_memory(); - return res; - } - - QDIO_DBF_TEXT0(0,setup,"initQDIO"); - res = bus_create_file(&ccw_bus_type, &bus_attr_qdio_performance_stats); - - memset((void*)&perf_stats,0,sizeof(perf_stats)); - QDIO_DBF_TEXT0(0,setup,"perfstat"); - ptr=&perf_stats; - QDIO_DBF_HEX0(0,setup,&ptr,sizeof(void*)); - - qdio_add_procfs_entry(); - - qdio_mempool_scssc = mempool_create(QDIO_MEMPOOL_SCSSC_ELEMENTS, - qdio_mempool_alloc, - qdio_mempool_free, NULL); - - if (tiqdio_check_chsc_availability()) - QDIO_PRINT_ERR("Not all CHSCs supported. Continuing.\n"); - - tiqdio_register_thinints(); - - return 0; - } - -static void __exit -cleanup_QDIO(void) -{ - tiqdio_unregister_thinints(); - qdio_remove_procfs_entry(); - qdio_release_qdio_memory(); - qdio_unregister_dbf_views(); - mempool_destroy(qdio_mempool_scssc); - kmem_cache_destroy(qdio_q_cache); - bus_remove_file(&ccw_bus_type, &bus_attr_qdio_performance_stats); - printk("qdio: %s: module removed\n",version); -} - -module_init(init_QDIO); -module_exit(cleanup_QDIO); - -EXPORT_SYMBOL(qdio_allocate); -EXPORT_SYMBOL(qdio_establish); -EXPORT_SYMBOL(qdio_initialize); -EXPORT_SYMBOL(qdio_activate); -EXPORT_SYMBOL(do_QDIO); -EXPORT_SYMBOL(qdio_shutdown); -EXPORT_SYMBOL(qdio_free); -EXPORT_SYMBOL(qdio_cleanup); -EXPORT_SYMBOL(qdio_synchronize); diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index c3df6b2c38b7..c1a70985abfa 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h @@ -1,66 +1,20 @@ +/* + * linux/drivers/s390/cio/qdio.h + * + * Copyright 2000,2008 IBM Corp. + * Author(s): Utz Bacher <utz.bacher@de.ibm.com> + * Jan Glauber <jang@linux.vnet.ibm.com> + */ #ifndef _CIO_QDIO_H #define _CIO_QDIO_H #include <asm/page.h> +#include <asm/schid.h> +#include "chsc.h" -#include "schid.h" - -#ifdef CONFIG_QDIO_DEBUG -#define QDIO_VERBOSE_LEVEL 9 -#else /* CONFIG_QDIO_DEBUG */ -#define QDIO_VERBOSE_LEVEL 5 -#endif /* CONFIG_QDIO_DEBUG */ -#define QDIO_USE_PROCESSING_STATE - -#define QDIO_MINIMAL_BH_RELIEF_TIME 16 -#define QDIO_TIMER_POLL_VALUE 1 -#define IQDIO_TIMER_POLL_VALUE 1 - -/* - * unfortunately this can't be (QDIO_MAX_BUFFERS_PER_Q*4/3) or so -- as - * we never know, whether we'll get initiative again, e.g. to give the - * transmit skb's back to the stack, however the stack may be waiting for - * them... 
therefore we define 4 as threshold to start polling (which - * will stop as soon as the asynchronous queue catches up) - * btw, this only applies to the asynchronous HiperSockets queue - */ -#define IQDIO_FILL_LEVEL_TO_POLL 4 - -#define TIQDIO_THININT_ISC 3 -#define TIQDIO_DELAY_TARGET 0 -#define QDIO_BUSY_BIT_PATIENCE 100 /* in microsecs */ -#define QDIO_BUSY_BIT_GIVE_UP 10000000 /* 10 seconds */ -#define IQDIO_GLOBAL_LAPS 2 /* GLOBAL_LAPS are not used as we */ -#define IQDIO_GLOBAL_LAPS_INT 1 /* don't global summary */ -#define IQDIO_LOCAL_LAPS 4 -#define IQDIO_LOCAL_LAPS_INT 1 -#define IQDIO_GLOBAL_SUMMARY_CC_MASK 2 -/*#define IQDIO_IQDC_INT_PARM 0x1234*/ - -#define QDIO_Q_LAPS 5 - -#define QDIO_STORAGE_KEY PAGE_DEFAULT_KEY - -#define L2_CACHELINE_SIZE 256 -#define INDICATORS_PER_CACHELINE (L2_CACHELINE_SIZE/sizeof(__u32)) - -#define QDIO_PERF "qdio_perf" - -/* must be a power of 2 */ -/*#define QDIO_STATS_NUMBER 4 - -#define QDIO_STATS_CLASSES 2 -#define QDIO_STATS_COUNT_NEEDED 2*/ - -#define QDIO_NO_USE_COUNT_TIMEOUT (1*HZ) /* wait for 1 sec on each q before - exiting without having use_count - of the queue to 0 */ - -#define QDIO_ESTABLISH_TIMEOUT (1*HZ) -#define QDIO_CLEANUP_CLEAR_TIMEOUT (20*HZ) -#define QDIO_CLEANUP_HALT_TIMEOUT (10*HZ) -#define QDIO_FORCE_CHECK_TIMEOUT (10*HZ) -#define QDIO_ACTIVATE_TIMEOUT (5) /* 5 ms */ +#define QDIO_BUSY_BIT_PATIENCE 100 /* 100 microseconds */ +#define QDIO_BUSY_BIT_GIVE_UP 2000000 /* 2 seconds = eternity */ +#define QDIO_INPUT_THRESHOLD 500 /* 500 microseconds */ enum qdio_irq_states { QDIO_IRQ_STATE_INACTIVE, @@ -72,565 +26,352 @@ enum qdio_irq_states { NR_QDIO_IRQ_STATES, }; -/* used as intparm in do_IO: */ -#define QDIO_DOING_SENSEID 0 -#define QDIO_DOING_ESTABLISH 1 -#define QDIO_DOING_ACTIVATE 2 -#define QDIO_DOING_CLEANUP 3 - -/************************* DEBUG FACILITY STUFF *********************/ - -#define QDIO_DBF_HEX(ex,name,level,addr,len) \ - do { \ - if (ex) \ - debug_exception(qdio_dbf_##name,level,(void*)(addr),len); \ - else \ - debug_event(qdio_dbf_##name,level,(void*)(addr),len); \ - } while (0) -#define QDIO_DBF_TEXT(ex,name,level,text) \ - do { \ - if (ex) \ - debug_text_exception(qdio_dbf_##name,level,text); \ - else \ - debug_text_event(qdio_dbf_##name,level,text); \ - } while (0) - - -#define QDIO_DBF_HEX0(ex,name,addr,len) QDIO_DBF_HEX(ex,name,0,addr,len) -#define QDIO_DBF_HEX1(ex,name,addr,len) QDIO_DBF_HEX(ex,name,1,addr,len) -#define QDIO_DBF_HEX2(ex,name,addr,len) QDIO_DBF_HEX(ex,name,2,addr,len) -#ifdef CONFIG_QDIO_DEBUG -#define QDIO_DBF_HEX3(ex,name,addr,len) QDIO_DBF_HEX(ex,name,3,addr,len) -#define QDIO_DBF_HEX4(ex,name,addr,len) QDIO_DBF_HEX(ex,name,4,addr,len) -#define QDIO_DBF_HEX5(ex,name,addr,len) QDIO_DBF_HEX(ex,name,5,addr,len) -#define QDIO_DBF_HEX6(ex,name,addr,len) QDIO_DBF_HEX(ex,name,6,addr,len) -#else /* CONFIG_QDIO_DEBUG */ -#define QDIO_DBF_HEX3(ex,name,addr,len) do {} while (0) -#define QDIO_DBF_HEX4(ex,name,addr,len) do {} while (0) -#define QDIO_DBF_HEX5(ex,name,addr,len) do {} while (0) -#define QDIO_DBF_HEX6(ex,name,addr,len) do {} while (0) -#endif /* CONFIG_QDIO_DEBUG */ - -#define QDIO_DBF_TEXT0(ex,name,text) QDIO_DBF_TEXT(ex,name,0,text) -#define QDIO_DBF_TEXT1(ex,name,text) QDIO_DBF_TEXT(ex,name,1,text) -#define QDIO_DBF_TEXT2(ex,name,text) QDIO_DBF_TEXT(ex,name,2,text) -#ifdef CONFIG_QDIO_DEBUG -#define QDIO_DBF_TEXT3(ex,name,text) QDIO_DBF_TEXT(ex,name,3,text) -#define QDIO_DBF_TEXT4(ex,name,text) QDIO_DBF_TEXT(ex,name,4,text) -#define QDIO_DBF_TEXT5(ex,name,text) 
QDIO_DBF_TEXT(ex,name,5,text) -#define QDIO_DBF_TEXT6(ex,name,text) QDIO_DBF_TEXT(ex,name,6,text) -#else /* CONFIG_QDIO_DEBUG */ -#define QDIO_DBF_TEXT3(ex,name,text) do {} while (0) -#define QDIO_DBF_TEXT4(ex,name,text) do {} while (0) -#define QDIO_DBF_TEXT5(ex,name,text) do {} while (0) -#define QDIO_DBF_TEXT6(ex,name,text) do {} while (0) -#endif /* CONFIG_QDIO_DEBUG */ - -#define QDIO_DBF_SETUP_NAME "qdio_setup" -#define QDIO_DBF_SETUP_LEN 8 -#define QDIO_DBF_SETUP_PAGES 4 -#define QDIO_DBF_SETUP_NR_AREAS 1 -#ifdef CONFIG_QDIO_DEBUG -#define QDIO_DBF_SETUP_LEVEL 6 -#else /* CONFIG_QDIO_DEBUG */ -#define QDIO_DBF_SETUP_LEVEL 2 -#endif /* CONFIG_QDIO_DEBUG */ - -#define QDIO_DBF_SBAL_NAME "qdio_labs" /* sbal */ -#define QDIO_DBF_SBAL_LEN 256 -#define QDIO_DBF_SBAL_PAGES 4 -#define QDIO_DBF_SBAL_NR_AREAS 2 -#ifdef CONFIG_QDIO_DEBUG -#define QDIO_DBF_SBAL_LEVEL 6 -#else /* CONFIG_QDIO_DEBUG */ -#define QDIO_DBF_SBAL_LEVEL 2 -#endif /* CONFIG_QDIO_DEBUG */ - -#define QDIO_DBF_TRACE_NAME "qdio_trace" -#define QDIO_DBF_TRACE_LEN 8 -#define QDIO_DBF_TRACE_NR_AREAS 2 -#ifdef CONFIG_QDIO_DEBUG -#define QDIO_DBF_TRACE_PAGES 16 -#define QDIO_DBF_TRACE_LEVEL 4 /* -------- could be even more verbose here */ -#else /* CONFIG_QDIO_DEBUG */ -#define QDIO_DBF_TRACE_PAGES 4 -#define QDIO_DBF_TRACE_LEVEL 2 -#endif /* CONFIG_QDIO_DEBUG */ - -#define QDIO_DBF_SENSE_NAME "qdio_sense" -#define QDIO_DBF_SENSE_LEN 64 -#define QDIO_DBF_SENSE_PAGES 2 -#define QDIO_DBF_SENSE_NR_AREAS 1 -#ifdef CONFIG_QDIO_DEBUG -#define QDIO_DBF_SENSE_LEVEL 6 -#else /* CONFIG_QDIO_DEBUG */ -#define QDIO_DBF_SENSE_LEVEL 2 -#endif /* CONFIG_QDIO_DEBUG */ - -#ifdef CONFIG_QDIO_DEBUG -#define QDIO_TRACE_QTYPE QDIO_ZFCP_QFMT - -#define QDIO_DBF_SLSB_OUT_NAME "qdio_slsb_out" -#define QDIO_DBF_SLSB_OUT_LEN QDIO_MAX_BUFFERS_PER_Q -#define QDIO_DBF_SLSB_OUT_PAGES 256 -#define QDIO_DBF_SLSB_OUT_NR_AREAS 1 -#define QDIO_DBF_SLSB_OUT_LEVEL 6 - -#define QDIO_DBF_SLSB_IN_NAME "qdio_slsb_in" -#define QDIO_DBF_SLSB_IN_LEN QDIO_MAX_BUFFERS_PER_Q -#define QDIO_DBF_SLSB_IN_PAGES 256 -#define QDIO_DBF_SLSB_IN_NR_AREAS 1 -#define QDIO_DBF_SLSB_IN_LEVEL 6 -#endif /* CONFIG_QDIO_DEBUG */ - -#define QDIO_PRINTK_HEADER QDIO_NAME ": " - -#if QDIO_VERBOSE_LEVEL>8 -#define QDIO_PRINT_STUPID(x...) printk( KERN_DEBUG QDIO_PRINTK_HEADER x) -#else -#define QDIO_PRINT_STUPID(x...) do { } while (0) -#endif - -#if QDIO_VERBOSE_LEVEL>7 -#define QDIO_PRINT_ALL(x...) printk( QDIO_PRINTK_HEADER x) -#else -#define QDIO_PRINT_ALL(x...) do { } while (0) -#endif - -#if QDIO_VERBOSE_LEVEL>6 -#define QDIO_PRINT_INFO(x...) printk( QDIO_PRINTK_HEADER x) -#else -#define QDIO_PRINT_INFO(x...) do { } while (0) -#endif - -#if QDIO_VERBOSE_LEVEL>5 -#define QDIO_PRINT_WARN(x...) printk( QDIO_PRINTK_HEADER x) -#else -#define QDIO_PRINT_WARN(x...) do { } while (0) -#endif - -#if QDIO_VERBOSE_LEVEL>4 -#define QDIO_PRINT_ERR(x...) printk( QDIO_PRINTK_HEADER x) -#else -#define QDIO_PRINT_ERR(x...) do { } while (0) -#endif - -#if QDIO_VERBOSE_LEVEL>3 -#define QDIO_PRINT_CRIT(x...) printk( QDIO_PRINTK_HEADER x) -#else -#define QDIO_PRINT_CRIT(x...) 
do { } while (0) -#endif +/* used as intparm in do_IO */ +#define QDIO_DOING_ESTABLISH 1 +#define QDIO_DOING_ACTIVATE 2 +#define QDIO_DOING_CLEANUP 3 + +#define SLSB_STATE_NOT_INIT 0x0 +#define SLSB_STATE_EMPTY 0x1 +#define SLSB_STATE_PRIMED 0x2 +#define SLSB_STATE_HALTED 0xe +#define SLSB_STATE_ERROR 0xf +#define SLSB_TYPE_INPUT 0x0 +#define SLSB_TYPE_OUTPUT 0x20 +#define SLSB_OWNER_PROG 0x80 +#define SLSB_OWNER_CU 0x40 + +#define SLSB_P_INPUT_NOT_INIT \ + (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_NOT_INIT) /* 0x80 */ +#define SLSB_P_INPUT_ACK \ + (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_EMPTY) /* 0x81 */ +#define SLSB_CU_INPUT_EMPTY \ + (SLSB_OWNER_CU | SLSB_TYPE_INPUT | SLSB_STATE_EMPTY) /* 0x41 */ +#define SLSB_P_INPUT_PRIMED \ + (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_PRIMED) /* 0x82 */ +#define SLSB_P_INPUT_HALTED \ + (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_HALTED) /* 0x8e */ +#define SLSB_P_INPUT_ERROR \ + (SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_ERROR) /* 0x8f */ +#define SLSB_P_OUTPUT_NOT_INIT \ + (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_NOT_INIT) /* 0xa0 */ +#define SLSB_P_OUTPUT_EMPTY \ + (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_EMPTY) /* 0xa1 */ +#define SLSB_CU_OUTPUT_PRIMED \ + (SLSB_OWNER_CU | SLSB_TYPE_OUTPUT | SLSB_STATE_PRIMED) /* 0x62 */ +#define SLSB_P_OUTPUT_HALTED \ + (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_HALTED) /* 0xae */ +#define SLSB_P_OUTPUT_ERROR \ + (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_ERROR) /* 0xaf */ + +#define SLSB_ERROR_DURING_LOOKUP 0xff + +/* additional CIWs returned by extended Sense-ID */ +#define CIW_TYPE_EQUEUE 0x3 /* establish QDIO queues */ +#define CIW_TYPE_AQUEUE 0x4 /* activate QDIO queues */ -#if QDIO_VERBOSE_LEVEL>2 -#define QDIO_PRINT_ALERT(x...) printk( QDIO_PRINTK_HEADER x) -#else -#define QDIO_PRINT_ALERT(x...) do { } while (0) -#endif +/* flags for st qdio sch data */ +#define CHSC_FLAG_QDIO_CAPABILITY 0x80 +#define CHSC_FLAG_VALIDITY 0x40 + +/* qdio adapter-characteristics-1 flag */ +#define AC1_SIGA_INPUT_NEEDED 0x40 /* process input queues */ +#define AC1_SIGA_OUTPUT_NEEDED 0x20 /* process output queues */ +#define AC1_SIGA_SYNC_NEEDED 0x10 /* ask hypervisor to sync */ +#define AC1_AUTOMATIC_SYNC_ON_THININT 0x08 /* set by hypervisor */ +#define AC1_AUTOMATIC_SYNC_ON_OUT_PCI 0x04 /* set by hypervisor */ +#define AC1_SC_QEBSM_AVAILABLE 0x02 /* available for subchannel */ +#define AC1_SC_QEBSM_ENABLED 0x01 /* enabled for subchannel */ -#if QDIO_VERBOSE_LEVEL>1 -#define QDIO_PRINT_EMERG(x...) printk( QDIO_PRINTK_HEADER x) -#else -#define QDIO_PRINT_EMERG(x...) 
do { } while (0) -#endif - -#define QDIO_HEXDUMP16(importance,header,ptr) \ -QDIO_PRINT_##importance(header "%02x %02x %02x %02x " \ - "%02x %02x %02x %02x %02x %02x %02x %02x " \ - "%02x %02x %02x %02x\n",*(((char*)ptr)), \ - *(((char*)ptr)+1),*(((char*)ptr)+2), \ - *(((char*)ptr)+3),*(((char*)ptr)+4), \ - *(((char*)ptr)+5),*(((char*)ptr)+6), \ - *(((char*)ptr)+7),*(((char*)ptr)+8), \ - *(((char*)ptr)+9),*(((char*)ptr)+10), \ - *(((char*)ptr)+11),*(((char*)ptr)+12), \ - *(((char*)ptr)+13),*(((char*)ptr)+14), \ - *(((char*)ptr)+15)); \ -QDIO_PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \ - "%02x %02x %02x %02x %02x %02x %02x %02x\n", \ - *(((char*)ptr)+16),*(((char*)ptr)+17), \ - *(((char*)ptr)+18),*(((char*)ptr)+19), \ - *(((char*)ptr)+20),*(((char*)ptr)+21), \ - *(((char*)ptr)+22),*(((char*)ptr)+23), \ - *(((char*)ptr)+24),*(((char*)ptr)+25), \ - *(((char*)ptr)+26),*(((char*)ptr)+27), \ - *(((char*)ptr)+28),*(((char*)ptr)+29), \ - *(((char*)ptr)+30),*(((char*)ptr)+31)); - -/****************** END OF DEBUG FACILITY STUFF *********************/ +#ifdef CONFIG_64BIT +static inline int do_sqbs(u64 token, unsigned char state, int queue, + int *start, int *count) +{ + register unsigned long _ccq asm ("0") = *count; + register unsigned long _token asm ("1") = token; + unsigned long _queuestart = ((unsigned long)queue << 32) | *start; -/* - * Some instructions as assembly - */ + asm volatile( + " .insn rsy,0xeb000000008A,%1,0,0(%2)" + : "+d" (_ccq), "+d" (_queuestart) + : "d" ((unsigned long)state), "d" (_token) + : "memory", "cc"); + *count = _ccq & 0xff; + *start = _queuestart & 0xff; -static inline int -do_sqbs(unsigned long sch, unsigned char state, int queue, - unsigned int *start, unsigned int *count) -{ -#ifdef CONFIG_64BIT - register unsigned long _ccq asm ("0") = *count; - register unsigned long _sch asm ("1") = sch; - unsigned long _queuestart = ((unsigned long)queue << 32) | *start; - - asm volatile( - " .insn rsy,0xeb000000008A,%1,0,0(%2)" - : "+d" (_ccq), "+d" (_queuestart) - : "d" ((unsigned long)state), "d" (_sch) - : "memory", "cc"); - *count = _ccq & 0xff; - *start = _queuestart & 0xff; - - return (_ccq >> 32) & 0xff; -#else - return 0; -#endif + return (_ccq >> 32) & 0xff; } -static inline int -do_eqbs(unsigned long sch, unsigned char *state, int queue, - unsigned int *start, unsigned int *count) +static inline int do_eqbs(u64 token, unsigned char *state, int queue, + int *start, int *count) { -#ifdef CONFIG_64BIT register unsigned long _ccq asm ("0") = *count; - register unsigned long _sch asm ("1") = sch; + register unsigned long _token asm ("1") = token; unsigned long _queuestart = ((unsigned long)queue << 32) | *start; unsigned long _state = 0; asm volatile( " .insn rrf,0xB99c0000,%1,%2,0,0" : "+d" (_ccq), "+d" (_queuestart), "+d" (_state) - : "d" (_sch) - : "memory", "cc" ); + : "d" (_token) + : "memory", "cc"); *count = _ccq & 0xff; *start = _queuestart & 0xff; *state = _state & 0xff; return (_ccq >> 32) & 0xff; -#else - return 0; -#endif } +#else +static inline int do_sqbs(u64 token, unsigned char state, int queue, + int *start, int *count) { return 0; } +static inline int do_eqbs(u64 token, unsigned char *state, int queue, + int *start, int *count) { return 0; } +#endif /* CONFIG_64BIT */ +struct qdio_irq; -static inline int -do_siga_sync(struct subchannel_id schid, unsigned int mask1, unsigned int mask2) -{ - register unsigned long reg0 asm ("0") = 2; - register struct subchannel_id reg1 asm ("1") = schid; - register unsigned long reg2 asm ("2") 
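/*
 * [Editorial note -- not part of this commit.]
 * The removed SIGA wrappers here all follow one pattern: pin the
 * function code and the subchannel id (or token) into fixed registers
 * r0-r3, issue "siga 0", then pull the 2-bit condition code out of the
 * PSW with "ipm %0; srl %0,28" -- IPM deposits the cc in bits 2-3 of
 * the result register, and the shift right by 28 leaves it as a plain
 * integer 0..3 for the C code to test.
 */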
= mask1; - register unsigned long reg3 asm ("3") = mask2; - int cc; - - asm volatile( - " siga 0\n" - " ipm %0\n" - " srl %0,28\n" - : "=d" (cc) - : "d" (reg0), "d" (reg1), "d" (reg2), "d" (reg3) : "cc"); - return cc; -} - -static inline int -do_siga_input(struct subchannel_id schid, unsigned int mask) -{ - register unsigned long reg0 asm ("0") = 1; - register struct subchannel_id reg1 asm ("1") = schid; - register unsigned long reg2 asm ("2") = mask; - int cc; - - asm volatile( - " siga 0\n" - " ipm %0\n" - " srl %0,28\n" - : "=d" (cc) - : "d" (reg0), "d" (reg1), "d" (reg2) : "cc", "memory"); - return cc; -} - -static inline int -do_siga_output(unsigned long schid, unsigned long mask, __u32 *bb, - unsigned int fc) -{ - register unsigned long __fc asm("0") = fc; - register unsigned long __schid asm("1") = schid; - register unsigned long __mask asm("2") = mask; - int cc; - - asm volatile( - " siga 0\n" - "0: ipm %0\n" - " srl %0,28\n" - "1:\n" - EX_TABLE(0b,1b) - : "=d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask) - : "0" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION) - : "cc", "memory"); - (*bb) = ((unsigned int) __fc) >> 31; - return cc; -} - -static inline unsigned long -do_clear_global_summary(void) -{ - register unsigned long __fn asm("1") = 3; - register unsigned long __tmp asm("2"); - register unsigned long __time asm("3"); - - asm volatile( - " .insn rre,0xb2650000,2,0" - : "+d" (__fn), "=d" (__tmp), "=d" (__time)); - return __time; -} - -/* - * QDIO device commands returned by extended Sense-ID - */ -#define DEFAULT_ESTABLISH_QS_CMD 0x1b -#define DEFAULT_ESTABLISH_QS_COUNT 0x1000 -#define DEFAULT_ACTIVATE_QS_CMD 0x1f -#define DEFAULT_ACTIVATE_QS_COUNT 0 - -/* - * additional CIWs returned by extended Sense-ID - */ -#define CIW_TYPE_EQUEUE 0x3 /* establish QDIO queues */ -#define CIW_TYPE_AQUEUE 0x4 /* activate QDIO queues */ - -#define QDIO_CHSC_RESPONSE_CODE_OK 1 -/* flags for st qdio sch data */ -#define CHSC_FLAG_QDIO_CAPABILITY 0x80 -#define CHSC_FLAG_VALIDITY 0x40 - -#define CHSC_FLAG_SIGA_INPUT_NECESSARY 0x40 -#define CHSC_FLAG_SIGA_OUTPUT_NECESSARY 0x20 -#define CHSC_FLAG_SIGA_SYNC_NECESSARY 0x10 -#define CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS 0x08 -#define CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS 0x04 +struct siga_flag { + u8 input:1; + u8 output:1; + u8 sync:1; + u8 no_sync_ti:1; + u8 no_sync_out_ti:1; + u8 no_sync_out_pci:1; + u8:2; +} __attribute__ ((packed)); -struct qdio_chsc_ssqd { +struct chsc_ssqd_area { struct chsc_header request; - u16 reserved1:10; - u16 ssid:2; - u16 fmt:4; + u16:10; + u8 ssid:2; + u8 fmt:4; u16 first_sch; - u16 reserved2; + u16:16; u16 last_sch; - u32 reserved3; + u32:32; struct chsc_header response; - u32 reserved4; - u8 flags; - u8 reserved5; - u16 sch; - u8 qfmt; - u8 parm; - u8 qdioac1; - u8 sch_class; - u8 pct; - u8 icnt; - u8 reserved7; - u8 ocnt; - u8 reserved8; - u8 mbccnt; - u16 qdioac2; - u64 sch_token; -}; + u32:32; + struct qdio_ssqd_desc qdio_ssqd; +} __attribute__ ((packed)); -struct qdio_perf_stats { -#ifdef CONFIG_64BIT - atomic64_t tl_runs; - atomic64_t outbound_tl_runs; - atomic64_t outbound_tl_runs_resched; - atomic64_t inbound_tl_runs; - atomic64_t inbound_tl_runs_resched; - atomic64_t inbound_thin_tl_runs; - atomic64_t inbound_thin_tl_runs_resched; - - atomic64_t siga_outs; - atomic64_t siga_ins; - atomic64_t siga_syncs; - atomic64_t pcis; - atomic64_t thinints; - atomic64_t fast_reqs; - - atomic64_t outbound_cnt; - atomic64_t inbound_cnt; -#else /* CONFIG_64BIT */ - atomic_t tl_runs; - atomic_t outbound_tl_runs; - atomic_t 
outbound_tl_runs_resched; - atomic_t inbound_tl_runs; - atomic_t inbound_tl_runs_resched; - atomic_t inbound_thin_tl_runs; - atomic_t inbound_thin_tl_runs_resched; - - atomic_t siga_outs; - atomic_t siga_ins; - atomic_t siga_syncs; - atomic_t pcis; - atomic_t thinints; - atomic_t fast_reqs; - - atomic_t outbound_cnt; - atomic_t inbound_cnt; -#endif /* CONFIG_64BIT */ +struct scssc_area { + struct chsc_header request; + u16 operation_code; + u16:16; + u32:32; + u32:32; + u64 summary_indicator_addr; + u64 subchannel_indicator_addr; + u32 ks:4; + u32 kc:4; + u32:21; + u32 isc:3; + u32 word_with_d_bit; + u32:32; + struct subchannel_id schid; + u32 reserved[1004]; + struct chsc_header response; + u32:32; +} __attribute__ ((packed)); + +struct qdio_input_q { + /* input buffer acknowledgement flag */ + int polling; + + /* last time of noticing incoming data */ + u64 timestamp; + + /* lock for clearing the acknowledgement */ + spinlock_t lock; }; -/* unlikely as the later the better */ -#define SYNC_MEMORY if (unlikely(q->siga_sync)) qdio_siga_sync_q(q) -#define SYNC_MEMORY_ALL if (unlikely(q->siga_sync)) \ - qdio_siga_sync(q,~0U,~0U) -#define SYNC_MEMORY_ALL_OUTB if (unlikely(q->siga_sync)) \ - qdio_siga_sync(q,~0U,0) +struct qdio_output_q { + /* failed siga-w attempts*/ + atomic_t busy_siga_counter; -#define NOW qdio_get_micros() -#define SAVE_TIMESTAMP(q) q->timing.last_transfer_time=NOW -#define GET_SAVED_TIMESTAMP(q) (q->timing.last_transfer_time) -#define SAVE_FRONTIER(q,val) q->last_move_ftc=val -#define GET_SAVED_FRONTIER(q) (q->last_move_ftc) + /* start time of busy condition */ + u64 timestamp; -#define MY_MODULE_STRING(x) #x + /* PCIs are enabled for the queue */ + int pci_out_enabled; -#ifdef CONFIG_64BIT -#define QDIO_GET_ADDR(x) ((__u32)(unsigned long)x) -#else /* CONFIG_64BIT */ -#define QDIO_GET_ADDR(x) ((__u32)(long)x) -#endif /* CONFIG_64BIT */ + /* timer to check for more outbound work */ + struct timer_list timer; +}; struct qdio_q { - volatile struct slsb slsb; + struct slsb slsb; + union { + struct qdio_input_q in; + struct qdio_output_q out; + } u; - char unused[QDIO_MAX_BUFFERS_PER_Q]; + /* queue number */ + int nr; - __u32 * dev_st_chg_ind; + /* bitmask of queue number */ + int mask; + /* input or output queue */ int is_input_q; - struct subchannel_id schid; - struct ccw_device *cdev; - - unsigned int is_iqdio_q; - unsigned int is_thinint_q; - /* bit 0 means queue 0, bit 1 means queue 1, ... 
*/ - unsigned int mask; - unsigned int q_no; + /* list of thinint input queues */ + struct list_head entry; + /* upper-layer program handler */ qdio_handler_t (*handler); - /* points to the next buffer to be checked for having - * been processed by the card (outbound) - * or to the next buffer the program should check for (inbound) */ - volatile int first_to_check; - /* and the last time it was: */ - volatile int last_move_ftc; + /* + * inbound: next buffer the program should check for + * outbound: next buffer to check for having been processed + * by the card + */ + int first_to_check; - atomic_t number_of_buffers_used; - atomic_t polling; + /* first_to_check of the last time */ + int last_move_ftc; - unsigned int siga_in; - unsigned int siga_out; - unsigned int siga_sync; - unsigned int siga_sync_done_on_thinints; - unsigned int siga_sync_done_on_outb_tis; - unsigned int hydra_gives_outbound_pcis; + /* beginning position for calling the program */ + int first_to_kick; - /* used to save beginning position when calling dd_handlers */ - int first_element_to_kick; + /* number of buffers in use by the adapter */ + atomic_t nr_buf_used; - atomic_t use_count; - atomic_t is_in_shutdown; - - void *irq_ptr; - - struct timer_list timer; -#ifdef QDIO_USE_TIMERS_FOR_POLLING - atomic_t timer_already_set; - spinlock_t timer_lock; -#else /* QDIO_USE_TIMERS_FOR_POLLING */ + struct qdio_irq *irq_ptr; struct tasklet_struct tasklet; -#endif /* QDIO_USE_TIMERS_FOR_POLLING */ - - enum qdio_irq_states state; - - /* used to store the error condition during a data transfer */ + /* error condition during a data transfer */ unsigned int qdio_error; - unsigned int siga_error; - unsigned int error_status_flags; - - /* list of interesting queues */ - volatile struct qdio_q *list_next; - volatile struct qdio_q *list_prev; struct sl *sl; - volatile struct sbal *sbal[QDIO_MAX_BUFFERS_PER_Q]; - - struct qdio_buffer *qdio_buffers[QDIO_MAX_BUFFERS_PER_Q]; - - unsigned long int_parm; - - /*struct { - int in_bh_check_limit; - int threshold; - } threshold_classes[QDIO_STATS_CLASSES];*/ - - struct { - /* inbound: the time to stop polling - outbound: the time to kick peer */ - int threshold; /* the real value */ - - /* outbound: last time of do_QDIO - inbound: last time of noticing incoming data */ - /*__u64 last_transfer_times[QDIO_STATS_NUMBER]; - int last_transfer_index; */ - - __u64 last_transfer_time; - __u64 busy_start; - } timing; - atomic_t busy_siga_counter; - unsigned int queue_type; - unsigned int is_pci_out; - - /* leave this member at the end. won't be cleared in qdio_fill_qs */ - struct slib *slib; /* a page is allocated under this pointer, - sl points into this page, offset PAGE_SIZE/2 - (after slib) */ + struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q]; + + /* + * Warning: Leave this member at the end so it won't be cleared in + * qdio_fill_qs. A page is allocated under this pointer and used for + * slib and sl. slib is 2048 bytes big and sl points to offset + * PAGE_SIZE / 2. 
+ */ + struct slib *slib; } __attribute__ ((aligned(256))); struct qdio_irq { - __u32 * volatile dev_st_chg_ind; + struct qib qib; + u32 *dsci; /* address of device state change indicator */ + struct ccw_device *cdev; unsigned long int_parm; struct subchannel_id schid; - - unsigned int is_iqdio_irq; - unsigned int is_thinint_irq; - unsigned int hydra_gives_outbound_pcis; - unsigned int sync_done_on_outb_pcis; - - /* QEBSM facility */ - unsigned int is_qebsm; - unsigned long sch_token; + unsigned long sch_token; /* QEBSM facility */ enum qdio_irq_states state; - unsigned int no_input_qs; - unsigned int no_output_qs; + struct siga_flag siga_flag; /* siga sync information from qdioac */ - unsigned char qdioac; + int nr_input_qs; + int nr_output_qs; struct ccw1 ccw; - struct ciw equeue; struct ciw aqueue; - struct qib qib; - - void (*original_int_handler) (struct ccw_device *, - unsigned long, struct irb *); + struct qdio_ssqd_desc ssqd_desc; + + void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *); - /* leave these four members together at the end. won't be cleared in qdio_fill_irq */ + /* + * Warning: Leave these members together at the end so they won't be + * cleared in qdio_setup_irq. + */ struct qdr *qdr; + unsigned long chsc_page; + struct qdio_q *input_qs[QDIO_MAX_QUEUES_PER_IRQ]; struct qdio_q *output_qs[QDIO_MAX_QUEUES_PER_IRQ]; - struct semaphore setting_up_sema; + + struct mutex setup_mutex; }; -#endif + +/* helper functions */ +#define queue_type(q) q->irq_ptr->qib.qfmt + +#define is_thinint_irq(irq) \ + (irq->qib.qfmt == QDIO_IQDIO_QFMT || \ + css_general_characteristics.aif_osa) + +/* the highest iqdio queue is used for multicast */ +static inline int multicast_outbound(struct qdio_q *q) +{ + return (q->irq_ptr->nr_output_qs > 1) && + (q->nr == q->irq_ptr->nr_output_qs - 1); +} + +static inline unsigned long long get_usecs(void) +{ + return monotonic_clock() >> 12; +} + +#define pci_out_supported(q) \ + (q->irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) +#define is_qebsm(q) (q->irq_ptr->sch_token != 0) + +#define need_siga_sync_thinint(q) (!q->irq_ptr->siga_flag.no_sync_ti) +#define need_siga_sync_out_thinint(q) (!q->irq_ptr->siga_flag.no_sync_out_ti) +#define need_siga_in(q) (q->irq_ptr->siga_flag.input) +#define need_siga_out(q) (q->irq_ptr->siga_flag.output) +#define need_siga_sync(q) (q->irq_ptr->siga_flag.sync) +#define siga_syncs_out_pci(q) (q->irq_ptr->siga_flag.no_sync_out_pci) + +#define for_each_input_queue(irq_ptr, q, i) \ + for (i = 0, q = irq_ptr->input_qs[0]; \ + i < irq_ptr->nr_input_qs; \ + q = irq_ptr->input_qs[++i]) +#define for_each_output_queue(irq_ptr, q, i) \ + for (i = 0, q = irq_ptr->output_qs[0]; \ + i < irq_ptr->nr_output_qs; \ + q = irq_ptr->output_qs[++i]) + +#define prev_buf(bufnr) \ + ((bufnr + QDIO_MAX_BUFFERS_MASK) & QDIO_MAX_BUFFERS_MASK) +#define next_buf(bufnr) \ + ((bufnr + 1) & QDIO_MAX_BUFFERS_MASK) +#define add_buf(bufnr, inc) \ + ((bufnr + inc) & QDIO_MAX_BUFFERS_MASK) + +/* prototypes for thin interrupt */ +void qdio_sync_after_thinint(struct qdio_q *q); +int get_buf_state(struct qdio_q *q, unsigned int bufnr, unsigned char *state); +void qdio_check_outbound_after_thinint(struct qdio_q *q); +int qdio_inbound_q_moved(struct qdio_q *q); +void qdio_kick_inbound_handler(struct qdio_q *q); +void qdio_stop_polling(struct qdio_q *q); +int qdio_siga_sync_q(struct qdio_q *q); + +void qdio_setup_thinint(struct qdio_irq *irq_ptr); +int qdio_establish_thinint(struct qdio_irq *irq_ptr); +void qdio_shutdown_thinint(struct qdio_irq 
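/*
 * [Editorial note -- not part of this commit.]
 * Two conventions behind the new helpers above:
 *
 * 1. QDIO_MAX_BUFFERS_PER_Q is a power of two (128), so ring-index
 *    arithmetic is a mask, never a modulo; adding QDIO_MAX_BUFFERS_MASK
 *    (size - 1) and masking is the wrap-safe way to step backwards
 *    with unsigned values:
 *
 *	next = (bufnr + 1) & QDIO_MAX_BUFFERS_MASK;
 *	prev = (bufnr + QDIO_MAX_BUFFERS_MASK) & QDIO_MAX_BUFFERS_MASK;
 *
 * 2. get_usecs() shifts the TOD-format monotonic clock right by 12:
 *    TOD-clock bit 51 ticks once per microsecond, so ">> 12" converts
 *    the raw value to microseconds.
 */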
*irq_ptr); +void tiqdio_add_input_queues(struct qdio_irq *irq_ptr); +void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr); +void tiqdio_inbound_processing(unsigned long q); +int tiqdio_allocate_memory(void); +void tiqdio_free_memory(void); +int tiqdio_register_thinints(void); +void tiqdio_unregister_thinints(void); + +/* prototypes for setup */ +void qdio_inbound_processing(unsigned long data); +void qdio_outbound_processing(unsigned long data); +void qdio_outbound_timer(unsigned long data); +void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, + struct irb *irb); +int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, + int nr_output_qs); +void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr); +int qdio_setup_irq(struct qdio_initialize *init_data); +void qdio_print_subchannel_info(struct qdio_irq *irq_ptr, + struct ccw_device *cdev); +void qdio_release_memory(struct qdio_irq *irq_ptr); +int qdio_setup_init(void); +void qdio_setup_exit(void); + +#endif /* _CIO_QDIO_H */ diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c new file mode 100644 index 000000000000..337aa3087a78 --- /dev/null +++ b/drivers/s390/cio/qdio_debug.c @@ -0,0 +1,240 @@ +/* + * drivers/s390/cio/qdio_debug.c + * + * Copyright IBM Corp. 2008 + * + * Author: Jan Glauber (jang@linux.vnet.ibm.com) + */ +#include <linux/proc_fs.h> +#include <linux/seq_file.h> +#include <linux/debugfs.h> +#include <asm/qdio.h> +#include <asm/debug.h> +#include "qdio_debug.h" +#include "qdio.h" + +debug_info_t *qdio_dbf_setup; +debug_info_t *qdio_dbf_trace; + +static struct dentry *debugfs_root; +#define MAX_DEBUGFS_QUEUES 32 +static struct dentry *debugfs_queues[MAX_DEBUGFS_QUEUES] = { NULL }; +static DEFINE_MUTEX(debugfs_mutex); + +void qdio_allocate_do_dbf(struct qdio_initialize *init_data) +{ + char dbf_text[20]; + + sprintf(dbf_text, "qfmt:%x", init_data->q_format); + QDIO_DBF_TEXT0(0, setup, dbf_text); + QDIO_DBF_HEX0(0, setup, init_data->adapter_name, 8); + sprintf(dbf_text, "qpff%4x", init_data->qib_param_field_format); + QDIO_DBF_TEXT0(0, setup, dbf_text); + QDIO_DBF_HEX0(0, setup, &init_data->qib_param_field, sizeof(void *)); + QDIO_DBF_HEX0(0, setup, &init_data->input_slib_elements, sizeof(void *)); + QDIO_DBF_HEX0(0, setup, &init_data->output_slib_elements, sizeof(void *)); + sprintf(dbf_text, "niq:%4x", init_data->no_input_qs); + QDIO_DBF_TEXT0(0, setup, dbf_text); + sprintf(dbf_text, "noq:%4x", init_data->no_output_qs); + QDIO_DBF_TEXT0(0, setup, dbf_text); + QDIO_DBF_HEX0(0, setup, &init_data->input_handler, sizeof(void *)); + QDIO_DBF_HEX0(0, setup, &init_data->output_handler, sizeof(void *)); + QDIO_DBF_HEX0(0, setup, &init_data->int_parm, sizeof(long)); + QDIO_DBF_HEX0(0, setup, &init_data->flags, sizeof(long)); + QDIO_DBF_HEX0(0, setup, &init_data->input_sbal_addr_array, sizeof(void *)); + QDIO_DBF_HEX0(0, setup, &init_data->output_sbal_addr_array, sizeof(void *)); +} + +static void qdio_unregister_dbf_views(void) +{ + if (qdio_dbf_setup) + debug_unregister(qdio_dbf_setup); + if (qdio_dbf_trace) + debug_unregister(qdio_dbf_trace); +} + +static int qdio_register_dbf_views(void) +{ + qdio_dbf_setup = debug_register("qdio_setup", QDIO_DBF_SETUP_PAGES, + QDIO_DBF_SETUP_NR_AREAS, + QDIO_DBF_SETUP_LEN); + if (!qdio_dbf_setup) + goto oom; + debug_register_view(qdio_dbf_setup, &debug_hex_ascii_view); + debug_set_level(qdio_dbf_setup, QDIO_DBF_SETUP_LEVEL); + + qdio_dbf_trace = debug_register("qdio_trace", QDIO_DBF_TRACE_PAGES, + QDIO_DBF_TRACE_NR_AREAS, + QDIO_DBF_TRACE_LEN); + if 
(!qdio_dbf_trace)
+		goto oom;
+	debug_register_view(qdio_dbf_trace, &debug_hex_ascii_view);
+	debug_set_level(qdio_dbf_trace, QDIO_DBF_TRACE_LEVEL);
+	return 0;
+oom:
+	qdio_unregister_dbf_views();
+	return -ENOMEM;
+}
+
+static int qstat_show(struct seq_file *m, void *v)
+{
+	unsigned char state;
+	struct qdio_q *q = m->private;
+	int i;
+
+	if (!q)
+		return 0;
+
+	seq_printf(m, "device state indicator: %d\n", *q->irq_ptr->dsci);
+	seq_printf(m, "nr_used: %d\n", atomic_read(&q->nr_buf_used));
+	seq_printf(m, "ftc: %d\n", q->first_to_check);
+	seq_printf(m, "last_move_ftc: %d\n", q->last_move_ftc);
+	seq_printf(m, "polling: %d\n", q->u.in.polling);
+	seq_printf(m, "slsb buffer states:\n");
+
+	qdio_siga_sync_q(q);
+	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
+		get_buf_state(q, i, &state);
+		switch (state) {
+		case SLSB_P_INPUT_NOT_INIT:
+		case SLSB_P_OUTPUT_NOT_INIT:
+			seq_printf(m, "N");
+			break;
+		case SLSB_P_INPUT_PRIMED:
+		case SLSB_CU_OUTPUT_PRIMED:
+			seq_printf(m, "+");
+			break;
+		case SLSB_P_INPUT_ACK:
+			seq_printf(m, "A");
+			break;
+		case SLSB_P_INPUT_ERROR:
+		case SLSB_P_OUTPUT_ERROR:
+			seq_printf(m, "x");
+			break;
+		case SLSB_CU_INPUT_EMPTY:
+		case SLSB_P_OUTPUT_EMPTY:
+			seq_printf(m, "-");
+			break;
+		case SLSB_P_INPUT_HALTED:
+		case SLSB_P_OUTPUT_HALTED:
+			seq_printf(m, ".");
+			break;
+		default:
+			seq_printf(m, "?");
+		}
+		if (i == 63)
+			seq_printf(m, "\n");
+	}
+	seq_printf(m, "\n");
+	return 0;
+}
+
+static ssize_t qstat_seq_write(struct file *file, const char __user *buf,
+			       size_t count, loff_t *off)
+{
+	struct seq_file *seq = file->private_data;
+	struct qdio_q *q = seq->private;
+
+	if (!q)
+		return 0;
+
+	if (q->is_input_q)
+		xchg(q->irq_ptr->dsci, 1);
+	local_bh_disable();
+	tasklet_schedule(&q->tasklet);
+	local_bh_enable();
+	return count;
+}
+
+static int qstat_seq_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, qstat_show,
+			   filp->f_path.dentry->d_inode->i_private);
+}
+
+static void get_queue_name(struct qdio_q *q, struct ccw_device *cdev, char *name)
+{
+	/* the caller provides a 40 byte buffer for name; sprintf terminates
+	 * the string, so no prior clearing of the buffer is needed */
+	sprintf(name, "%s", cdev->dev.bus_id);
+	if (q->is_input_q)
+		sprintf(name + strlen(name), "_input");
+	else
+		sprintf(name + strlen(name), "_output");
+	sprintf(name + strlen(name), "_%d", q->nr);
+}
+
+static void remove_debugfs_entry(struct qdio_q *q)
+{
+	int i;
+
+	for (i = 0; i < MAX_DEBUGFS_QUEUES; i++) {
+		if (!debugfs_queues[i])
+			continue;
+		if (debugfs_queues[i]->d_inode->i_private == q) {
+			debugfs_remove(debugfs_queues[i]);
+			debugfs_queues[i] = NULL;
+		}
+	}
+}
+
+static struct file_operations debugfs_fops = {
+	.owner	 = THIS_MODULE,
+	.open	 = qstat_seq_open,
+	.read	 = seq_read,
+	.write	 = qstat_seq_write,
+	.llseek  = seq_lseek,
+	.release = single_release,
+};
+
+static void setup_debugfs_entry(struct qdio_q *q, struct ccw_device *cdev)
+{
+	int i = 0;
+	char name[40];
+
+	while (debugfs_queues[i] != NULL) {
+		i++;
+		if (i >= MAX_DEBUGFS_QUEUES)
+			return;
+	}
+	get_queue_name(q, cdev, name);
+	debugfs_queues[i] = debugfs_create_file(name, S_IFREG | S_IRUGO | S_IWUSR,
+						debugfs_root, q, &debugfs_fops);
+}
+
+void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
+{
+	struct qdio_q *q;
+	int i;
+
+	mutex_lock(&debugfs_mutex);
+	for_each_input_queue(irq_ptr, q, i)
+		setup_debugfs_entry(q, cdev);
+	for_each_output_queue(irq_ptr, q, i)
+		setup_debugfs_entry(q, cdev);
+	mutex_unlock(&debugfs_mutex);
+}
+
+void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
+{
+	struct qdio_q *q;
+	int 
i; + + mutex_lock(&debugfs_mutex); + for_each_input_queue(irq_ptr, q, i) + remove_debugfs_entry(q); + for_each_output_queue(irq_ptr, q, i) + remove_debugfs_entry(q); + mutex_unlock(&debugfs_mutex); +} + +int __init qdio_debug_init(void) +{ + debugfs_root = debugfs_create_dir("qdio_queues", NULL); + return qdio_register_dbf_views(); +} + +void qdio_debug_exit(void) +{ + debugfs_remove(debugfs_root); + qdio_unregister_dbf_views(); +} diff --git a/drivers/s390/cio/qdio_debug.h b/drivers/s390/cio/qdio_debug.h new file mode 100644 index 000000000000..8484b83698e1 --- /dev/null +++ b/drivers/s390/cio/qdio_debug.h @@ -0,0 +1,91 @@ +/* + * drivers/s390/cio/qdio_debug.h + * + * Copyright IBM Corp. 2008 + * + * Author: Jan Glauber (jang@linux.vnet.ibm.com) + */ +#ifndef QDIO_DEBUG_H +#define QDIO_DEBUG_H + +#include <asm/debug.h> +#include <asm/qdio.h> +#include "qdio.h" + +#define QDIO_DBF_HEX(ex, name, level, addr, len) \ + do { \ + if (ex) \ + debug_exception(qdio_dbf_##name, level, (void *)(addr), len); \ + else \ + debug_event(qdio_dbf_##name, level, (void *)(addr), len); \ + } while (0) +#define QDIO_DBF_TEXT(ex, name, level, text) \ + do { \ + if (ex) \ + debug_text_exception(qdio_dbf_##name, level, text); \ + else \ + debug_text_event(qdio_dbf_##name, level, text); \ + } while (0) + +#define QDIO_DBF_HEX0(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 0, addr, len) +#define QDIO_DBF_HEX1(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 1, addr, len) +#define QDIO_DBF_HEX2(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 2, addr, len) + +#ifdef CONFIG_QDIO_DEBUG +#define QDIO_DBF_HEX3(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 3, addr, len) +#define QDIO_DBF_HEX4(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 4, addr, len) +#define QDIO_DBF_HEX5(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 5, addr, len) +#define QDIO_DBF_HEX6(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 6, addr, len) +#else +#define QDIO_DBF_HEX3(ex, name, addr, len) do {} while (0) +#define QDIO_DBF_HEX4(ex, name, addr, len) do {} while (0) +#define QDIO_DBF_HEX5(ex, name, addr, len) do {} while (0) +#define QDIO_DBF_HEX6(ex, name, addr, len) do {} while (0) +#endif /* CONFIG_QDIO_DEBUG */ + +#define QDIO_DBF_TEXT0(ex, name, text) QDIO_DBF_TEXT(ex, name, 0, text) +#define QDIO_DBF_TEXT1(ex, name, text) QDIO_DBF_TEXT(ex, name, 1, text) +#define QDIO_DBF_TEXT2(ex, name, text) QDIO_DBF_TEXT(ex, name, 2, text) + +#ifdef CONFIG_QDIO_DEBUG +#define QDIO_DBF_TEXT3(ex, name, text) QDIO_DBF_TEXT(ex, name, 3, text) +#define QDIO_DBF_TEXT4(ex, name, text) QDIO_DBF_TEXT(ex, name, 4, text) +#define QDIO_DBF_TEXT5(ex, name, text) QDIO_DBF_TEXT(ex, name, 5, text) +#define QDIO_DBF_TEXT6(ex, name, text) QDIO_DBF_TEXT(ex, name, 6, text) +#else +#define QDIO_DBF_TEXT3(ex, name, text) do {} while (0) +#define QDIO_DBF_TEXT4(ex, name, text) do {} while (0) +#define QDIO_DBF_TEXT5(ex, name, text) do {} while (0) +#define QDIO_DBF_TEXT6(ex, name, text) do {} while (0) +#endif /* CONFIG_QDIO_DEBUG */ + +/* s390dbf views */ +#define QDIO_DBF_SETUP_LEN 8 +#define QDIO_DBF_SETUP_PAGES 4 +#define QDIO_DBF_SETUP_NR_AREAS 1 + +#define QDIO_DBF_TRACE_LEN 8 +#define QDIO_DBF_TRACE_NR_AREAS 2 + +#ifdef CONFIG_QDIO_DEBUG +#define QDIO_DBF_TRACE_PAGES 16 +#define QDIO_DBF_SETUP_LEVEL 6 +#define QDIO_DBF_TRACE_LEVEL 4 +#else /* !CONFIG_QDIO_DEBUG */ +#define QDIO_DBF_TRACE_PAGES 4 +#define QDIO_DBF_SETUP_LEVEL 2 +#define QDIO_DBF_TRACE_LEVEL 2 +#endif /* CONFIG_QDIO_DEBUG */ + +extern debug_info_t *qdio_dbf_setup; +extern debug_info_t *qdio_dbf_trace; + +void 
qdio_allocate_do_dbf(struct qdio_initialize *init_data);
+void debug_print_bstat(struct qdio_q *q);
+void qdio_setup_debug_entries(struct qdio_irq *irq_ptr,
+			      struct ccw_device *cdev);
+void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr,
+				 struct ccw_device *cdev);
+int qdio_debug_init(void);
+void qdio_debug_exit(void);
+#endif
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
new file mode 100644
index 000000000000..d10c73cc1688
--- /dev/null
+++ b/drivers/s390/cio/qdio_main.c
@@ -0,0 +1,1755 @@
+/*
+ * linux/drivers/s390/cio/qdio_main.c
+ *
+ * Linux for s390 qdio support, buffer handling, qdio API and module support.
+ *
+ * Copyright 2000,2008 IBM Corp.
+ * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
+ *	      Jan Glauber <jang@linux.vnet.ibm.com>
+ * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <asm/atomic.h>
+#include <asm/debug.h>
+#include <asm/qdio.h>
+
+#include "cio.h"
+#include "css.h"
+#include "device.h"
+#include "qdio.h"
+#include "qdio_debug.h"
+#include "qdio_perf.h"
+
+MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>, "\
+	      "Jan Glauber <jang@linux.vnet.ibm.com>");
+MODULE_DESCRIPTION("QDIO base support");
+MODULE_LICENSE("GPL");
+
+static inline int do_siga_sync(struct subchannel_id schid,
+			       unsigned int out_mask, unsigned int in_mask)
+{
+	register unsigned long __fc asm ("0") = 2;
+	register struct subchannel_id __schid asm ("1") = schid;
+	register unsigned long out asm ("2") = out_mask;
+	register unsigned long in asm ("3") = in_mask;
+	int cc;
+
+	asm volatile(
+		" siga 0\n"
+		" ipm %0\n"
+		" srl %0,28\n"
+		: "=d" (cc)
+		: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
+	return cc;
+}
+
+static inline int do_siga_input(struct subchannel_id schid, unsigned int mask)
+{
+	register unsigned long __fc asm ("0") = 1;
+	register struct subchannel_id __schid asm ("1") = schid;
+	register unsigned long __mask asm ("2") = mask;
+	int cc;
+
+	asm volatile(
+		" siga 0\n"
+		" ipm %0\n"
+		" srl %0,28\n"
+		: "=d" (cc)
+		: "d" (__fc), "d" (__schid), "d" (__mask) : "cc", "memory");
+	return cc;
+}
+
+/**
+ * do_siga_output - perform SIGA-w/wt function
+ * @schid: subchannel id or in case of QEBSM the subchannel token
+ * @mask: which output queues to process
+ * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
+ * @fc: function code to perform
+ *
+ * Returns cc or QDIO_ERROR_SIGA_ACCESS_EXCEPTION.
+ * Note: For IQDIO unicast queues only the highest priority queue is processed.
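+ *
+ * A call-site sketch (illustrative only, mirroring qdio_do_siga_output()
+ * below): a non-QEBSM subchannel is addressed by its subchannel id,
+ *
+ *	cc = do_siga_output(*((u32 *)&q->irq_ptr->schid), q->mask, &bb, 0);
+ *
+ * while a QEBSM subchannel passes the subchannel token and sets 0x80 in @fc.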
+ */
+static inline int do_siga_output(unsigned long schid, unsigned long mask,
+				 u32 *bb, unsigned int fc)
+{
+	register unsigned long __fc asm("0") = fc;
+	register unsigned long __schid asm("1") = schid;
+	register unsigned long __mask asm("2") = mask;
+	int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;
+
+	asm volatile(
+		" siga 0\n"
+		"0: ipm %0\n"
+		" srl %0,28\n"
+		"1:\n"
+		EX_TABLE(0b, 1b)
+		: "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
+		: : "cc", "memory");
+	*bb = ((unsigned int) __fc) >> 31;
+	return cc;
+}
+
+static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
+{
+	char dbf_text[15];
+
+	/* all done or next buffer state different */
+	if (ccq == 0 || ccq == 32)
+		return 0;
+	/* not all buffers processed */
+	if (ccq == 96 || ccq == 97)
+		return 1;
+	/* notify devices immediately */
+	sprintf(dbf_text, "%d", ccq);
+	QDIO_DBF_TEXT2(1, trace, dbf_text);
+	return -EIO;
+}
+
+/**
+ * qdio_do_eqbs - extract buffer states for QEBSM
+ * @q: queue to manipulate
+ * @state: state of the extracted buffers
+ * @start: buffer number to start at
+ * @count: count of buffers to examine
+ *
+ * Returns the number of successfully extracted equal buffer states.
+ * Stops processing if a state is different from the last buffer's state.
+ */
+static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
+			int start, int count)
+{
+	unsigned int ccq = 0;
+	int tmp_count = count, tmp_start = start;
+	int nr = q->nr;
+	int rc;
+	char dbf_text[15];
+
+	BUG_ON(!q->irq_ptr->sch_token);
+
+	if (!q->is_input_q)
+		nr += q->irq_ptr->nr_input_qs;
+again:
+	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
+	rc = qdio_check_ccq(q, ccq);
+
+	/* At least one buffer was processed, return and extract the remaining
+	 * buffers later.
+	 */
+	if ((ccq == 96) && (count != tmp_count))
+		return (count - tmp_count);
+	if (rc == 1) {
+		QDIO_DBF_TEXT5(1, trace, "eqAGAIN");
+		goto again;
+	}
+
+	if (rc < 0) {
+		QDIO_DBF_TEXT2(1, trace, "eqberr");
+		sprintf(dbf_text, "%2x,%2x,%d,%d", count, tmp_count, ccq, nr);
+		QDIO_DBF_TEXT2(1, trace, dbf_text);
+		q->handler(q->irq_ptr->cdev,
+			   QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
+			   0, -1, -1, q->irq_ptr->int_parm);
+		return 0;
+	}
+	return count - tmp_count;
+}
+
+/**
+ * qdio_do_sqbs - set buffer states for QEBSM
+ * @q: queue to manipulate
+ * @state: new state of the buffers
+ * @start: first buffer number to change
+ * @count: how many buffers to change
+ *
+ * Returns the number of successfully changed buffers.
+ * Retries until the specified count of buffer states is set or an
+ * error occurs.
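+ *
+ * Callers normally reach this through set_buf_states(), which dispatches
+ * as sketched below:
+ *
+ *	if (is_qebsm(q))
+ *		return qdio_do_sqbs(q, state, bufnr, count);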
+ */ +static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start, + int count) +{ + unsigned int ccq = 0; + int tmp_count = count, tmp_start = start; + int nr = q->nr; + int rc; + char dbf_text[15]; + + BUG_ON(!q->irq_ptr->sch_token); + + if (!q->is_input_q) + nr += q->irq_ptr->nr_input_qs; +again: + ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count); + rc = qdio_check_ccq(q, ccq); + if (rc == 1) { + QDIO_DBF_TEXT5(1, trace, "sqAGAIN"); + goto again; + } + if (rc < 0) { + QDIO_DBF_TEXT3(1, trace, "sqberr"); + sprintf(dbf_text, "%2x,%2x", count, tmp_count); + QDIO_DBF_TEXT3(1, trace, dbf_text); + sprintf(dbf_text, "%d,%d", ccq, nr); + QDIO_DBF_TEXT3(1, trace, dbf_text); + + q->handler(q->irq_ptr->cdev, + QDIO_ERROR_ACTIVATE_CHECK_CONDITION, + 0, -1, -1, q->irq_ptr->int_parm); + return 0; + } + WARN_ON(tmp_count); + return count - tmp_count; +} + +/* returns number of examined buffers and their common state in *state */ +static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr, + unsigned char *state, unsigned int count) +{ + unsigned char __state = 0; + int i; + + BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK); + BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q); + + if (is_qebsm(q)) + return qdio_do_eqbs(q, state, bufnr, count); + + for (i = 0; i < count; i++) { + if (!__state) + __state = q->slsb.val[bufnr]; + else if (q->slsb.val[bufnr] != __state) + break; + bufnr = next_buf(bufnr); + } + *state = __state; + return i; +} + +inline int get_buf_state(struct qdio_q *q, unsigned int bufnr, + unsigned char *state) +{ + return get_buf_states(q, bufnr, state, 1); +} + +/* wrap-around safe setting of slsb states, returns number of changed buffers */ +static inline int set_buf_states(struct qdio_q *q, int bufnr, + unsigned char state, int count) +{ + int i; + + BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK); + BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q); + + if (is_qebsm(q)) + return qdio_do_sqbs(q, state, bufnr, count); + + for (i = 0; i < count; i++) { + xchg(&q->slsb.val[bufnr], state); + bufnr = next_buf(bufnr); + } + return count; +} + +static inline int set_buf_state(struct qdio_q *q, int bufnr, + unsigned char state) +{ + return set_buf_states(q, bufnr, state, 1); +} + +/* set slsb states to initial state */ +void qdio_init_buf_states(struct qdio_irq *irq_ptr) +{ + struct qdio_q *q; + int i; + + for_each_input_queue(irq_ptr, q, i) + set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT, + QDIO_MAX_BUFFERS_PER_Q); + for_each_output_queue(irq_ptr, q, i) + set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT, + QDIO_MAX_BUFFERS_PER_Q); +} + +static int qdio_siga_sync(struct qdio_q *q, unsigned int output, + unsigned int input) +{ + int cc; + + if (!need_siga_sync(q)) + return 0; + + qdio_perf_stat_inc(&perf_stats.siga_sync); + + cc = do_siga_sync(q->irq_ptr->schid, output, input); + if (cc) { + QDIO_DBF_TEXT4(0, trace, "sigasync"); + QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); + QDIO_DBF_HEX3(0, trace, &cc, sizeof(int *)); + } + return cc; +} + +inline int qdio_siga_sync_q(struct qdio_q *q) +{ + if (q->is_input_q) + return qdio_siga_sync(q, 0, q->mask); + else + return qdio_siga_sync(q, q->mask, 0); +} + +static inline int qdio_siga_sync_out(struct qdio_q *q) +{ + return qdio_siga_sync(q, ~0U, 0); +} + +static inline int qdio_siga_sync_all(struct qdio_q *q) +{ + return qdio_siga_sync(q, ~0U, ~0U); +} + +static inline int qdio_do_siga_output(struct qdio_q *q, unsigned int *busy_bit) +{ + unsigned int fc = 0; + unsigned long schid; + + if (!is_qebsm(q)) + schid = *((u32 *)&q->irq_ptr->schid); + 
else {
+		schid = q->irq_ptr->sch_token;
+		fc |= 0x80;
+	}
+	return do_siga_output(schid, q->mask, busy_bit, fc);
+}
+
+static int qdio_siga_output(struct qdio_q *q)
+{
+	int cc;
+	u32 busy_bit;
+	u64 start_time = 0;
+
+	QDIO_DBF_TEXT5(0, trace, "sigaout");
+	QDIO_DBF_HEX5(0, trace, &q, sizeof(void *));
+
+	qdio_perf_stat_inc(&perf_stats.siga_out);
+again:
+	cc = qdio_do_siga_output(q, &busy_bit);
+	if (queue_type(q) == QDIO_IQDIO_QFMT && cc == 2 && busy_bit) {
+		if (!start_time)
+			start_time = get_usecs();
+		else if ((get_usecs() - start_time) < QDIO_BUSY_BIT_PATIENCE)
+			goto again;
+	}
+
+	if (cc == 2 && busy_bit)
+		cc |= QDIO_ERROR_SIGA_BUSY;
+	if (cc)
+		QDIO_DBF_HEX3(0, trace, &cc, sizeof(int *));
+	return cc;
+}
+
+static inline int qdio_siga_input(struct qdio_q *q)
+{
+	int cc;
+
+	QDIO_DBF_TEXT4(0, trace, "sigain");
+	QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
+
+	qdio_perf_stat_inc(&perf_stats.siga_in);
+
+	cc = do_siga_input(q->irq_ptr->schid, q->mask);
+	if (cc)
+		QDIO_DBF_HEX3(0, trace, &cc, sizeof(int *));
+	return cc;
+}
+
+/* called from thinint inbound handler */
+void qdio_sync_after_thinint(struct qdio_q *q)
+{
+	if (pci_out_supported(q)) {
+		if (need_siga_sync_thinint(q))
+			qdio_siga_sync_all(q);
+		else if (need_siga_sync_out_thinint(q))
+			qdio_siga_sync_out(q);
+	} else
+		qdio_siga_sync_q(q);
+}
+
+inline void qdio_stop_polling(struct qdio_q *q)
+{
+	spin_lock_bh(&q->u.in.lock);
+	if (!q->u.in.polling) {
+		spin_unlock_bh(&q->u.in.lock);
+		return;
+	}
+	q->u.in.polling = 0;
+	qdio_perf_stat_inc(&perf_stats.debug_stop_polling);
+
+	/* show the card that we are not polling anymore */
+	set_buf_state(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT);
+	spin_unlock_bh(&q->u.in.lock);
+}
+
+static void announce_buffer_error(struct qdio_q *q)
+{
+	char dbf_text[15];
+
+	if (q->is_input_q)
+		QDIO_DBF_TEXT3(1, trace, "inperr");
+	else
+		QDIO_DBF_TEXT3(0, trace, "outperr");
+
+	sprintf(dbf_text, "%x-%x-%x", q->first_to_check,
+		q->sbal[q->first_to_check]->element[14].flags,
+		q->sbal[q->first_to_check]->element[15].flags);
+	QDIO_DBF_TEXT3(1, trace, dbf_text);
+	QDIO_DBF_HEX2(1, trace, q->sbal[q->first_to_check], 256);
+
+	q->qdio_error = QDIO_ERROR_SLSB_STATE;
+}
+
+static int get_inbound_buffer_frontier(struct qdio_q *q)
+{
+	int count, stop;
+	unsigned char state;
+
+	/*
+	 * If we still poll don't update last_move_ftc, keep the
+	 * previously ACKed buffer there.
+	 */
+	if (!q->u.in.polling)
+		q->last_move_ftc = q->first_to_check;
+
+	/*
+	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
+	 * would return 0.
+	 */
+	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
+	stop = add_buf(q->first_to_check, count);
+
+	/*
+	 * No siga-sync here; either a PCI interrupt or our processing
+	 * after a thin interrupt will sync the queues.
+	 */
+
+	/* need to set count to 1 for non-qebsm */
+	if (!is_qebsm(q))
+		count = 1;
+
+check_next:
+	if (q->first_to_check == stop)
+		goto out;
+
+	count = get_buf_states(q, q->first_to_check, &state, count);
+	if (!count)
+		goto out;
+
+	switch (state) {
+	case SLSB_P_INPUT_PRIMED:
+		QDIO_DBF_TEXT5(0, trace, "inptprim");
+
+		/*
+		 * Only ACK the first buffer. The ACK will be removed in
+		 * qdio_stop_polling.
+		 */
+		if (q->u.in.polling)
+			state = SLSB_P_INPUT_NOT_INIT;
+		else {
+			q->u.in.polling = 1;
+			state = SLSB_P_INPUT_ACK;
+		}
+		set_buf_state(q, q->first_to_check, state);
+
+		/*
+		 * Need to change all PRIMED buffers to NOT_INIT, otherwise
+		 * we're losing initiative in the thinint code.
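+		 *
+		 * Example: if count PRIMED buffers were found, the first one
+		 * becomes ACK (or NOT_INIT when already polling) and the
+		 * remaining count - 1 buffers become NOT_INIT via the
+		 * set_buf_states() call below.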
+		 */
+		if (count > 1)
+			set_buf_states(q, next_buf(q->first_to_check),
+				       SLSB_P_INPUT_NOT_INIT, count - 1);
+
+		/*
+		 * No siga-sync needed for non-qebsm here, as the inbound queue
+		 * will be synced on the next siga-r; otherwise
+		 * tiqdio_is_inbound_q_done will do the siga-sync.
+		 */
+		q->first_to_check = add_buf(q->first_to_check, count);
+		atomic_sub(count, &q->nr_buf_used);
+		goto check_next;
+	case SLSB_P_INPUT_ERROR:
+		announce_buffer_error(q);
+		/* process the buffer, the upper layer will take care of it */
+		q->first_to_check = add_buf(q->first_to_check, count);
+		atomic_sub(count, &q->nr_buf_used);
+		break;
+	case SLSB_CU_INPUT_EMPTY:
+	case SLSB_P_INPUT_NOT_INIT:
+	case SLSB_P_INPUT_ACK:
+		QDIO_DBF_TEXT5(0, trace, "inpnipro");
+		break;
+	default:
+		BUG();
+	}
+out:
+	QDIO_DBF_HEX4(0, trace, &q->first_to_check, sizeof(int));
+	return q->first_to_check;
+}
+
+int qdio_inbound_q_moved(struct qdio_q *q)
+{
+	int bufnr;
+
+	bufnr = get_inbound_buffer_frontier(q);
+
+	if ((bufnr != q->last_move_ftc) || q->qdio_error) {
+		if (!need_siga_sync(q) && !pci_out_supported(q))
+			q->u.in.timestamp = get_usecs();
+
+		QDIO_DBF_TEXT4(0, trace, "inhasmvd");
+		QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
+		return 1;
+	} else
+		return 0;
+}
+
+static int qdio_inbound_q_done(struct qdio_q *q)
+{
+	unsigned char state;
+#ifdef CONFIG_QDIO_DEBUG
+	char dbf_text[15];
+#endif
+
+	if (!atomic_read(&q->nr_buf_used))
+		return 1;
+
+	/*
+	 * We need that one for synchronization with the adapter, as it
+	 * does a kind of PCI avoidance.
+	 */
+	qdio_siga_sync_q(q);
+
+	get_buf_state(q, q->first_to_check, &state);
+	if (state == SLSB_P_INPUT_PRIMED)
+		/* we got something to do */
+		return 0;
+
+	/* on VM, we don't poll, so the q is always done here */
+	if (need_siga_sync(q) || pci_out_supported(q))
+		return 1;
+
+	/*
+	 * At this point we know that inbound first_to_check
+	 * has (probably) not moved (see qdio_inbound_processing).
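+	 *
+	 * The queue only counts as done once the get_usecs() clock has
+	 * advanced past the last recorded move by more than
+	 * QDIO_INPUT_THRESHOLD, see the check below.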
+	 */
+	if (get_usecs() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
+#ifdef CONFIG_QDIO_DEBUG
+		QDIO_DBF_TEXT4(0, trace, "inqisdon");
+		QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
+		sprintf(dbf_text, "pf%02x", q->first_to_check);
+		QDIO_DBF_TEXT4(0, trace, dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
+		return 1;
+	} else {
+#ifdef CONFIG_QDIO_DEBUG
+		QDIO_DBF_TEXT4(0, trace, "inqisntd");
+		QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
+		sprintf(dbf_text, "pf%02x", q->first_to_check);
+		QDIO_DBF_TEXT4(0, trace, dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
+		return 0;
+	}
+}
+
+void qdio_kick_inbound_handler(struct qdio_q *q)
+{
+	int count, start, end;
+#ifdef CONFIG_QDIO_DEBUG
+	char dbf_text[15];
+#endif
+
+	qdio_perf_stat_inc(&perf_stats.inbound_handler);
+
+	start = q->first_to_kick;
+	end = q->first_to_check;
+	if (end >= start)
+		count = end - start;
+	else
+		count = end + QDIO_MAX_BUFFERS_PER_Q - start;
+
+#ifdef CONFIG_QDIO_DEBUG
+	sprintf(dbf_text, "s=%2xc=%2x", start, count);
+	QDIO_DBF_TEXT4(0, trace, dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
+
+	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
+		return;
+
+	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr,
+		   start, count, q->irq_ptr->int_parm);
+
+	/* for the next time */
+	q->first_to_kick = q->first_to_check;
+	q->qdio_error = 0;
+}
+
+static void __qdio_inbound_processing(struct qdio_q *q)
+{
+	qdio_perf_stat_inc(&perf_stats.tasklet_inbound);
+again:
+	if (!qdio_inbound_q_moved(q))
+		return;
+
+	qdio_kick_inbound_handler(q);
+
+	if (!qdio_inbound_q_done(q))
+		/* means poll time is not yet over */
+		goto again;
+
+	qdio_stop_polling(q);
+	/*
+	 * We need to check again to not lose initiative after
+	 * resetting the ACK state.
+	 */
+	if (!qdio_inbound_q_done(q))
+		goto again;
+}
+
+/* inbound tasklet */
+void qdio_inbound_processing(unsigned long data)
+{
+	struct qdio_q *q = (struct qdio_q *)data;
+	__qdio_inbound_processing(q);
+}
+
+static int get_outbound_buffer_frontier(struct qdio_q *q)
+{
+	int count, stop;
+	unsigned char state;
+
+	if (((queue_type(q) != QDIO_IQDIO_QFMT) && !pci_out_supported(q)) ||
+	    (queue_type(q) == QDIO_IQDIO_QFMT && multicast_outbound(q)))
+		qdio_siga_sync_q(q);
+
+	/*
+	 * Don't check 128 buffers, as otherwise qdio_outbound_q_moved
+	 * would return 0.
+	 */
+	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
+	stop = add_buf(q->first_to_check, count);
+
+	/* need to set count to 1 for non-qebsm */
+	if (!is_qebsm(q))
+		count = 1;
+
+check_next:
+	if (q->first_to_check == stop)
+		return q->first_to_check;
+
+	count = get_buf_states(q, q->first_to_check, &state, count);
+	if (!count)
+		return q->first_to_check;
+
+	switch (state) {
+	case SLSB_P_OUTPUT_EMPTY:
+		/* the adapter got it */
+		QDIO_DBF_TEXT5(0, trace, "outpempt");
+
+		atomic_sub(count, &q->nr_buf_used);
+		q->first_to_check = add_buf(q->first_to_check, count);
+		/*
+		 * We fetch all buffer states at once. get_buf_states may
+		 * return a smaller count than requested. For QEBSM we do
+		 * not loop.
+		 */
+		if (is_qebsm(q))
+			break;
+		goto check_next;
+	case SLSB_P_OUTPUT_ERROR:
+		announce_buffer_error(q);
+		/* process the buffer, the upper layer will take care of it */
+		q->first_to_check = add_buf(q->first_to_check, count);
+		atomic_sub(count, &q->nr_buf_used);
+		break;
+	case SLSB_CU_OUTPUT_PRIMED:
+		/* the adapter has not fetched the output yet */
+		QDIO_DBF_TEXT5(0, trace, "outpprim");
+		break;
+	case SLSB_P_OUTPUT_NOT_INIT:
+	case SLSB_P_OUTPUT_HALTED:
+		break;
+	default:
+		BUG();
+	}
+	return q->first_to_check;
+}
+
+/* all buffers processed? */
+static inline int qdio_outbound_q_done(struct qdio_q *q)
+{
+	return atomic_read(&q->nr_buf_used) == 0;
+}
+
+static inline int qdio_outbound_q_moved(struct qdio_q *q)
+{
+	int bufnr;
+
+	bufnr = get_outbound_buffer_frontier(q);
+
+	if ((bufnr != q->last_move_ftc) || q->qdio_error) {
+		q->last_move_ftc = bufnr;
+		QDIO_DBF_TEXT4(0, trace, "oqhasmvd");
+		QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
+		return 1;
+	} else
+		return 0;
+}
+
+/*
+ * VM may present us with cc=2 and the busy bit set on SIGA-write
+ * during reconfiguration of its Guest LAN (only in iqdio mode,
+ * otherwise qdio is asynchronous and a cc=2 with busy bit there will
+ * take the queues down immediately).
+ *
+ * Therefore qdio_siga_output retries in a tight loop for a short time
+ * if such a condition occurs. If the condition persists, it increases
+ * the busy_siga_counter, saves the timestamp and schedules the queue
+ * for later processing. qdio_outbound_processing checks the counter
+ * and, if it is non-zero, calls qdio_kick_outbound_q as often as the
+ * value of the counter, which attempts further SIGA instructions. For
+ * each successful SIGA the counter is decreased; for failing SIGAs it
+ * remains the same. After some time of no movement, qdio_kick_outbound_q
+ * finally fails and reports corresponding error codes to the upper
+ * layer module, which then takes the queues down.
+ *
+ * Note that this is a change from the original HiperSockets design
+ * (which said cc=2 with busy bit means take the queues down), but back
+ * then Guest LAN didn't exist... excessive cc=2 with busy bit
+ * conditions will still take the queues down, only with a higher
+ * threshold due to the Guest LAN environment.
+ *
+ * Called from the outbound tasklet and the do_QDIO handler.
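+ *
+ * Illustration: after three consecutive busy SIGAs the counter reads 3,
+ * and the next run of __qdio_outbound_processing() below attempts three
+ * more SIGA instructions before checking for queue movement.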
+ */
+static void qdio_kick_outbound_q(struct qdio_q *q)
+{
+	int rc;
+#ifdef CONFIG_QDIO_DEBUG
+	char dbf_text[15];
+
+	QDIO_DBF_TEXT5(0, trace, "kickoutq");
+	QDIO_DBF_HEX5(0, trace, &q, sizeof(void *));
+#endif /* CONFIG_QDIO_DEBUG */
+
+	if (!need_siga_out(q))
+		return;
+
+	rc = qdio_siga_output(q);
+	switch (rc) {
+	case 0:
+		/* went smoothly this time, reset timestamp */
+		q->u.out.timestamp = 0;
+
+		/* TODO: improve error handling for CC=0 case */
+#ifdef CONFIG_QDIO_DEBUG
+		QDIO_DBF_TEXT3(0, trace, "cc2reslv");
+		sprintf(dbf_text, "%4x%2x%2x", q->irq_ptr->schid.sch_no, q->nr,
+			atomic_read(&q->u.out.busy_siga_counter));
+		QDIO_DBF_TEXT3(0, trace, dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
+		break;
+	/* cc=2 and busy bit */
+	case (2 | QDIO_ERROR_SIGA_BUSY):
+		atomic_inc(&q->u.out.busy_siga_counter);
+
+		/* if the last siga was successful, save timestamp here */
+		if (!q->u.out.timestamp)
+			q->u.out.timestamp = get_usecs();
+
+		/* if we're in time, don't touch qdio_error */
+		if (get_usecs() - q->u.out.timestamp < QDIO_BUSY_BIT_GIVE_UP) {
+			tasklet_schedule(&q->tasklet);
+			break;
+		}
+		QDIO_DBF_TEXT2(0, trace, "cc2REPRT");
+#ifdef CONFIG_QDIO_DEBUG
+		sprintf(dbf_text, "%4x%2x%2x", q->irq_ptr->schid.sch_no, q->nr,
+			atomic_read(&q->u.out.busy_siga_counter));
+		QDIO_DBF_TEXT3(0, trace, dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
+		/* fall through */
+	default:
+		/* for plain cc=1, 2 or 3 */
+		q->qdio_error = rc;
+	}
+}
+
+static void qdio_kick_outbound_handler(struct qdio_q *q)
+{
+	int start, end, count;
+#ifdef CONFIG_QDIO_DEBUG
+	char dbf_text[15];
+#endif
+
+	start = q->first_to_kick;
+	end = q->last_move_ftc;
+	if (end >= start)
+		count = end - start;
+	else
+		count = end + QDIO_MAX_BUFFERS_PER_Q - start;
+
+#ifdef CONFIG_QDIO_DEBUG
+	QDIO_DBF_TEXT4(0, trace, "kickouth");
+	QDIO_DBF_HEX4(0, trace, &q, sizeof(void *));
+
+	sprintf(dbf_text, "s=%2xc=%2x", start, count);
+	QDIO_DBF_TEXT4(0, trace, dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
+
+	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
+		return;
+
+	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
+		   q->irq_ptr->int_parm);
+
+	/* for the next time: */
+	q->first_to_kick = q->last_move_ftc;
+	q->qdio_error = 0;
+}
+
+static void __qdio_outbound_processing(struct qdio_q *q)
+{
+	int siga_attempts;
+
+	qdio_perf_stat_inc(&perf_stats.tasklet_outbound);
+
+	/* see comment in qdio_kick_outbound_q */
+	siga_attempts = atomic_read(&q->u.out.busy_siga_counter);
+	while (siga_attempts--) {
+		atomic_dec(&q->u.out.busy_siga_counter);
+		qdio_kick_outbound_q(q);
+	}
+
+	BUG_ON(atomic_read(&q->nr_buf_used) < 0);
+
+	if (qdio_outbound_q_moved(q))
+		qdio_kick_outbound_handler(q);
+
+	if (queue_type(q) == QDIO_ZFCP_QFMT) {
+		if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
+			tasklet_schedule(&q->tasklet);
+		return;
+	}
+
+	/* bail out for HiperSockets unicast queues */
+	if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q))
+		return;
+
+	if (q->u.out.pci_out_enabled)
+		return;
+
+	/*
+	 * Now we know that queue type is either qeth without pci enabled
+	 * or HiperSockets multicast. Make sure buffer switch from PRIMED to
+	 * EMPTY is noticed and outbound_handler is called after some time.
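+	 *
+	 * The timer armed below fires after roughly ten seconds
+	 * (jiffies + 10 * HZ) and merely reschedules the tasklet,
+	 * see qdio_outbound_timer().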
+ */ + if (qdio_outbound_q_done(q)) + del_timer(&q->u.out.timer); + else { + if (!timer_pending(&q->u.out.timer)) { + mod_timer(&q->u.out.timer, jiffies + 10 * HZ); + qdio_perf_stat_inc(&perf_stats.debug_tl_out_timer); + } + } +} + +/* outbound tasklet */ +void qdio_outbound_processing(unsigned long data) +{ + struct qdio_q *q = (struct qdio_q *)data; + __qdio_outbound_processing(q); +} + +void qdio_outbound_timer(unsigned long data) +{ + struct qdio_q *q = (struct qdio_q *)data; + tasklet_schedule(&q->tasklet); +} + +/* called from thinint inbound tasklet */ +void qdio_check_outbound_after_thinint(struct qdio_q *q) +{ + struct qdio_q *out; + int i; + + if (!pci_out_supported(q)) + return; + + for_each_output_queue(q->irq_ptr, out, i) + if (!qdio_outbound_q_done(out)) + tasklet_schedule(&out->tasklet); +} + +static inline void qdio_set_state(struct qdio_irq *irq_ptr, + enum qdio_irq_states state) +{ +#ifdef CONFIG_QDIO_DEBUG + char dbf_text[15]; + + QDIO_DBF_TEXT5(0, trace, "newstate"); + sprintf(dbf_text, "%4x%4x", irq_ptr->schid.sch_no, state); + QDIO_DBF_TEXT5(0, trace, dbf_text); +#endif /* CONFIG_QDIO_DEBUG */ + + irq_ptr->state = state; + mb(); +} + +static void qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb) +{ + char dbf_text[15]; + + if (irb->esw.esw0.erw.cons) { + sprintf(dbf_text, "sens%4x", schid.sch_no); + QDIO_DBF_TEXT2(1, trace, dbf_text); + QDIO_DBF_HEX0(0, trace, irb, 64); + QDIO_DBF_HEX0(0, trace, irb->ecw, 64); + } +} + +/* PCI interrupt handler */ +static void qdio_int_handler_pci(struct qdio_irq *irq_ptr) +{ + int i; + struct qdio_q *q; + + qdio_perf_stat_inc(&perf_stats.pci_int); + + for_each_input_queue(irq_ptr, q, i) + tasklet_schedule(&q->tasklet); + + if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)) + return; + + for_each_output_queue(irq_ptr, q, i) { + if (qdio_outbound_q_done(q)) + continue; + + if (!siga_syncs_out_pci(q)) + qdio_siga_sync_q(q); + + tasklet_schedule(&q->tasklet); + } +} + +static void qdio_handle_activate_check(struct ccw_device *cdev, + unsigned long intparm, int cstat, int dstat) +{ + struct qdio_irq *irq_ptr = cdev->private->qdio_data; + struct qdio_q *q; + char dbf_text[15]; + + QDIO_DBF_TEXT2(1, trace, "ick2"); + sprintf(dbf_text, "%s", cdev->dev.bus_id); + QDIO_DBF_TEXT2(1, trace, dbf_text); + QDIO_DBF_HEX2(0, trace, &intparm, sizeof(int)); + QDIO_DBF_HEX2(0, trace, &dstat, sizeof(int)); + QDIO_DBF_HEX2(0, trace, &cstat, sizeof(int)); + + if (irq_ptr->nr_input_qs) { + q = irq_ptr->input_qs[0]; + } else if (irq_ptr->nr_output_qs) { + q = irq_ptr->output_qs[0]; + } else { + dump_stack(); + goto no_handler; + } + q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION, + 0, -1, -1, irq_ptr->int_parm); +no_handler: + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); +} + +static void qdio_call_shutdown(struct work_struct *work) +{ + struct ccw_device_private *priv; + struct ccw_device *cdev; + + priv = container_of(work, struct ccw_device_private, kick_work); + cdev = priv->cdev; + qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); + put_device(&cdev->dev); +} + +static void qdio_int_error(struct ccw_device *cdev) +{ + struct qdio_irq *irq_ptr = cdev->private->qdio_data; + + switch (irq_ptr->state) { + case QDIO_IRQ_STATE_INACTIVE: + case QDIO_IRQ_STATE_CLEANUP: + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); + break; + case QDIO_IRQ_STATE_ESTABLISHED: + case QDIO_IRQ_STATE_ACTIVE: + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); + if (get_device(&cdev->dev)) { + /* Can't call shutdown from interrupt context. 
*/
+			PREPARE_WORK(&cdev->private->kick_work,
+				     qdio_call_shutdown);
+			queue_work(ccw_device_work, &cdev->private->kick_work);
+		}
+		break;
+	default:
+		WARN_ON(1);
+	}
+	wake_up(&cdev->private->wait_q);
+}
+
+static int qdio_establish_check_errors(struct ccw_device *cdev, int cstat,
+				       int dstat)
+{
+	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+
+	if (cstat || (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))) {
+		QDIO_DBF_TEXT2(1, setup, "eq:ckcon");
+		goto error;
+	}
+
+	if (!(dstat & DEV_STAT_DEV_END)) {
+		QDIO_DBF_TEXT2(1, setup, "eq:no de");
+		goto error;
+	}
+
+	if (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) {
+		QDIO_DBF_TEXT2(1, setup, "eq:badio");
+		goto error;
+	}
+	return 0;
+error:
+	QDIO_DBF_HEX2(0, trace, &cstat, sizeof(int));
+	QDIO_DBF_HEX2(0, trace, &dstat, sizeof(int));
+	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
+	return 1;
+}
+
+static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
+				      int dstat)
+{
+	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+	char dbf_text[15];
+
+	sprintf(dbf_text, "qehi%4x", cdev->private->schid.sch_no);
+	QDIO_DBF_TEXT0(0, setup, dbf_text);
+	QDIO_DBF_TEXT0(0, trace, dbf_text);
+
+	if (!qdio_establish_check_errors(cdev, cstat, dstat))
+		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
+}
+
+/* qdio interrupt handler */
+void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
+		      struct irb *irb)
+{
+	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+	int cstat, dstat;
+	char dbf_text[15];
+
+	qdio_perf_stat_inc(&perf_stats.qdio_int);
+
+	if (!intparm || !irq_ptr) {
+		sprintf(dbf_text, "qihd%4x", cdev->private->schid.sch_no);
+		QDIO_DBF_TEXT2(1, setup, dbf_text);
+		return;
+	}
+
+	if (IS_ERR(irb)) {
+		switch (PTR_ERR(irb)) {
+		case -EIO:
+			sprintf(dbf_text, "ierr%4x",
+				cdev->private->schid.sch_no);
+			QDIO_DBF_TEXT2(1, setup, dbf_text);
+			qdio_int_error(cdev);
+			return;
+		case -ETIMEDOUT:
+			sprintf(dbf_text, "qtoh%4x",
+				cdev->private->schid.sch_no);
+			QDIO_DBF_TEXT2(1, setup, dbf_text);
+			qdio_int_error(cdev);
+			return;
+		default:
+			WARN_ON(1);
+			return;
+		}
+	}
+	qdio_irq_check_sense(irq_ptr->schid, irb);
+
+	cstat = irb->scsw.cmd.cstat;
+	dstat = irb->scsw.cmd.dstat;
+
+	switch (irq_ptr->state) {
+	case QDIO_IRQ_STATE_INACTIVE:
+		qdio_establish_handle_irq(cdev, cstat, dstat);
+		break;
+
+	case QDIO_IRQ_STATE_CLEANUP:
+		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
+		break;
+
+	case QDIO_IRQ_STATE_ESTABLISHED:
+	case QDIO_IRQ_STATE_ACTIVE:
+		if (cstat & SCHN_STAT_PCI) {
+			qdio_int_handler_pci(irq_ptr);
+			/* no state change so no need to wake up wait_q */
+			return;
+		}
+		if ((cstat & ~SCHN_STAT_PCI) || dstat) {
+			qdio_handle_activate_check(cdev, intparm, cstat,
+						   dstat);
+			break;
+		}
+		/* fall through */
+	default:
+		WARN_ON(1);
+	}
+	wake_up(&cdev->private->wait_q);
+}
+
+/**
+ * qdio_get_ssqd_desc - get qdio subchannel description
+ * @cdev: ccw device to get description for
+ *
+ * Returns a pointer to the saved qdio subchannel description,
+ * or NULL if the device is not set up for qdio.
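+ *
+ * Usage sketch (illustrative): an upper-layer driver may read the
+ * descriptor once the subchannel is established,
+ *
+ *	struct qdio_ssqd_desc *ssqd = qdio_get_ssqd_desc(cdev);
+ *
+ * and must be prepared to handle a NULL result.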
+ */ +struct qdio_ssqd_desc *qdio_get_ssqd_desc(struct ccw_device *cdev) +{ + struct qdio_irq *irq_ptr; + + QDIO_DBF_TEXT0(0, setup, "getssqd"); + + irq_ptr = cdev->private->qdio_data; + if (!irq_ptr) + return NULL; + + return &irq_ptr->ssqd_desc; +} +EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc); + +/** + * qdio_cleanup - shutdown queues and free data structures + * @cdev: associated ccw device + * @how: use halt or clear to shutdown + * + * This function calls qdio_shutdown() for @cdev with method @how + * and on success qdio_free() for @cdev. + */ +int qdio_cleanup(struct ccw_device *cdev, int how) +{ + struct qdio_irq *irq_ptr; + char dbf_text[15]; + int rc; + + irq_ptr = cdev->private->qdio_data; + if (!irq_ptr) + return -ENODEV; + + sprintf(dbf_text, "qcln%4x", irq_ptr->schid.sch_no); + QDIO_DBF_TEXT1(0, trace, dbf_text); + QDIO_DBF_TEXT0(0, setup, dbf_text); + + rc = qdio_shutdown(cdev, how); + if (rc == 0) + rc = qdio_free(cdev); + return rc; +} +EXPORT_SYMBOL_GPL(qdio_cleanup); + +static void qdio_shutdown_queues(struct ccw_device *cdev) +{ + struct qdio_irq *irq_ptr = cdev->private->qdio_data; + struct qdio_q *q; + int i; + + for_each_input_queue(irq_ptr, q, i) + tasklet_disable(&q->tasklet); + + for_each_output_queue(irq_ptr, q, i) { + tasklet_disable(&q->tasklet); + del_timer(&q->u.out.timer); + } +} + +/** + * qdio_shutdown - shut down a qdio subchannel + * @cdev: associated ccw device + * @how: use halt or clear to shutdown + */ +int qdio_shutdown(struct ccw_device *cdev, int how) +{ + struct qdio_irq *irq_ptr; + int rc; + unsigned long flags; + char dbf_text[15]; + + irq_ptr = cdev->private->qdio_data; + if (!irq_ptr) + return -ENODEV; + + mutex_lock(&irq_ptr->setup_mutex); + /* + * Subchannel was already shot down. We cannot prevent being called + * twice since cio may trigger a shutdown asynchronously. 
+ */ + if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) { + mutex_unlock(&irq_ptr->setup_mutex); + return 0; + } + + sprintf(dbf_text, "qsqs%4x", irq_ptr->schid.sch_no); + QDIO_DBF_TEXT1(0, trace, dbf_text); + QDIO_DBF_TEXT0(0, setup, dbf_text); + + tiqdio_remove_input_queues(irq_ptr); + qdio_shutdown_queues(cdev); + qdio_shutdown_debug_entries(irq_ptr, cdev); + + /* cleanup subchannel */ + spin_lock_irqsave(get_ccwdev_lock(cdev), flags); + + if (how & QDIO_FLAG_CLEANUP_USING_CLEAR) + rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP); + else + /* default behaviour is halt */ + rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP); + if (rc) { + sprintf(dbf_text, "sher%4x", irq_ptr->schid.sch_no); + QDIO_DBF_TEXT0(0, setup, dbf_text); + sprintf(dbf_text, "rc=%d", rc); + QDIO_DBF_TEXT0(0, setup, dbf_text); + goto no_cleanup; + } + + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP); + spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); + wait_event_interruptible_timeout(cdev->private->wait_q, + irq_ptr->state == QDIO_IRQ_STATE_INACTIVE || + irq_ptr->state == QDIO_IRQ_STATE_ERR, + 10 * HZ); + spin_lock_irqsave(get_ccwdev_lock(cdev), flags); + +no_cleanup: + qdio_shutdown_thinint(irq_ptr); + + /* restore interrupt handler */ + if ((void *)cdev->handler == (void *)qdio_int_handler) + cdev->handler = irq_ptr->orig_handler; + spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); + + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); + mutex_unlock(&irq_ptr->setup_mutex); + module_put(THIS_MODULE); + if (rc) + return rc; + return 0; +} +EXPORT_SYMBOL_GPL(qdio_shutdown); + +/** + * qdio_free - free data structures for a qdio subchannel + * @cdev: associated ccw device + */ +int qdio_free(struct ccw_device *cdev) +{ + struct qdio_irq *irq_ptr; + char dbf_text[15]; + + irq_ptr = cdev->private->qdio_data; + if (!irq_ptr) + return -ENODEV; + + mutex_lock(&irq_ptr->setup_mutex); + + sprintf(dbf_text, "qfqs%4x", irq_ptr->schid.sch_no); + QDIO_DBF_TEXT1(0, trace, dbf_text); + QDIO_DBF_TEXT0(0, setup, dbf_text); + + cdev->private->qdio_data = NULL; + mutex_unlock(&irq_ptr->setup_mutex); + + qdio_release_memory(irq_ptr); + return 0; +} +EXPORT_SYMBOL_GPL(qdio_free); + +/** + * qdio_initialize - allocate and establish queues for a qdio subchannel + * @init_data: initialization data + * + * This function first allocates queues via qdio_allocate() and on success + * establishes them via qdio_establish(). 
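+ *
+ * Usage sketch (illustrative, assuming a fully filled-in init_data):
+ *
+ *	rc = qdio_initialize(&init_data);
+ *	if (!rc)
+ *		rc = qdio_activate(init_data.cdev);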
+ */ +int qdio_initialize(struct qdio_initialize *init_data) +{ + int rc; + char dbf_text[15]; + + sprintf(dbf_text, "qini%4x", init_data->cdev->private->schid.sch_no); + QDIO_DBF_TEXT0(0, setup, dbf_text); + QDIO_DBF_TEXT0(0, trace, dbf_text); + + rc = qdio_allocate(init_data); + if (rc) + return rc; + + rc = qdio_establish(init_data); + if (rc) + qdio_free(init_data->cdev); + return rc; +} +EXPORT_SYMBOL_GPL(qdio_initialize); + +/** + * qdio_allocate - allocate qdio queues and associated data + * @init_data: initialization data + */ +int qdio_allocate(struct qdio_initialize *init_data) +{ + struct qdio_irq *irq_ptr; + char dbf_text[15]; + + sprintf(dbf_text, "qalc%4x", init_data->cdev->private->schid.sch_no); + QDIO_DBF_TEXT0(0, setup, dbf_text); + QDIO_DBF_TEXT0(0, trace, dbf_text); + + if ((init_data->no_input_qs && !init_data->input_handler) || + (init_data->no_output_qs && !init_data->output_handler)) + return -EINVAL; + + if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) || + (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ)) + return -EINVAL; + + if ((!init_data->input_sbal_addr_array) || + (!init_data->output_sbal_addr_array)) + return -EINVAL; + + qdio_allocate_do_dbf(init_data); + + /* irq_ptr must be in GFP_DMA since it contains ccw1.cda */ + irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!irq_ptr) + goto out_err; + QDIO_DBF_TEXT0(0, setup, "irq_ptr:"); + QDIO_DBF_HEX0(0, setup, &irq_ptr, sizeof(void *)); + + mutex_init(&irq_ptr->setup_mutex); + + /* + * Allocate a page for the chsc calls in qdio_establish. + * Must be pre-allocated since a zfcp recovery will call + * qdio_establish. In case of low memory and swap on a zfcp disk + * we may not be able to allocate memory otherwise. + */ + irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL); + if (!irq_ptr->chsc_page) + goto out_rel; + + /* qdr is used in ccw1.cda which is u32 */ + irq_ptr->qdr = kzalloc(sizeof(struct qdr), GFP_KERNEL | GFP_DMA); + if (!irq_ptr->qdr) + goto out_rel; + WARN_ON((unsigned long)irq_ptr->qdr & 0xfff); + + QDIO_DBF_TEXT0(0, setup, "qdr:"); + QDIO_DBF_HEX0(0, setup, &irq_ptr->qdr, sizeof(void *)); + + if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs, + init_data->no_output_qs)) + goto out_rel; + + init_data->cdev->private->qdio_data = irq_ptr; + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); + return 0; +out_rel: + qdio_release_memory(irq_ptr); +out_err: + return -ENOMEM; +} +EXPORT_SYMBOL_GPL(qdio_allocate); + +/** + * qdio_establish - establish queues on a qdio subchannel + * @init_data: initialization data + */ +int qdio_establish(struct qdio_initialize *init_data) +{ + char dbf_text[20]; + struct qdio_irq *irq_ptr; + struct ccw_device *cdev = init_data->cdev; + unsigned long saveflags; + int rc; + + irq_ptr = cdev->private->qdio_data; + if (!irq_ptr) + return -ENODEV; + + if (cdev->private->state != DEV_STATE_ONLINE) + return -EINVAL; + + if (!try_module_get(THIS_MODULE)) + return -EINVAL; + + sprintf(dbf_text, "qest%4x", cdev->private->schid.sch_no); + QDIO_DBF_TEXT0(0, setup, dbf_text); + QDIO_DBF_TEXT0(0, trace, dbf_text); + + mutex_lock(&irq_ptr->setup_mutex); + qdio_setup_irq(init_data); + + rc = qdio_establish_thinint(irq_ptr); + if (rc) { + mutex_unlock(&irq_ptr->setup_mutex); + qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); + return rc; + } + + /* establish q */ + irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd; + irq_ptr->ccw.flags = CCW_FLAG_SLI; + irq_ptr->ccw.count = irq_ptr->equeue.count; + irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr); + + 
spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags); + ccw_device_set_options_mask(cdev, 0); + + rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0); + if (rc) { + sprintf(dbf_text, "eq:io%4x", irq_ptr->schid.sch_no); + QDIO_DBF_TEXT2(1, setup, dbf_text); + sprintf(dbf_text, "eq:rc%4x", rc); + QDIO_DBF_TEXT2(1, setup, dbf_text); + } + spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags); + + if (rc) { + mutex_unlock(&irq_ptr->setup_mutex); + qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); + return rc; + } + + wait_event_interruptible_timeout(cdev->private->wait_q, + irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED || + irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ); + + if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) { + mutex_unlock(&irq_ptr->setup_mutex); + qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); + return -EIO; + } + + qdio_setup_ssqd_info(irq_ptr); + sprintf(dbf_text, "qib ac%2x", irq_ptr->qib.ac); + QDIO_DBF_TEXT2(0, setup, dbf_text); + + /* qebsm is now setup if available, initialize buffer states */ + qdio_init_buf_states(irq_ptr); + + mutex_unlock(&irq_ptr->setup_mutex); + qdio_print_subchannel_info(irq_ptr, cdev); + qdio_setup_debug_entries(irq_ptr, cdev); + return 0; +} +EXPORT_SYMBOL_GPL(qdio_establish); + +/** + * qdio_activate - activate queues on a qdio subchannel + * @cdev: associated cdev + */ +int qdio_activate(struct ccw_device *cdev) +{ + struct qdio_irq *irq_ptr; + int rc; + unsigned long saveflags; + char dbf_text[20]; + + irq_ptr = cdev->private->qdio_data; + if (!irq_ptr) + return -ENODEV; + + if (cdev->private->state != DEV_STATE_ONLINE) + return -EINVAL; + + mutex_lock(&irq_ptr->setup_mutex); + if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) { + rc = -EBUSY; + goto out; + } + + sprintf(dbf_text, "qact%4x", irq_ptr->schid.sch_no); + QDIO_DBF_TEXT2(0, setup, dbf_text); + QDIO_DBF_TEXT2(0, trace, dbf_text); + + irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd; + irq_ptr->ccw.flags = CCW_FLAG_SLI; + irq_ptr->ccw.count = irq_ptr->aqueue.count; + irq_ptr->ccw.cda = 0; + + spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags); + ccw_device_set_options(cdev, CCWDEV_REPORT_ALL); + + rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE, + 0, DOIO_DENY_PREFETCH); + if (rc) { + sprintf(dbf_text, "aq:io%4x", irq_ptr->schid.sch_no); + QDIO_DBF_TEXT2(1, setup, dbf_text); + sprintf(dbf_text, "aq:rc%4x", rc); + QDIO_DBF_TEXT2(1, setup, dbf_text); + } + spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags); + + if (rc) + goto out; + + if (is_thinint_irq(irq_ptr)) + tiqdio_add_input_queues(irq_ptr); + + /* wait for subchannel to become active */ + msleep(5); + + switch (irq_ptr->state) { + case QDIO_IRQ_STATE_STOPPED: + case QDIO_IRQ_STATE_ERR: + mutex_unlock(&irq_ptr->setup_mutex); + qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); + return -EIO; + default: + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE); + rc = 0; + } +out: + mutex_unlock(&irq_ptr->setup_mutex); + return rc; +} +EXPORT_SYMBOL_GPL(qdio_activate); + +static inline int buf_in_between(int bufnr, int start, int count) +{ + int end = add_buf(start, count); + + if (end > start) { + if (bufnr >= start && bufnr < end) + return 1; + else + return 0; + } + + /* wrap-around case */ + if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) || + (bufnr < end)) + return 1; + else + return 0; +} + +/** + * handle_inbound - reset processed input buffers + * @q: queue containing the buffers + * @callflags: flags + * @bufnr: first buffer to process + * @count: how many buffers are emptied + */ 
+static void handle_inbound(struct qdio_q *q, unsigned int callflags,
+			   int bufnr, int count)
+{
+	unsigned long flags;
+	int used, rc;
+
+	/*
+	 * do_QDIO could run in parallel with the queue tasklet so the
+	 * upper-layer program could empty the ACK'ed buffer here.
+	 * If that happens we must clear the polling flag, otherwise
+	 * qdio_stop_polling() could set the buffer to NOT_INIT after
+	 * it was set to EMPTY which would kill us.
+	 */
+	spin_lock_irqsave(&q->u.in.lock, flags);
+	if (q->u.in.polling)
+		if (buf_in_between(q->last_move_ftc, bufnr, count))
+			q->u.in.polling = 0;
+
+	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
+	spin_unlock_irqrestore(&q->u.in.lock, flags);
+
+	used = atomic_add_return(count, &q->nr_buf_used) - count;
+	BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);
+
+	/* no need to signal as long as the adapter had free buffers */
+	if (used)
+		return;
+
+	if (need_siga_in(q)) {
+		rc = qdio_siga_input(q);
+		if (rc)
+			q->qdio_error = rc;
+	}
+}
+
+/**
+ * handle_outbound - process filled outbound buffers
+ * @q: queue containing the buffers
+ * @callflags: flags
+ * @bufnr: first buffer to process
+ * @count: how many buffers are filled
+ */
+static void handle_outbound(struct qdio_q *q, unsigned int callflags,
+			    int bufnr, int count)
+{
+	unsigned char state;
+	int used;
+
+	qdio_perf_stat_inc(&perf_stats.outbound_handler);
+
+	count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
+	used = atomic_add_return(count, &q->nr_buf_used);
+	BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);
+
+	if (callflags & QDIO_FLAG_PCI_OUT)
+		q->u.out.pci_out_enabled = 1;
+	else
+		q->u.out.pci_out_enabled = 0;
+
+	if (queue_type(q) == QDIO_IQDIO_QFMT) {
+		if (multicast_outbound(q))
+			qdio_kick_outbound_q(q);
+		else
+			/*
+			 * One siga-w per buffer required for unicast
+			 * HiperSockets.
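+			 *
+			 * E.g. a do_QDIO() call with count == 3 on such a
+			 * queue issues up to three SIGA-w instructions
+			 * through the loop below.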
+ */ + while (count--) + qdio_kick_outbound_q(q); + goto out; + } + + if (need_siga_sync(q)) { + qdio_siga_sync_q(q); + goto out; + } + + /* try to fast requeue buffers */ + get_buf_state(q, prev_buf(bufnr), &state); + if (state != SLSB_CU_OUTPUT_PRIMED) + qdio_kick_outbound_q(q); + else { + QDIO_DBF_TEXT5(0, trace, "fast-req"); + qdio_perf_stat_inc(&perf_stats.fast_requeue); + } +out: + /* Fixme: could wait forever if called from process context */ + tasklet_schedule(&q->tasklet); +} + +/** + * do_QDIO - process input or output buffers + * @cdev: associated ccw_device for the qdio subchannel + * @callflags: input or output and special flags from the program + * @q_nr: queue number + * @bufnr: buffer number + * @count: how many buffers to process + */ +int do_QDIO(struct ccw_device *cdev, unsigned int callflags, + int q_nr, int bufnr, int count) +{ + struct qdio_irq *irq_ptr; +#ifdef CONFIG_QDIO_DEBUG + char dbf_text[20]; + + sprintf(dbf_text, "doQD%04x", cdev->private->schid.sch_no); + QDIO_DBF_TEXT3(0, trace, dbf_text); +#endif /* CONFIG_QDIO_DEBUG */ + + if ((bufnr > QDIO_MAX_BUFFERS_PER_Q) || + (count > QDIO_MAX_BUFFERS_PER_Q) || + (q_nr > QDIO_MAX_QUEUES_PER_IRQ)) + return -EINVAL; + + if (!count) + return 0; + + irq_ptr = cdev->private->qdio_data; + if (!irq_ptr) + return -ENODEV; + +#ifdef CONFIG_QDIO_DEBUG + if (callflags & QDIO_FLAG_SYNC_INPUT) + QDIO_DBF_HEX3(0, trace, &irq_ptr->input_qs[q_nr], + sizeof(void *)); + else + QDIO_DBF_HEX3(0, trace, &irq_ptr->output_qs[q_nr], + sizeof(void *)); + + sprintf(dbf_text, "flag%04x", callflags); + QDIO_DBF_TEXT3(0, trace, dbf_text); + sprintf(dbf_text, "qi%02xct%02x", bufnr, count); + QDIO_DBF_TEXT3(0, trace, dbf_text); +#endif /* CONFIG_QDIO_DEBUG */ + + if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE) + return -EBUSY; + + if (callflags & QDIO_FLAG_SYNC_INPUT) + handle_inbound(irq_ptr->input_qs[q_nr], + callflags, bufnr, count); + else if (callflags & QDIO_FLAG_SYNC_OUTPUT) + handle_outbound(irq_ptr->output_qs[q_nr], + callflags, bufnr, count); + else { + QDIO_DBF_TEXT3(1, trace, "doQD:inv"); + return -EINVAL; + } + return 0; +} +EXPORT_SYMBOL_GPL(do_QDIO); + +static int __init init_QDIO(void) +{ + int rc; + + rc = qdio_setup_init(); + if (rc) + return rc; + rc = tiqdio_allocate_memory(); + if (rc) + goto out_cache; + rc = qdio_debug_init(); + if (rc) + goto out_ti; + rc = qdio_setup_perf_stats(); + if (rc) + goto out_debug; + rc = tiqdio_register_thinints(); + if (rc) + goto out_perf; + return 0; + +out_perf: + qdio_remove_perf_stats(); +out_debug: + qdio_debug_exit(); +out_ti: + tiqdio_free_memory(); +out_cache: + qdio_setup_exit(); + return rc; +} + +static void __exit exit_QDIO(void) +{ + tiqdio_unregister_thinints(); + tiqdio_free_memory(); + qdio_remove_perf_stats(); + qdio_debug_exit(); + qdio_setup_exit(); +} + +module_init(init_QDIO); +module_exit(exit_QDIO); diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c new file mode 100644 index 000000000000..ea01b85b1cc9 --- /dev/null +++ b/drivers/s390/cio/qdio_perf.c @@ -0,0 +1,151 @@ +/* + * drivers/s390/cio/qdio_perf.c + * + * Copyright IBM Corp. 
2008 + * + * Author: Jan Glauber (jang@linux.vnet.ibm.com) + */ +#include <linux/kernel.h> +#include <linux/proc_fs.h> +#include <linux/seq_file.h> +#include <asm/ccwdev.h> + +#include "cio.h" +#include "css.h" +#include "device.h" +#include "ioasm.h" +#include "chsc.h" +#include "qdio_debug.h" +#include "qdio_perf.h" + +int qdio_performance_stats; +struct qdio_perf_stats perf_stats; + +#ifdef CONFIG_PROC_FS +static struct proc_dir_entry *qdio_perf_pde; +#endif + +inline void qdio_perf_stat_inc(atomic_long_t *count) +{ + if (qdio_performance_stats) + atomic_long_inc(count); +} + +inline void qdio_perf_stat_dec(atomic_long_t *count) +{ + if (qdio_performance_stats) + atomic_long_dec(count); +} + +/* + * procfs functions + */ +static int qdio_perf_proc_show(struct seq_file *m, void *v) +{ + seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n", + (long)atomic_long_read(&perf_stats.qdio_int)); + seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n", + (long)atomic_long_read(&perf_stats.pci_int)); + seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n", + (long)atomic_long_read(&perf_stats.thin_int)); + seq_printf(m, "\n"); + seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n", + (long)atomic_long_read(&perf_stats.tasklet_inbound)); + seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n", + (long)atomic_long_read(&perf_stats.tasklet_outbound)); + seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n", + (long)atomic_long_read(&perf_stats.tasklet_thinint), + (long)atomic_long_read(&perf_stats.tasklet_thinint_loop)); + seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n", + (long)atomic_long_read(&perf_stats.thinint_inbound), + (long)atomic_long_read(&perf_stats.thinint_inbound_loop)); + seq_printf(m, "\n"); + seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n", + (long)atomic_long_read(&perf_stats.siga_in)); + seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n", + (long)atomic_long_read(&perf_stats.siga_out)); + seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n", + (long)atomic_long_read(&perf_stats.siga_sync)); + seq_printf(m, "\n"); + seq_printf(m, "Number of inbound transfers\t\t\t: %li\n", + (long)atomic_long_read(&perf_stats.inbound_handler)); + seq_printf(m, "Number of outbound transfers\t\t\t: %li\n", + (long)atomic_long_read(&perf_stats.outbound_handler)); + seq_printf(m, "\n"); + seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n", + (long)atomic_long_read(&perf_stats.fast_requeue)); + seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n", + (long)atomic_long_read(&perf_stats.debug_tl_out_timer)); + seq_printf(m, "Number of stop polling calls\t\t\t: %li\n", + (long)atomic_long_read(&perf_stats.debug_stop_polling)); + seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n", + (long)atomic_long_read(&perf_stats.thinint_inbound_loop2)); + seq_printf(m, "\n"); + return 0; +} +static int qdio_perf_seq_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, qdio_perf_proc_show, NULL); +} + +static struct file_operations qdio_perf_proc_fops = { + .owner = THIS_MODULE, + .open = qdio_perf_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +/* + * sysfs functions + */ +static ssize_t qdio_perf_stats_show(struct bus_type *bus, char *buf) +{ + return sprintf(buf, "%i\n", qdio_performance_stats ? 
1 : 0); +} + +static ssize_t qdio_perf_stats_store(struct bus_type *bus, + const char *buf, size_t count) +{ + unsigned long i; + + if (strict_strtoul(buf, 16, &i) != 0) + return -EINVAL; + if ((i != 0) && (i != 1)) + return -EINVAL; + if (i == qdio_performance_stats) + return count; + + qdio_performance_stats = i; + /* reset performance statistics */ + if (i == 0) + memset(&perf_stats, 0, sizeof(struct qdio_perf_stats)); + return count; +} + +static BUS_ATTR(qdio_performance_stats, 0644, qdio_perf_stats_show, + qdio_perf_stats_store); + +int __init qdio_setup_perf_stats(void) +{ + int rc; + + rc = bus_create_file(&ccw_bus_type, &bus_attr_qdio_performance_stats); + if (rc) + return rc; + +#ifdef CONFIG_PROC_FS + memset(&perf_stats, 0, sizeof(struct qdio_perf_stats)); + qdio_perf_pde = proc_create("qdio_perf", S_IFREG | S_IRUGO, + NULL, &qdio_perf_proc_fops); +#endif + return 0; +} + +void __exit qdio_remove_perf_stats(void) +{ +#ifdef CONFIG_PROC_FS + remove_proc_entry("qdio_perf", NULL); +#endif + bus_remove_file(&ccw_bus_type, &bus_attr_qdio_performance_stats); +} diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h new file mode 100644 index 000000000000..5c406a8b7387 --- /dev/null +++ b/drivers/s390/cio/qdio_perf.h @@ -0,0 +1,54 @@ +/* + * drivers/s390/cio/qdio_perf.h + * + * Copyright IBM Corp. 2008 + * + * Author: Jan Glauber (jang@linux.vnet.ibm.com) + */ +#ifndef QDIO_PERF_H +#define QDIO_PERF_H + +#include <linux/types.h> +#include <linux/device.h> +#include <asm/atomic.h> + +struct qdio_perf_stats { + /* interrupt handler calls */ + atomic_long_t qdio_int; + atomic_long_t pci_int; + atomic_long_t thin_int; + + /* tasklet runs */ + atomic_long_t tasklet_inbound; + atomic_long_t tasklet_outbound; + atomic_long_t tasklet_thinint; + atomic_long_t tasklet_thinint_loop; + atomic_long_t thinint_inbound; + atomic_long_t thinint_inbound_loop; + atomic_long_t thinint_inbound_loop2; + + /* signal adapter calls */ + atomic_long_t siga_out; + atomic_long_t siga_in; + atomic_long_t siga_sync; + + /* misc */ + atomic_long_t inbound_handler; + atomic_long_t outbound_handler; + atomic_long_t fast_requeue; + + /* for debugging */ + atomic_long_t debug_tl_out_timer; + atomic_long_t debug_stop_polling; +}; + +extern struct qdio_perf_stats perf_stats; +extern int qdio_performance_stats; + +int qdio_setup_perf_stats(void); +void qdio_remove_perf_stats(void); + +extern void qdio_perf_stat_inc(atomic_long_t *count); +extern void qdio_perf_stat_dec(atomic_long_t *count); + +#endif diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c new file mode 100644 index 000000000000..f0923a8aceda --- /dev/null +++ b/drivers/s390/cio/qdio_setup.c @@ -0,0 +1,521 @@ +/* + * driver/s390/cio/qdio_setup.c + * + * qdio queue initialization + * + * Copyright (C) IBM Corp. 2008 + * Author(s): Jan Glauber <jang@linux.vnet.ibm.com> + */ +#include <linux/kernel.h> +#include <linux/slab.h> +#include <asm/qdio.h> + +#include "cio.h" +#include "css.h" +#include "device.h" +#include "ioasm.h" +#include "chsc.h" +#include "qdio.h" +#include "qdio_debug.h" + +static struct kmem_cache *qdio_q_cache; + +/* + * qebsm is only available under 64bit but the adapter sets the feature + * flag anyway, so we manually override it. 
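+ *
+ * On 31 bit kernels this consequently always reports 0, even if the
+ * css general characteristics announce qebsm.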
+ */ +static inline int qebsm_possible(void) +{ +#ifdef CONFIG_64BIT + return css_general_characteristics.qebsm; +#endif + return 0; +} + +/* + * qib_param_field: pointer to 128 bytes or NULL, if no param field + * nr_input_qs: pointer to nr_queues*128 words of data or NULL + */ +static void set_impl_params(struct qdio_irq *irq_ptr, + unsigned int qib_param_field_format, + unsigned char *qib_param_field, + unsigned long *input_slib_elements, + unsigned long *output_slib_elements) +{ + struct qdio_q *q; + int i, j; + + if (!irq_ptr) + return; + + WARN_ON((unsigned long)&irq_ptr->qib & 0xff); + irq_ptr->qib.pfmt = qib_param_field_format; + if (qib_param_field) + memcpy(irq_ptr->qib.parm, qib_param_field, + QDIO_MAX_BUFFERS_PER_Q); + + if (!input_slib_elements) + goto output; + + for_each_input_queue(irq_ptr, q, i) { + for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) + q->slib->slibe[j].parms = + input_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j]; + } +output: + if (!output_slib_elements) + return; + + for_each_output_queue(irq_ptr, q, i) { + for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) + q->slib->slibe[j].parms = + output_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j]; + } +} + +static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues) +{ + struct qdio_q *q; + int i; + + for (i = 0; i < nr_queues; i++) { + q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL); + if (!q) + return -ENOMEM; + WARN_ON((unsigned long)q & 0xff); + + q->slib = (struct slib *) __get_free_page(GFP_KERNEL); + if (!q->slib) { + kmem_cache_free(qdio_q_cache, q); + return -ENOMEM; + } + WARN_ON((unsigned long)q->slib & 0x7ff); + irq_ptr_qs[i] = q; + } + return 0; +} + +int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, int nr_output_qs) +{ + int rc; + + rc = __qdio_allocate_qs(irq_ptr->input_qs, nr_input_qs); + if (rc) + return rc; + rc = __qdio_allocate_qs(irq_ptr->output_qs, nr_output_qs); + return rc; +} + +static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr, + qdio_handler_t *handler, int i) +{ + /* must be cleared by every qdio_establish */ + memset(q, 0, ((char *)&q->slib) - ((char *)q)); + memset(q->slib, 0, PAGE_SIZE); + + q->irq_ptr = irq_ptr; + q->mask = 1 << (31 - i); + q->nr = i; + q->handler = handler; +} + +static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr, + void **sbals_array, char *dbf_text, int i) +{ + struct qdio_q *prev; + int j; + + QDIO_DBF_TEXT0(0, setup, dbf_text); + QDIO_DBF_HEX0(0, setup, &q, sizeof(void *)); + + q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2); + + /* fill in sbal */ + for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) { + q->sbal[j] = *sbals_array++; + WARN_ON((unsigned long)q->sbal[j] & 0xff); + } + + /* fill in slib */ + if (i > 0) { + prev = (q->is_input_q) ? 
irq_ptr->input_qs[i - 1] + : irq_ptr->output_qs[i - 1]; + prev->slib->nsliba = (unsigned long)q->slib; + } + + q->slib->sla = (unsigned long)q->sl; + q->slib->slsba = (unsigned long)&q->slsb.val[0]; + + /* fill in sl */ + for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) + q->sl->element[j].sbal = (unsigned long)q->sbal[j]; + + QDIO_DBF_TEXT2(0, setup, "sl-sb-b0"); + QDIO_DBF_HEX2(0, setup, q->sl, sizeof(void *)); + QDIO_DBF_HEX2(0, setup, &q->slsb, sizeof(void *)); + QDIO_DBF_HEX2(0, setup, q->sbal, sizeof(void *)); +} + +static void setup_queues(struct qdio_irq *irq_ptr, + struct qdio_initialize *qdio_init) +{ + char dbf_text[20]; + struct qdio_q *q; + void **input_sbal_array = qdio_init->input_sbal_addr_array; + void **output_sbal_array = qdio_init->output_sbal_addr_array; + int i; + + sprintf(dbf_text, "qfqs%4x", qdio_init->cdev->private->schid.sch_no); + QDIO_DBF_TEXT0(0, setup, dbf_text); + + for_each_input_queue(irq_ptr, q, i) { + sprintf(dbf_text, "in-q%4x", i); + setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i); + + q->is_input_q = 1; + spin_lock_init(&q->u.in.lock); + setup_storage_lists(q, irq_ptr, input_sbal_array, dbf_text, i); + input_sbal_array += QDIO_MAX_BUFFERS_PER_Q; + + if (is_thinint_irq(irq_ptr)) + tasklet_init(&q->tasklet, tiqdio_inbound_processing, + (unsigned long) q); + else + tasklet_init(&q->tasklet, qdio_inbound_processing, + (unsigned long) q); + } + + for_each_output_queue(irq_ptr, q, i) { + sprintf(dbf_text, "outq%4x", i); + setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i); + + q->is_input_q = 0; + setup_storage_lists(q, irq_ptr, output_sbal_array, + dbf_text, i); + output_sbal_array += QDIO_MAX_BUFFERS_PER_Q; + + tasklet_init(&q->tasklet, qdio_outbound_processing, + (unsigned long) q); + setup_timer(&q->u.out.timer, (void(*)(unsigned long)) + &qdio_outbound_timer, (unsigned long)q); + } +} + +static void process_ac_flags(struct qdio_irq *irq_ptr, unsigned char qdioac) +{ + if (qdioac & AC1_SIGA_INPUT_NEEDED) + irq_ptr->siga_flag.input = 1; + if (qdioac & AC1_SIGA_OUTPUT_NEEDED) + irq_ptr->siga_flag.output = 1; + if (qdioac & AC1_SIGA_SYNC_NEEDED) + irq_ptr->siga_flag.sync = 1; + if (qdioac & AC1_AUTOMATIC_SYNC_ON_THININT) + irq_ptr->siga_flag.no_sync_ti = 1; + if (qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI) + irq_ptr->siga_flag.no_sync_out_pci = 1; + + if (irq_ptr->siga_flag.no_sync_out_pci && + irq_ptr->siga_flag.no_sync_ti) + irq_ptr->siga_flag.no_sync_out_ti = 1; +} + +static void check_and_setup_qebsm(struct qdio_irq *irq_ptr, + unsigned char qdioac, unsigned long token) +{ + char dbf_text[15]; + + if (!(irq_ptr->qib.rflags & QIB_RFLAGS_ENABLE_QEBSM)) + goto no_qebsm; + if (!(qdioac & AC1_SC_QEBSM_AVAILABLE) || + (!(qdioac & AC1_SC_QEBSM_ENABLED))) + goto no_qebsm; + + irq_ptr->sch_token = token; + + QDIO_DBF_TEXT0(0, setup, "V=V:1"); + sprintf(dbf_text, "%8lx", irq_ptr->sch_token); + QDIO_DBF_TEXT0(0, setup, dbf_text); + return; + +no_qebsm: + irq_ptr->sch_token = 0; + irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM; + QDIO_DBF_TEXT0(0, setup, "noV=V"); +} + +static int __get_ssqd_info(struct qdio_irq *irq_ptr) +{ + struct chsc_ssqd_area *ssqd; + int rc; + + QDIO_DBF_TEXT0(0, setup, "getssqd"); + ssqd = (struct chsc_ssqd_area *)irq_ptr->chsc_page; + memset(ssqd, 0, PAGE_SIZE); + + ssqd->request = (struct chsc_header) { + .length = 0x0010, + .code = 0x0024, + }; + ssqd->first_sch = irq_ptr->schid.sch_no; + ssqd->last_sch = irq_ptr->schid.sch_no; + ssqd->ssid = irq_ptr->schid.ssid; + + if (chsc(ssqd)) + return -EIO; + rc = 
chsc_error_from_response(ssqd->response.code);
+ if (rc)
+ return rc;
+
+ if (!(ssqd->qdio_ssqd.flags & CHSC_FLAG_QDIO_CAPABILITY) ||
+ !(ssqd->qdio_ssqd.flags & CHSC_FLAG_VALIDITY) ||
+ (ssqd->qdio_ssqd.sch != irq_ptr->schid.sch_no))
+ return -EINVAL;
+
+ memcpy(&irq_ptr->ssqd_desc, &ssqd->qdio_ssqd,
+ sizeof(struct qdio_ssqd_desc));
+ return 0;
+}
+
+void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
+{
+ unsigned char qdioac;
+ char dbf_text[15];
+ int rc;
+
+ rc = __get_ssqd_info(irq_ptr);
+ if (rc) {
+ QDIO_DBF_TEXT2(0, setup, "ssqdasig");
+ sprintf(dbf_text, "schno%x", irq_ptr->schid.sch_no);
+ QDIO_DBF_TEXT2(0, setup, dbf_text);
+ sprintf(dbf_text, "rc:%d", rc);
+ QDIO_DBF_TEXT2(0, setup, dbf_text);
+ /* all flags set, worst case */
+ qdioac = AC1_SIGA_INPUT_NEEDED | AC1_SIGA_OUTPUT_NEEDED |
+ AC1_SIGA_SYNC_NEEDED;
+ } else
+ qdioac = irq_ptr->ssqd_desc.qdioac1;
+
+ check_and_setup_qebsm(irq_ptr, qdioac, irq_ptr->ssqd_desc.sch_token);
+ process_ac_flags(irq_ptr, qdioac);
+
+ sprintf(dbf_text, "qdioac%2x", qdioac);
+ QDIO_DBF_TEXT2(0, setup, dbf_text);
+}
+
+void qdio_release_memory(struct qdio_irq *irq_ptr)
+{
+ struct qdio_q *q;
+ int i;
+
+ /*
+ * Must check the queue arrays manually since irq_ptr->nr_input_qs /
+ * irq_ptr->nr_output_qs may not yet be set.
+ */
+ for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
+ q = irq_ptr->input_qs[i];
+ if (q) {
+ free_page((unsigned long) q->slib);
+ kmem_cache_free(qdio_q_cache, q);
+ }
+ }
+ for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
+ q = irq_ptr->output_qs[i];
+ if (q) {
+ free_page((unsigned long) q->slib);
+ kmem_cache_free(qdio_q_cache, q);
+ }
+ }
+ kfree(irq_ptr->qdr);
+ free_page(irq_ptr->chsc_page);
+ free_page((unsigned long) irq_ptr);
+}
+
+static void __qdio_allocate_fill_qdr(struct qdio_irq *irq_ptr,
+ struct qdio_q **irq_ptr_qs,
+ int i, int nr)
+{
+ irq_ptr->qdr->qdf0[i + nr].sliba =
+ (unsigned long)irq_ptr_qs[i]->slib;
+
+ irq_ptr->qdr->qdf0[i + nr].sla =
+ (unsigned long)irq_ptr_qs[i]->sl;
+
+ irq_ptr->qdr->qdf0[i + nr].slsba =
+ (unsigned long)&irq_ptr_qs[i]->slsb.val[0];
+
+ irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY;
+ irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY;
+ irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY;
+ irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY;
+}
+
+static void setup_qdr(struct qdio_irq *irq_ptr,
+ struct qdio_initialize *qdio_init)
+{
+ int i;
+
+ irq_ptr->qdr->qfmt = qdio_init->q_format;
+ irq_ptr->qdr->iqdcnt = qdio_init->no_input_qs;
+ irq_ptr->qdr->oqdcnt = qdio_init->no_output_qs;
+ irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */
+ irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4;
+ irq_ptr->qdr->qiba = (unsigned long)&irq_ptr->qib;
+ irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY;
+
+ for (i = 0; i < qdio_init->no_input_qs; i++)
+ __qdio_allocate_fill_qdr(irq_ptr, irq_ptr->input_qs, i, 0);
+
+ for (i = 0; i < qdio_init->no_output_qs; i++)
+ __qdio_allocate_fill_qdr(irq_ptr, irq_ptr->output_qs, i,
+ qdio_init->no_input_qs);
+}
+
+static void setup_qib(struct qdio_irq *irq_ptr,
+ struct qdio_initialize *init_data)
+{
+ if (qebsm_possible())
+ irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;
+
+ irq_ptr->qib.qfmt = init_data->q_format;
+ if (init_data->no_input_qs)
+ irq_ptr->qib.isliba =
+ (unsigned long)(irq_ptr->input_qs[0]->slib);
+ if (init_data->no_output_qs)
+ irq_ptr->qib.osliba =
+ (unsigned long)(irq_ptr->output_qs[0]->slib);
+ memcpy(irq_ptr->qib.ebcnam, init_data->adapter_name, 8);
+}
+
+int qdio_setup_irq(struct qdio_initialize *init_data)
+{
+ struct ciw *ciw;
+ struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;
+ int rc;
+
+ memset(irq_ptr, 0, ((char *)&irq_ptr->qdr) - ((char *)irq_ptr));
+ /* wipes qib.ac, required by ar7063 */
+ memset(irq_ptr->qdr, 0, sizeof(struct qdr));
+
+ irq_ptr->int_parm = init_data->int_parm;
+ irq_ptr->nr_input_qs = init_data->no_input_qs;
+ irq_ptr->nr_output_qs = init_data->no_output_qs;
+
+ irq_ptr->schid = ccw_device_get_subchannel_id(init_data->cdev);
+ irq_ptr->cdev = init_data->cdev;
+ setup_queues(irq_ptr, init_data);
+
+ setup_qib(irq_ptr, init_data);
+ qdio_setup_thinint(irq_ptr);
+ set_impl_params(irq_ptr, init_data->qib_param_field_format,
+ init_data->qib_param_field,
+ init_data->input_slib_elements,
+ init_data->output_slib_elements);
+
+ /* fill input and output descriptors */
+ setup_qdr(irq_ptr, init_data);
+
+ /* qdr, qib, sls, slsbs, slibs, sbales are filled now */
+
+ /* get qdio commands */
+ ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
+ if (!ciw) {
+ QDIO_DBF_TEXT2(1, setup, "no eq");
+ rc = -EINVAL;
+ goto out_err;
+ }
+ irq_ptr->equeue = *ciw;
+
+ ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
+ if (!ciw) {
+ QDIO_DBF_TEXT2(1, setup, "no aq");
+ rc = -EINVAL;
+ goto out_err;
+ }
+ irq_ptr->aqueue = *ciw;
+
+ /* set new interrupt handler */
+ irq_ptr->orig_handler = init_data->cdev->handler;
+ init_data->cdev->handler = qdio_int_handler;
+ return 0;
+out_err:
+ qdio_release_memory(irq_ptr);
+ return rc;
+}
+
+void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
+ struct ccw_device *cdev)
+{
+ char s[80];
+
+ sprintf(s, "%s ", cdev->dev.bus_id);
+
+ switch (irq_ptr->qib.qfmt) {
+ case QDIO_QETH_QFMT:
+ sprintf(s + strlen(s), "OSADE ");
+ break;
+ case QDIO_ZFCP_QFMT:
+ sprintf(s + strlen(s), "ZFCP ");
+ break;
+ case QDIO_IQDIO_QFMT:
+ sprintf(s + strlen(s), "HiperSockets ");
+ break;
+ }
+ sprintf(s + strlen(s), "using: ");
+
+ if (!is_thinint_irq(irq_ptr))
+ sprintf(s + strlen(s), "no");
+ sprintf(s + strlen(s), "AdapterInterrupts ");
+ if (!irq_ptr->sch_token)
+ sprintf(s + strlen(s), "no");
+ sprintf(s + strlen(s), "QEBSM ");
+ if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
+ sprintf(s + strlen(s), "no");
+ sprintf(s + strlen(s), "OutboundPCI ");
+ if (!css_general_characteristics.aif_tdd)
+ sprintf(s + strlen(s), "no");
+ sprintf(s + strlen(s), "TDD\n");
+ printk(KERN_INFO "qdio: %s", s);
+
+ memset(s, 0, sizeof(s));
+ sprintf(s, "%s SIGA required: ", cdev->dev.bus_id);
+ if (irq_ptr->siga_flag.input)
+ sprintf(s + strlen(s), "Read ");
+ if (irq_ptr->siga_flag.output)
+ sprintf(s + strlen(s), "Write ");
+ if (irq_ptr->siga_flag.sync)
+ sprintf(s + strlen(s), "Sync ");
+ if (!irq_ptr->siga_flag.no_sync_ti)
+ sprintf(s + strlen(s), "SyncAI ");
+ if (!irq_ptr->siga_flag.no_sync_out_ti)
+ sprintf(s + strlen(s), "SyncOutAI ");
+ if (!irq_ptr->siga_flag.no_sync_out_pci)
+ sprintf(s + strlen(s), "SyncOutPCI");
+ sprintf(s + strlen(s), "\n");
+ printk(KERN_INFO "qdio: %s", s);
+}
+
+int __init qdio_setup_init(void)
+{
+ char dbf_text[15];
+
+ qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q),
+ 256, 0, NULL);
+ if (!qdio_q_cache)
+ return -ENOMEM;
+
+ /* Check for OSA/FCP thin interrupts (bit 67). */
+ sprintf(dbf_text, "thini%1x",
+ (css_general_characteristics.aif_osa) ? 1 : 0);
+ QDIO_DBF_TEXT0(0, setup, dbf_text);
+
+ /* Check for QEBSM support in general (bit 58). */
+ sprintf(dbf_text, "cssQBS:%1x",
+ (qebsm_possible()) ?
1 : 0);
+ QDIO_DBF_TEXT0(0, setup, dbf_text);
+ return 0;
+}
+
+void __exit qdio_setup_exit(void)
+{
+ kmem_cache_destroy(qdio_q_cache);
+}
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
new file mode 100644
index 000000000000..9291a771d812
--- /dev/null
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -0,0 +1,380 @@
+/*
+ * linux/drivers/s390/cio/qdio_thinint.c
+ *
+ * thin interrupt support for qdio
+ *
+ * Copyright 2000-2008 IBM Corp.
+ * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
+ * Cornelia Huck <cornelia.huck@de.ibm.com>
+ * Jan Glauber <jang@linux.vnet.ibm.com>
+ */
+#include <linux/io.h>
+#include <asm/atomic.h>
+#include <asm/debug.h>
+#include <asm/qdio.h>
+#include <asm/airq.h>
+#include <asm/isc.h>
+
+#include "cio.h"
+#include "ioasm.h"
+#include "qdio.h"
+#include "qdio_debug.h"
+#include "qdio_perf.h"
+
+/*
+ * Restriction: only 63 iqdio subchannels can have their own indicator;
+ * after that, subsequent subchannels share one indicator.
+ */
+#define TIQDIO_NR_NONSHARED_IND 63
+#define TIQDIO_NR_INDICATORS (TIQDIO_NR_NONSHARED_IND + 1)
+#define TIQDIO_SHARED_IND 63
+
+/* list of thin interrupt input queues */
+static LIST_HEAD(tiq_list);
+
+/* adapter local summary indicator */
+static unsigned char *tiqdio_alsi;
+
+/* device state change indicators */
+struct indicator_t {
+ u32 ind; /* u32 because of compare-and-swap performance */
+ atomic_t count; /* use count, 0 or 1 for non-shared indicators */
+};
+static struct indicator_t *q_indicators;
+
+static void tiqdio_tasklet_fn(unsigned long data);
+static DECLARE_TASKLET(tiqdio_tasklet, tiqdio_tasklet_fn, 0);
+
+static int css_qdio_omit_svs;
+
+static inline unsigned long do_clear_global_summary(void)
+{
+ register unsigned long __fn asm("1") = 3;
+ register unsigned long __tmp asm("2");
+ register unsigned long __time asm("3");
+
+ asm volatile(
+ " .insn rre,0xb2650000,2,0"
+ : "+d" (__fn), "=d" (__tmp), "=d" (__time));
+ return __time;
+}
+
+/* returns addr for the device state change indicator */
+static u32 *get_indicator(void)
+{
+ int i;
+
+ for (i = 0; i < TIQDIO_NR_NONSHARED_IND; i++)
+ if (!atomic_read(&q_indicators[i].count)) {
+ atomic_set(&q_indicators[i].count, 1);
+ return &q_indicators[i].ind;
+ }
+
+ /* use the shared indicator */
+ atomic_inc(&q_indicators[TIQDIO_SHARED_IND].count);
+ return &q_indicators[TIQDIO_SHARED_IND].ind;
+}
+
+static void put_indicator(u32 *addr)
+{
+ int i;
+
+ if (!addr)
+ return;
+ i = ((unsigned long)addr - (unsigned long)q_indicators) /
+ sizeof(struct indicator_t);
+ atomic_dec(&q_indicators[i].count);
+}
+
+void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
+{
+ struct qdio_q *q;
+ int i;
+
+ /* No TDD facility? If we must use SIGA-s we can also omit SVS.
*/ + if (!css_qdio_omit_svs && irq_ptr->siga_flag.sync) + css_qdio_omit_svs = 1; + + for_each_input_queue(irq_ptr, q, i) { + list_add_rcu(&q->entry, &tiq_list); + synchronize_rcu(); + } + xchg(irq_ptr->dsci, 1); + tasklet_schedule(&tiqdio_tasklet); +} + +/* + * we cannot stop the tiqdio tasklet here since it is for all + * thinint qdio devices and it must run as long as there is a + * thinint device left + */ +void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr) +{ + struct qdio_q *q; + int i; + + for_each_input_queue(irq_ptr, q, i) { + list_del_rcu(&q->entry); + synchronize_rcu(); + } +} + +static inline int tiqdio_inbound_q_done(struct qdio_q *q) +{ + unsigned char state; + + if (!atomic_read(&q->nr_buf_used)) + return 1; + + qdio_siga_sync_q(q); + get_buf_state(q, q->first_to_check, &state); + + if (state == SLSB_P_INPUT_PRIMED) + /* more work coming */ + return 0; + return 1; +} + +static inline int shared_ind(struct qdio_irq *irq_ptr) +{ + return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind; +} + +static void __tiqdio_inbound_processing(struct qdio_q *q) +{ + qdio_perf_stat_inc(&perf_stats.thinint_inbound); + qdio_sync_after_thinint(q); + + /* + * Maybe we have work on our outbound queues... at least + * we have to check the PCI capable queues. + */ + qdio_check_outbound_after_thinint(q); + +again: + if (!qdio_inbound_q_moved(q)) + return; + + qdio_kick_inbound_handler(q); + + if (!tiqdio_inbound_q_done(q)) { + qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop); + goto again; + } + + qdio_stop_polling(q); + /* + * We need to check again to not lose initiative after + * resetting the ACK state. + */ + if (!tiqdio_inbound_q_done(q)) { + qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2); + goto again; + } +} + +void tiqdio_inbound_processing(unsigned long data) +{ + struct qdio_q *q = (struct qdio_q *)data; + + __tiqdio_inbound_processing(q); +} + +/* check for work on all inbound thinint queues */ +static void tiqdio_tasklet_fn(unsigned long data) +{ + struct qdio_q *q; + + qdio_perf_stat_inc(&perf_stats.tasklet_thinint); +again: + + /* protect tiq_list entries, only changed in activate or shutdown */ + rcu_read_lock(); + + list_for_each_entry_rcu(q, &tiq_list, entry) + /* only process queues from changed sets */ + if (*q->irq_ptr->dsci) { + + /* only clear it if the indicator is non-shared */ + if (!shared_ind(q->irq_ptr)) + xchg(q->irq_ptr->dsci, 0); + /* + * don't call inbound processing directly since + * that could starve other thinint queues + */ + tasklet_schedule(&q->tasklet); + } + + rcu_read_unlock(); + + /* + * if we used the shared indicator clear it now after all queues + * were processed + */ + if (atomic_read(&q_indicators[TIQDIO_SHARED_IND].count)) { + xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0); + + /* prevent racing */ + if (*tiqdio_alsi) + xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1); + } + + /* check for more work */ + if (*tiqdio_alsi) { + xchg(tiqdio_alsi, 0); + qdio_perf_stat_inc(&perf_stats.tasklet_thinint_loop); + goto again; + } +} + +/** + * tiqdio_thinint_handler - thin interrupt handler for qdio + * @ind: pointer to adapter local summary indicator + * @drv_data: NULL + */ +static void tiqdio_thinint_handler(void *ind, void *drv_data) +{ + qdio_perf_stat_inc(&perf_stats.thin_int); + + /* + * SVS only when needed: issue SVS to benefit from iqdio interrupt + * avoidance (SVS clears adapter interrupt suppression overwrite) + */ + if (!css_qdio_omit_svs) + do_clear_global_summary(); + + /* + * reset local summary indicator (tiqdio_alsi) to 
stop adapter + * interrupts for now, the tasklet will clean all dsci's + */ + xchg((u8 *)ind, 0); + tasklet_hi_schedule(&tiqdio_tasklet); +} + +static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset) +{ + struct scssc_area *scssc_area; + char dbf_text[15]; + void *ptr; + int rc; + + scssc_area = (struct scssc_area *)irq_ptr->chsc_page; + memset(scssc_area, 0, PAGE_SIZE); + + if (reset) { + scssc_area->summary_indicator_addr = 0; + scssc_area->subchannel_indicator_addr = 0; + } else { + scssc_area->summary_indicator_addr = virt_to_phys(tiqdio_alsi); + scssc_area->subchannel_indicator_addr = + virt_to_phys(irq_ptr->dsci); + } + + scssc_area->request = (struct chsc_header) { + .length = 0x0fe0, + .code = 0x0021, + }; + scssc_area->operation_code = 0; + scssc_area->ks = PAGE_DEFAULT_KEY; + scssc_area->kc = PAGE_DEFAULT_KEY; + scssc_area->isc = QDIO_AIRQ_ISC; + scssc_area->schid = irq_ptr->schid; + + /* enable the time delay disablement facility */ + if (css_general_characteristics.aif_tdd) + scssc_area->word_with_d_bit = 0x10000000; + + rc = chsc(scssc_area); + if (rc) + return -EIO; + + rc = chsc_error_from_response(scssc_area->response.code); + if (rc) { + sprintf(dbf_text, "sidR%4x", scssc_area->response.code); + QDIO_DBF_TEXT1(0, trace, dbf_text); + QDIO_DBF_TEXT1(0, setup, dbf_text); + ptr = &scssc_area->response; + QDIO_DBF_HEX2(1, setup, &ptr, QDIO_DBF_SETUP_LEN); + return rc; + } + + QDIO_DBF_TEXT2(0, setup, "setscind"); + QDIO_DBF_HEX2(0, setup, &scssc_area->summary_indicator_addr, + sizeof(unsigned long)); + QDIO_DBF_HEX2(0, setup, &scssc_area->subchannel_indicator_addr, + sizeof(unsigned long)); + return 0; +} + +/* allocate non-shared indicators and shared indicator */ +int __init tiqdio_allocate_memory(void) +{ + q_indicators = kzalloc(sizeof(struct indicator_t) * TIQDIO_NR_INDICATORS, + GFP_KERNEL); + if (!q_indicators) + return -ENOMEM; + return 0; +} + +void tiqdio_free_memory(void) +{ + kfree(q_indicators); +} + +int __init tiqdio_register_thinints(void) +{ + char dbf_text[20]; + + isc_register(QDIO_AIRQ_ISC); + tiqdio_alsi = s390_register_adapter_interrupt(&tiqdio_thinint_handler, + NULL, QDIO_AIRQ_ISC); + if (IS_ERR(tiqdio_alsi)) { + sprintf(dbf_text, "regthn%lx", PTR_ERR(tiqdio_alsi)); + QDIO_DBF_TEXT0(0, setup, dbf_text); + tiqdio_alsi = NULL; + isc_unregister(QDIO_AIRQ_ISC); + return -ENOMEM; + } + return 0; +} + +int qdio_establish_thinint(struct qdio_irq *irq_ptr) +{ + if (!is_thinint_irq(irq_ptr)) + return 0; + + /* Check for aif time delay disablement. 
If installed, + * omit SVS even under LPAR + */ + if (css_general_characteristics.aif_tdd) + css_qdio_omit_svs = 1; + return set_subchannel_ind(irq_ptr, 0); +} + +void qdio_setup_thinint(struct qdio_irq *irq_ptr) +{ + if (!is_thinint_irq(irq_ptr)) + return; + irq_ptr->dsci = get_indicator(); + QDIO_DBF_HEX1(0, setup, &irq_ptr->dsci, sizeof(void *)); +} + +void qdio_shutdown_thinint(struct qdio_irq *irq_ptr) +{ + if (!is_thinint_irq(irq_ptr)) + return; + + /* reset adapter interrupt indicators */ + put_indicator(irq_ptr->dsci); + set_subchannel_ind(irq_ptr, 1); +} + +void __exit tiqdio_unregister_thinints(void) +{ + tasklet_disable(&tiqdio_tasklet); + + if (tiqdio_alsi) { + s390_unregister_adapter_interrupt(tiqdio_alsi, QDIO_AIRQ_ISC); + isc_unregister(QDIO_AIRQ_ISC); + } +} diff --git a/drivers/s390/cio/schid.h b/drivers/s390/cio/schid.h deleted file mode 100644 index 54328fec5ade..000000000000 --- a/drivers/s390/cio/schid.h +++ /dev/null @@ -1,26 +0,0 @@ -#ifndef S390_SCHID_H -#define S390_SCHID_H - -struct subchannel_id { - __u32 reserved:13; - __u32 ssid:2; - __u32 one:1; - __u32 sch_no:16; -} __attribute__ ((packed,aligned(4))); - - -/* Helper function for sane state of pre-allocated subchannel_id. */ -static inline void -init_subchannel_id(struct subchannel_id *schid) -{ - memset(schid, 0, sizeof(struct subchannel_id)); - schid->one = 1; -} - -static inline int -schid_equal(struct subchannel_id *schid1, struct subchannel_id *schid2) -{ - return !memcmp(schid1, schid2, sizeof(struct subchannel_id)); -} - -#endif /* S390_SCHID_H */ diff --git a/drivers/s390/cio/scsw.c b/drivers/s390/cio/scsw.c new file mode 100644 index 000000000000..f8da25ab576d --- /dev/null +++ b/drivers/s390/cio/scsw.c @@ -0,0 +1,843 @@ +/* + * Helper functions for scsw access. + * + * Copyright IBM Corp. 2008 + * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> + */ + +#include <linux/types.h> +#include <linux/module.h> +#include <asm/cio.h> +#include "css.h" +#include "chsc.h" + +/** + * scsw_is_tm - check for transport mode scsw + * @scsw: pointer to scsw + * + * Return non-zero if the specified scsw is a transport mode scsw, zero + * otherwise. + */ +int scsw_is_tm(union scsw *scsw) +{ + return css_general_characteristics.fcx && (scsw->tm.x == 1); +} +EXPORT_SYMBOL(scsw_is_tm); + +/** + * scsw_key - return scsw key field + * @scsw: pointer to scsw + * + * Return the value of the key field of the specified scsw, regardless of + * whether it is a transport mode or command mode scsw. + */ +u32 scsw_key(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw->tm.key; + else + return scsw->cmd.key; +} +EXPORT_SYMBOL(scsw_key); + +/** + * scsw_eswf - return scsw eswf field + * @scsw: pointer to scsw + * + * Return the value of the eswf field of the specified scsw, regardless of + * whether it is a transport mode or command mode scsw. + */ +u32 scsw_eswf(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw->tm.eswf; + else + return scsw->cmd.eswf; +} +EXPORT_SYMBOL(scsw_eswf); + +/** + * scsw_cc - return scsw cc field + * @scsw: pointer to scsw + * + * Return the value of the cc field of the specified scsw, regardless of + * whether it is a transport mode or command mode scsw. 
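+ *
+ * A hypothetical caller that copes with both formats might do:
+ *
+ *	if (scsw_is_valid_cc(scsw) && scsw_cc(scsw) != 0)
+ *		handle_deferred_cc(scsw_cc(scsw));
+ *
+ * (handle_deferred_cc() is illustrative only; scsw_is_valid_cc() and
+ * scsw_cc() are defined in this file.)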
+ */
+u32 scsw_cc(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw->tm.cc;
+ else
+ return scsw->cmd.cc;
+}
+EXPORT_SYMBOL(scsw_cc);
+
+/**
+ * scsw_ectl - return scsw ectl field
+ * @scsw: pointer to scsw
+ *
+ * Return the value of the ectl field of the specified scsw, regardless of
+ * whether it is a transport mode or command mode scsw.
+ */
+u32 scsw_ectl(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw->tm.ectl;
+ else
+ return scsw->cmd.ectl;
+}
+EXPORT_SYMBOL(scsw_ectl);
+
+/**
+ * scsw_pno - return scsw pno field
+ * @scsw: pointer to scsw
+ *
+ * Return the value of the pno field of the specified scsw, regardless of
+ * whether it is a transport mode or command mode scsw.
+ */
+u32 scsw_pno(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw->tm.pno;
+ else
+ return scsw->cmd.pno;
+}
+EXPORT_SYMBOL(scsw_pno);
+
+/**
+ * scsw_fctl - return scsw fctl field
+ * @scsw: pointer to scsw
+ *
+ * Return the value of the fctl field of the specified scsw, regardless of
+ * whether it is a transport mode or command mode scsw.
+ */
+u32 scsw_fctl(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw->tm.fctl;
+ else
+ return scsw->cmd.fctl;
+}
+EXPORT_SYMBOL(scsw_fctl);
+
+/**
+ * scsw_actl - return scsw actl field
+ * @scsw: pointer to scsw
+ *
+ * Return the value of the actl field of the specified scsw, regardless of
+ * whether it is a transport mode or command mode scsw.
+ */
+u32 scsw_actl(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw->tm.actl;
+ else
+ return scsw->cmd.actl;
+}
+EXPORT_SYMBOL(scsw_actl);
+
+/**
+ * scsw_stctl - return scsw stctl field
+ * @scsw: pointer to scsw
+ *
+ * Return the value of the stctl field of the specified scsw, regardless of
+ * whether it is a transport mode or command mode scsw.
+ */
+u32 scsw_stctl(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw->tm.stctl;
+ else
+ return scsw->cmd.stctl;
+}
+EXPORT_SYMBOL(scsw_stctl);
+
+/**
+ * scsw_dstat - return scsw dstat field
+ * @scsw: pointer to scsw
+ *
+ * Return the value of the dstat field of the specified scsw, regardless of
+ * whether it is a transport mode or command mode scsw.
+ */
+u32 scsw_dstat(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw->tm.dstat;
+ else
+ return scsw->cmd.dstat;
+}
+EXPORT_SYMBOL(scsw_dstat);
+
+/**
+ * scsw_cstat - return scsw cstat field
+ * @scsw: pointer to scsw
+ *
+ * Return the value of the cstat field of the specified scsw, regardless of
+ * whether it is a transport mode or command mode scsw.
+ */
+u32 scsw_cstat(union scsw *scsw)
+{
+ if (scsw_is_tm(scsw))
+ return scsw->tm.cstat;
+ else
+ return scsw->cmd.cstat;
+}
+EXPORT_SYMBOL(scsw_cstat);
+
+/**
+ * scsw_cmd_is_valid_key - check key field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the key field of the specified command mode scsw is
+ * valid, zero otherwise.
+ */
+int scsw_cmd_is_valid_key(union scsw *scsw)
+{
+ return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
+}
+EXPORT_SYMBOL(scsw_cmd_is_valid_key);
+
+/**
+ * scsw_cmd_is_valid_sctl - check sctl field validity
+ * @scsw: pointer to scsw
+ *
+ * Return non-zero if the sctl field of the specified command mode scsw is
+ * valid, zero otherwise.
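+ * (sctl is only meaningful while a start function is indicated, hence
+ * the check on SCSW_FCTL_START_FUNC below.)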
+ */ +int scsw_cmd_is_valid_sctl(union scsw *scsw) +{ + return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); +} +EXPORT_SYMBOL(scsw_cmd_is_valid_sctl); + +/** + * scsw_cmd_is_valid_eswf - check eswf field validity + * @scsw: pointer to scsw + * + * Return non-zero if the eswf field of the specified command mode scsw is + * valid, zero otherwise. + */ +int scsw_cmd_is_valid_eswf(union scsw *scsw) +{ + return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND); +} +EXPORT_SYMBOL(scsw_cmd_is_valid_eswf); + +/** + * scsw_cmd_is_valid_cc - check cc field validity + * @scsw: pointer to scsw + * + * Return non-zero if the cc field of the specified command mode scsw is + * valid, zero otherwise. + */ +int scsw_cmd_is_valid_cc(union scsw *scsw) +{ + return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) && + (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND); +} +EXPORT_SYMBOL(scsw_cmd_is_valid_cc); + +/** + * scsw_cmd_is_valid_fmt - check fmt field validity + * @scsw: pointer to scsw + * + * Return non-zero if the fmt field of the specified command mode scsw is + * valid, zero otherwise. + */ +int scsw_cmd_is_valid_fmt(union scsw *scsw) +{ + return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); +} +EXPORT_SYMBOL(scsw_cmd_is_valid_fmt); + +/** + * scsw_cmd_is_valid_pfch - check pfch field validity + * @scsw: pointer to scsw + * + * Return non-zero if the pfch field of the specified command mode scsw is + * valid, zero otherwise. + */ +int scsw_cmd_is_valid_pfch(union scsw *scsw) +{ + return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); +} +EXPORT_SYMBOL(scsw_cmd_is_valid_pfch); + +/** + * scsw_cmd_is_valid_isic - check isic field validity + * @scsw: pointer to scsw + * + * Return non-zero if the isic field of the specified command mode scsw is + * valid, zero otherwise. + */ +int scsw_cmd_is_valid_isic(union scsw *scsw) +{ + return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); +} +EXPORT_SYMBOL(scsw_cmd_is_valid_isic); + +/** + * scsw_cmd_is_valid_alcc - check alcc field validity + * @scsw: pointer to scsw + * + * Return non-zero if the alcc field of the specified command mode scsw is + * valid, zero otherwise. + */ +int scsw_cmd_is_valid_alcc(union scsw *scsw) +{ + return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); +} +EXPORT_SYMBOL(scsw_cmd_is_valid_alcc); + +/** + * scsw_cmd_is_valid_ssi - check ssi field validity + * @scsw: pointer to scsw + * + * Return non-zero if the ssi field of the specified command mode scsw is + * valid, zero otherwise. + */ +int scsw_cmd_is_valid_ssi(union scsw *scsw) +{ + return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); +} +EXPORT_SYMBOL(scsw_cmd_is_valid_ssi); + +/** + * scsw_cmd_is_valid_zcc - check zcc field validity + * @scsw: pointer to scsw + * + * Return non-zero if the zcc field of the specified command mode scsw is + * valid, zero otherwise. + */ +int scsw_cmd_is_valid_zcc(union scsw *scsw) +{ + return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) && + (scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS); +} +EXPORT_SYMBOL(scsw_cmd_is_valid_zcc); + +/** + * scsw_cmd_is_valid_ectl - check ectl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the ectl field of the specified command mode scsw is + * valid, zero otherwise. 
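+ * (ectl carries information only for an alert status that is pending
+ * and not intermediate, which is what the stctl checks below encode.)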
+ */ +int scsw_cmd_is_valid_ectl(union scsw *scsw) +{ + return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && + !(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) && + (scsw->cmd.stctl & SCSW_STCTL_ALERT_STATUS); +} +EXPORT_SYMBOL(scsw_cmd_is_valid_ectl); + +/** + * scsw_cmd_is_valid_pno - check pno field validity + * @scsw: pointer to scsw + * + * Return non-zero if the pno field of the specified command mode scsw is + * valid, zero otherwise. + */ +int scsw_cmd_is_valid_pno(union scsw *scsw) +{ + return (scsw->cmd.fctl != 0) && + (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && + (!(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) || + ((scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) && + (scsw->cmd.actl & SCSW_ACTL_SUSPENDED))); +} +EXPORT_SYMBOL(scsw_cmd_is_valid_pno); + +/** + * scsw_cmd_is_valid_fctl - check fctl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the fctl field of the specified command mode scsw is + * valid, zero otherwise. + */ +int scsw_cmd_is_valid_fctl(union scsw *scsw) +{ + /* Only valid if pmcw.dnv == 1*/ + return 1; +} +EXPORT_SYMBOL(scsw_cmd_is_valid_fctl); + +/** + * scsw_cmd_is_valid_actl - check actl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the actl field of the specified command mode scsw is + * valid, zero otherwise. + */ +int scsw_cmd_is_valid_actl(union scsw *scsw) +{ + /* Only valid if pmcw.dnv == 1*/ + return 1; +} +EXPORT_SYMBOL(scsw_cmd_is_valid_actl); + +/** + * scsw_cmd_is_valid_stctl - check stctl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the stctl field of the specified command mode scsw is + * valid, zero otherwise. + */ +int scsw_cmd_is_valid_stctl(union scsw *scsw) +{ + /* Only valid if pmcw.dnv == 1*/ + return 1; +} +EXPORT_SYMBOL(scsw_cmd_is_valid_stctl); + +/** + * scsw_cmd_is_valid_dstat - check dstat field validity + * @scsw: pointer to scsw + * + * Return non-zero if the dstat field of the specified command mode scsw is + * valid, zero otherwise. + */ +int scsw_cmd_is_valid_dstat(union scsw *scsw) +{ + return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && + (scsw->cmd.cc != 3); +} +EXPORT_SYMBOL(scsw_cmd_is_valid_dstat); + +/** + * scsw_cmd_is_valid_cstat - check cstat field validity + * @scsw: pointer to scsw + * + * Return non-zero if the cstat field of the specified command mode scsw is + * valid, zero otherwise. + */ +int scsw_cmd_is_valid_cstat(union scsw *scsw) +{ + return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && + (scsw->cmd.cc != 3); +} +EXPORT_SYMBOL(scsw_cmd_is_valid_cstat); + +/** + * scsw_tm_is_valid_key - check key field validity + * @scsw: pointer to scsw + * + * Return non-zero if the key field of the specified transport mode scsw is + * valid, zero otherwise. + */ +int scsw_tm_is_valid_key(union scsw *scsw) +{ + return (scsw->tm.fctl & SCSW_FCTL_START_FUNC); +} +EXPORT_SYMBOL(scsw_tm_is_valid_key); + +/** + * scsw_tm_is_valid_eswf - check eswf field validity + * @scsw: pointer to scsw + * + * Return non-zero if the eswf field of the specified transport mode scsw is + * valid, zero otherwise. + */ +int scsw_tm_is_valid_eswf(union scsw *scsw) +{ + return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND); +} +EXPORT_SYMBOL(scsw_tm_is_valid_eswf); + +/** + * scsw_tm_is_valid_cc - check cc field validity + * @scsw: pointer to scsw + * + * Return non-zero if the cc field of the specified transport mode scsw is + * valid, zero otherwise. 
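+ * (same condition as scsw_cmd_is_valid_cc(): a start function must be
+ * indicated and status must be pending.)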
+ */ +int scsw_tm_is_valid_cc(union scsw *scsw) +{ + return (scsw->tm.fctl & SCSW_FCTL_START_FUNC) && + (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND); +} +EXPORT_SYMBOL(scsw_tm_is_valid_cc); + +/** + * scsw_tm_is_valid_fmt - check fmt field validity + * @scsw: pointer to scsw + * + * Return non-zero if the fmt field of the specified transport mode scsw is + * valid, zero otherwise. + */ +int scsw_tm_is_valid_fmt(union scsw *scsw) +{ + return 1; +} +EXPORT_SYMBOL(scsw_tm_is_valid_fmt); + +/** + * scsw_tm_is_valid_x - check x field validity + * @scsw: pointer to scsw + * + * Return non-zero if the x field of the specified transport mode scsw is + * valid, zero otherwise. + */ +int scsw_tm_is_valid_x(union scsw *scsw) +{ + return 1; +} +EXPORT_SYMBOL(scsw_tm_is_valid_x); + +/** + * scsw_tm_is_valid_q - check q field validity + * @scsw: pointer to scsw + * + * Return non-zero if the q field of the specified transport mode scsw is + * valid, zero otherwise. + */ +int scsw_tm_is_valid_q(union scsw *scsw) +{ + return 1; +} +EXPORT_SYMBOL(scsw_tm_is_valid_q); + +/** + * scsw_tm_is_valid_ectl - check ectl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the ectl field of the specified transport mode scsw is + * valid, zero otherwise. + */ +int scsw_tm_is_valid_ectl(union scsw *scsw) +{ + return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && + !(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) && + (scsw->tm.stctl & SCSW_STCTL_ALERT_STATUS); +} +EXPORT_SYMBOL(scsw_tm_is_valid_ectl); + +/** + * scsw_tm_is_valid_pno - check pno field validity + * @scsw: pointer to scsw + * + * Return non-zero if the pno field of the specified transport mode scsw is + * valid, zero otherwise. + */ +int scsw_tm_is_valid_pno(union scsw *scsw) +{ + return (scsw->tm.fctl != 0) && + (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && + (!(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) || + ((scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) && + (scsw->tm.actl & SCSW_ACTL_SUSPENDED))); +} +EXPORT_SYMBOL(scsw_tm_is_valid_pno); + +/** + * scsw_tm_is_valid_fctl - check fctl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the fctl field of the specified transport mode scsw is + * valid, zero otherwise. + */ +int scsw_tm_is_valid_fctl(union scsw *scsw) +{ + /* Only valid if pmcw.dnv == 1*/ + return 1; +} +EXPORT_SYMBOL(scsw_tm_is_valid_fctl); + +/** + * scsw_tm_is_valid_actl - check actl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the actl field of the specified transport mode scsw is + * valid, zero otherwise. + */ +int scsw_tm_is_valid_actl(union scsw *scsw) +{ + /* Only valid if pmcw.dnv == 1*/ + return 1; +} +EXPORT_SYMBOL(scsw_tm_is_valid_actl); + +/** + * scsw_tm_is_valid_stctl - check stctl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the stctl field of the specified transport mode scsw is + * valid, zero otherwise. + */ +int scsw_tm_is_valid_stctl(union scsw *scsw) +{ + /* Only valid if pmcw.dnv == 1*/ + return 1; +} +EXPORT_SYMBOL(scsw_tm_is_valid_stctl); + +/** + * scsw_tm_is_valid_dstat - check dstat field validity + * @scsw: pointer to scsw + * + * Return non-zero if the dstat field of the specified transport mode scsw is + * valid, zero otherwise. 
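+ * (dstat requires a pending status and a stored scsw, i.e. cc != 3.)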
+ */ +int scsw_tm_is_valid_dstat(union scsw *scsw) +{ + return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && + (scsw->tm.cc != 3); +} +EXPORT_SYMBOL(scsw_tm_is_valid_dstat); + +/** + * scsw_tm_is_valid_cstat - check cstat field validity + * @scsw: pointer to scsw + * + * Return non-zero if the cstat field of the specified transport mode scsw is + * valid, zero otherwise. + */ +int scsw_tm_is_valid_cstat(union scsw *scsw) +{ + return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && + (scsw->tm.cc != 3); +} +EXPORT_SYMBOL(scsw_tm_is_valid_cstat); + +/** + * scsw_tm_is_valid_fcxs - check fcxs field validity + * @scsw: pointer to scsw + * + * Return non-zero if the fcxs field of the specified transport mode scsw is + * valid, zero otherwise. + */ +int scsw_tm_is_valid_fcxs(union scsw *scsw) +{ + return 1; +} +EXPORT_SYMBOL(scsw_tm_is_valid_fcxs); + +/** + * scsw_tm_is_valid_schxs - check schxs field validity + * @scsw: pointer to scsw + * + * Return non-zero if the schxs field of the specified transport mode scsw is + * valid, zero otherwise. + */ +int scsw_tm_is_valid_schxs(union scsw *scsw) +{ + return (scsw->tm.cstat & (SCHN_STAT_PROG_CHECK | + SCHN_STAT_INTF_CTRL_CHK | + SCHN_STAT_PROT_CHECK | + SCHN_STAT_CHN_DATA_CHK)); +} +EXPORT_SYMBOL(scsw_tm_is_valid_schxs); + +/** + * scsw_is_valid_actl - check actl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the actl field of the specified scsw is valid, + * regardless of whether it is a transport mode or command mode scsw. + * Return zero if the field does not contain a valid value. + */ +int scsw_is_valid_actl(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw_tm_is_valid_actl(scsw); + else + return scsw_cmd_is_valid_actl(scsw); +} +EXPORT_SYMBOL(scsw_is_valid_actl); + +/** + * scsw_is_valid_cc - check cc field validity + * @scsw: pointer to scsw + * + * Return non-zero if the cc field of the specified scsw is valid, + * regardless of whether it is a transport mode or command mode scsw. + * Return zero if the field does not contain a valid value. + */ +int scsw_is_valid_cc(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw_tm_is_valid_cc(scsw); + else + return scsw_cmd_is_valid_cc(scsw); +} +EXPORT_SYMBOL(scsw_is_valid_cc); + +/** + * scsw_is_valid_cstat - check cstat field validity + * @scsw: pointer to scsw + * + * Return non-zero if the cstat field of the specified scsw is valid, + * regardless of whether it is a transport mode or command mode scsw. + * Return zero if the field does not contain a valid value. + */ +int scsw_is_valid_cstat(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw_tm_is_valid_cstat(scsw); + else + return scsw_cmd_is_valid_cstat(scsw); +} +EXPORT_SYMBOL(scsw_is_valid_cstat); + +/** + * scsw_is_valid_dstat - check dstat field validity + * @scsw: pointer to scsw + * + * Return non-zero if the dstat field of the specified scsw is valid, + * regardless of whether it is a transport mode or command mode scsw. + * Return zero if the field does not contain a valid value. + */ +int scsw_is_valid_dstat(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw_tm_is_valid_dstat(scsw); + else + return scsw_cmd_is_valid_dstat(scsw); +} +EXPORT_SYMBOL(scsw_is_valid_dstat); + +/** + * scsw_is_valid_ectl - check ectl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the ectl field of the specified scsw is valid, + * regardless of whether it is a transport mode or command mode scsw. + * Return zero if the field does not contain a valid value. 
+ */ +int scsw_is_valid_ectl(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw_tm_is_valid_ectl(scsw); + else + return scsw_cmd_is_valid_ectl(scsw); +} +EXPORT_SYMBOL(scsw_is_valid_ectl); + +/** + * scsw_is_valid_eswf - check eswf field validity + * @scsw: pointer to scsw + * + * Return non-zero if the eswf field of the specified scsw is valid, + * regardless of whether it is a transport mode or command mode scsw. + * Return zero if the field does not contain a valid value. + */ +int scsw_is_valid_eswf(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw_tm_is_valid_eswf(scsw); + else + return scsw_cmd_is_valid_eswf(scsw); +} +EXPORT_SYMBOL(scsw_is_valid_eswf); + +/** + * scsw_is_valid_fctl - check fctl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the fctl field of the specified scsw is valid, + * regardless of whether it is a transport mode or command mode scsw. + * Return zero if the field does not contain a valid value. + */ +int scsw_is_valid_fctl(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw_tm_is_valid_fctl(scsw); + else + return scsw_cmd_is_valid_fctl(scsw); +} +EXPORT_SYMBOL(scsw_is_valid_fctl); + +/** + * scsw_is_valid_key - check key field validity + * @scsw: pointer to scsw + * + * Return non-zero if the key field of the specified scsw is valid, + * regardless of whether it is a transport mode or command mode scsw. + * Return zero if the field does not contain a valid value. + */ +int scsw_is_valid_key(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw_tm_is_valid_key(scsw); + else + return scsw_cmd_is_valid_key(scsw); +} +EXPORT_SYMBOL(scsw_is_valid_key); + +/** + * scsw_is_valid_pno - check pno field validity + * @scsw: pointer to scsw + * + * Return non-zero if the pno field of the specified scsw is valid, + * regardless of whether it is a transport mode or command mode scsw. + * Return zero if the field does not contain a valid value. + */ +int scsw_is_valid_pno(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw_tm_is_valid_pno(scsw); + else + return scsw_cmd_is_valid_pno(scsw); +} +EXPORT_SYMBOL(scsw_is_valid_pno); + +/** + * scsw_is_valid_stctl - check stctl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the stctl field of the specified scsw is valid, + * regardless of whether it is a transport mode or command mode scsw. + * Return zero if the field does not contain a valid value. + */ +int scsw_is_valid_stctl(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw_tm_is_valid_stctl(scsw); + else + return scsw_cmd_is_valid_stctl(scsw); +} +EXPORT_SYMBOL(scsw_is_valid_stctl); + +/** + * scsw_cmd_is_solicited - check for solicited scsw + * @scsw: pointer to scsw + * + * Return non-zero if the command mode scsw indicates that the associated + * status condition is solicited, zero if it is unsolicited. + */ +int scsw_cmd_is_solicited(union scsw *scsw) +{ + return (scsw->cmd.cc != 0) || (scsw->cmd.stctl != + (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)); +} +EXPORT_SYMBOL(scsw_cmd_is_solicited); + +/** + * scsw_tm_is_solicited - check for solicited scsw + * @scsw: pointer to scsw + * + * Return non-zero if the transport mode scsw indicates that the associated + * status condition is solicited, zero if it is unsolicited. 
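+ * (the unsolicited case is cc zero with a status control of exactly
+ * alert status + status pending, see the check below.)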
+ */ +int scsw_tm_is_solicited(union scsw *scsw) +{ + return (scsw->tm.cc != 0) || (scsw->tm.stctl != + (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)); +} +EXPORT_SYMBOL(scsw_tm_is_solicited); + +/** + * scsw_is_solicited - check for solicited scsw + * @scsw: pointer to scsw + * + * Return non-zero if the transport or command mode scsw indicates that the + * associated status condition is solicited, zero if it is unsolicited. + */ +int scsw_is_solicited(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw_tm_is_solicited(scsw); + else + return scsw_cmd_is_solicited(scsw); +} +EXPORT_SYMBOL(scsw_is_solicited); |