From a67417ab7eeff45bba55666c0e1083260f3624ee Mon Sep 17 00:00:00 2001
From: Swen Schillig
Date: Tue, 18 Aug 2009 15:43:06 +0200
Subject: [SCSI] zfcp: invalid usage after free of port resources

In certain error scenarios ports, rports are getting attached,
validated and removed from the systems environment. Depending on the
layer this occurs asynchronously. This patch fixes the few races which
existed and ensures all references and cross references are cleared at
the time they're invalid. In addition fc transports actions are only
scheduled when required.

Signed-off-by: Swen Schillig
Signed-off-by: Christof Schmitt
Signed-off-by: James Bottomley
---
 drivers/s390/scsi/zfcp_aux.c  | 7 +++++--
 drivers/s390/scsi/zfcp_scsi.c | 6 ++++--
 2 files changed, 9 insertions(+), 4 deletions(-)

(limited to 'drivers/s390')

diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 2ccbd185a5fb..fa2460b42298 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -672,12 +672,15 @@ err_out:
  */
 void zfcp_port_dequeue(struct zfcp_port *port)
 {
-	wait_event(port->remove_wq, atomic_read(&port->refcount) == 0);
 	write_lock_irq(&zfcp_data.config_lock);
 	list_del(&port->list);
 	write_unlock_irq(&zfcp_data.config_lock);
-	if (port->rport)
+	if (port->rport) {
 		port->rport->dd_data = NULL;
+		port->rport = NULL;
+	}
+	wait_event(port->remove_wq, atomic_read(&port->refcount) == 0);
+	cancel_work_sync(&port->rport_work); /* usually not necessary */
 	zfcp_adapter_put(port->adapter);
 	sysfs_remove_group(&port->sysfs_device.kobj, &zfcp_sysfs_port_attrs);
 	device_unregister(&port->sysfs_device);
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 6925a1784682..54a7a7474aa5 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -582,8 +582,10 @@ void zfcp_scsi_schedule_rport_block(struct zfcp_port *port)
 	zfcp_port_get(port);
 	port->rport_task = RPORT_DEL;
 
-	if (!queue_work(zfcp_data.work_queue, &port->rport_work))
-		zfcp_port_put(port);
+	if (port->rport && queue_work(zfcp_data.work_queue, &port->rport_work))
+		return;
+
+	zfcp_port_put(port);
 }
 
 void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *adapter)
-- 
cgit v1.2.3

From d46f384a89c2378cb7858747faa1935db17e22a8 Mon Sep 17 00:00:00 2001
From: Christof Schmitt
Date: Tue, 18 Aug 2009 15:43:07 +0200
Subject: [SCSI] zfcp: Move debug data from zfcp_data to own data structure

The struct zfcp_adapter includes everything related to the debug
traces. This introduces dependences between the definitions in
zfcp_def.h and zfcp_dbf.h. Move all debug related data structures to a
new data structure to break those dependencies and manage the debug
data in zfcp_dbf.[hc].
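The change amounts to the common C idiom of hiding a subsystem's state behind
a forward-declared struct: the rest of the driver only carries a pointer and
no longer needs the debug definitions. A rough sketch of that pattern with
purely illustrative names (not the actual zfcp declarations):

  /* dbf.h - owned by the trace code, pulls in whatever it needs */
  #include <linux/spinlock.h>

  struct foo_dbf {
          spinlock_t lock;
          char buf[256];
  };

  /* def.h - everyone else only sees an opaque pointer */
  struct foo_dbf;                         /* forward declaration, no dependency */

  struct foo_adapter {
          struct foo_dbf *dbf;            /* allocated/freed by the trace code */
  };
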
Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 4 - drivers/s390/scsi/zfcp_dbf.c | 217 ++++++++++++++++++++++++------------------- drivers/s390/scsi/zfcp_dbf.h | 17 +++- drivers/s390/scsi/zfcp_def.h | 15 +-- drivers/s390/scsi/zfcp_fsf.h | 3 +- 5 files changed, 143 insertions(+), 113 deletions(-) (limited to 'drivers/s390') diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index fa2460b42298..870fe79bc7ea 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -502,10 +502,6 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) spin_lock_init(&adapter->req_list_lock); - spin_lock_init(&adapter->hba_dbf_lock); - spin_lock_init(&adapter->san_dbf_lock); - spin_lock_init(&adapter->scsi_dbf_lock); - spin_lock_init(&adapter->rec_dbf_lock); spin_lock_init(&adapter->req_q_lock); spin_lock_init(&adapter->qdio_stat_lock); diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index b99b87ce5a39..995e6128b6c9 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -3,7 +3,7 @@ * * Debug traces for zfcp. * - * Copyright IBM Corporation 2002, 2008 + * Copyright IBM Corporation 2002, 2009 */ #define KMSG_COMPONENT "zfcp" @@ -11,6 +11,7 @@ #include #include +#include "zfcp_dbf.h" #include "zfcp_ext.h" static u32 dbfsize = 4; @@ -126,6 +127,7 @@ static int zfcp_dbf_view_header(debug_info_t *id, struct debug_view *view, void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) { struct zfcp_adapter *adapter = fsf_req->adapter; + struct zfcp_dbf *dbf = adapter->dbf; struct fsf_qtcb *qtcb = fsf_req->qtcb; union fsf_prot_status_qual *prot_status_qual = &qtcb->prefix.prot_status_qual; @@ -134,12 +136,12 @@ void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) struct zfcp_port *port; struct zfcp_unit *unit; struct zfcp_send_els *send_els; - struct zfcp_hba_dbf_record *rec = &adapter->hba_dbf_buf; + struct zfcp_hba_dbf_record *rec = &dbf->hba_dbf_buf; struct zfcp_hba_dbf_record_response *response = &rec->u.response; int level; unsigned long flags; - spin_lock_irqsave(&adapter->hba_dbf_lock, flags); + spin_lock_irqsave(&dbf->hba_dbf_lock, flags); memset(rec, 0, sizeof(*rec)); strncpy(rec->tag, "resp", ZFCP_DBF_TAG_SIZE); @@ -224,7 +226,7 @@ void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) break; } - debug_event(adapter->hba_dbf, level, rec, sizeof(*rec)); + debug_event(dbf->hba_dbf, level, rec, sizeof(*rec)); /* have fcp channel microcode fixed to use as little as possible */ if (fsf_req->fsf_command != FSF_QTCB_FCP_CMND) { @@ -232,11 +234,11 @@ void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) char *buf = (char *)qtcb + qtcb->header.log_start; int len = qtcb->header.log_length; for (; len && !buf[len - 1]; len--); - zfcp_dbf_hexdump(adapter->hba_dbf, rec, sizeof(*rec), level, - buf, len); + zfcp_dbf_hexdump(dbf->hba_dbf, rec, sizeof(*rec), level, buf, + len); } - spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); + spin_unlock_irqrestore(&dbf->hba_dbf_lock, flags); } /** @@ -248,10 +250,11 @@ void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) void zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter, struct fsf_status_read_buffer *status_buffer) { - struct zfcp_hba_dbf_record *rec = &adapter->hba_dbf_buf; + struct zfcp_dbf *dbf = adapter->dbf; + struct zfcp_hba_dbf_record *rec = &dbf->hba_dbf_buf; unsigned long flags; - 
spin_lock_irqsave(&adapter->hba_dbf_lock, flags); + spin_lock_irqsave(&dbf->hba_dbf_lock, flags); memset(rec, 0, sizeof(*rec)); strncpy(rec->tag, "stat", ZFCP_DBF_TAG_SIZE); strncpy(rec->tag2, tag, ZFCP_DBF_TAG_SIZE); @@ -293,8 +296,8 @@ void zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter, &status_buffer->payload, rec->u.status.payload_size); } - debug_event(adapter->hba_dbf, 2, rec, sizeof(*rec)); - spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); + debug_event(dbf->hba_dbf, 2, rec, sizeof(*rec)); + spin_unlock_irqrestore(&dbf->hba_dbf_lock, flags); } /** @@ -308,17 +311,18 @@ void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int qdio_error, int sbal_index, int sbal_count) { - struct zfcp_hba_dbf_record *r = &adapter->hba_dbf_buf; + struct zfcp_dbf *dbf = adapter->dbf; + struct zfcp_hba_dbf_record *r = &dbf->hba_dbf_buf; unsigned long flags; - spin_lock_irqsave(&adapter->hba_dbf_lock, flags); + spin_lock_irqsave(&dbf->hba_dbf_lock, flags); memset(r, 0, sizeof(*r)); strncpy(r->tag, "qdio", ZFCP_DBF_TAG_SIZE); r->u.qdio.qdio_error = qdio_error; r->u.qdio.sbal_index = sbal_index; r->u.qdio.sbal_count = sbal_count; - debug_event(adapter->hba_dbf, 0, r, sizeof(*r)); - spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); + debug_event(dbf->hba_dbf, 0, r, sizeof(*r)); + spin_unlock_irqrestore(&dbf->hba_dbf_lock, flags); } /** @@ -329,17 +333,18 @@ void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, void zfcp_hba_dbf_event_berr(struct zfcp_adapter *adapter, struct zfcp_fsf_req *req) { - struct zfcp_hba_dbf_record *r = &adapter->hba_dbf_buf; + struct zfcp_dbf *dbf = adapter->dbf; + struct zfcp_hba_dbf_record *r = &dbf->hba_dbf_buf; struct fsf_status_read_buffer *sr_buf = req->data; struct fsf_bit_error_payload *err = &sr_buf->payload.bit_error; unsigned long flags; - spin_lock_irqsave(&adapter->hba_dbf_lock, flags); + spin_lock_irqsave(&dbf->hba_dbf_lock, flags); memset(r, 0, sizeof(*r)); strncpy(r->tag, "berr", ZFCP_DBF_TAG_SIZE); memcpy(&r->u.berr, err, sizeof(struct fsf_bit_error_payload)); - debug_event(adapter->hba_dbf, 0, r, sizeof(*r)); - spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); + debug_event(dbf->hba_dbf, 0, r, sizeof(*r)); + spin_unlock_irqrestore(&dbf->hba_dbf_lock, flags); } static void zfcp_hba_dbf_view_response(char **p, struct zfcp_hba_dbf_record_response *r) @@ -554,7 +559,8 @@ static struct debug_view zfcp_rec_dbf_view = { */ void zfcp_rec_dbf_event_thread(char *id2, struct zfcp_adapter *adapter) { - struct zfcp_rec_dbf_record *r = &adapter->rec_dbf_buf; + struct zfcp_dbf *dbf = adapter->dbf; + struct zfcp_rec_dbf_record *r = &dbf->rec_dbf_buf; unsigned long flags = 0; struct list_head *entry; unsigned ready = 0, running = 0, total; @@ -565,15 +571,15 @@ void zfcp_rec_dbf_event_thread(char *id2, struct zfcp_adapter *adapter) running++; total = adapter->erp_total_count; - spin_lock_irqsave(&adapter->rec_dbf_lock, flags); + spin_lock_irqsave(&dbf->rec_dbf_lock, flags); memset(r, 0, sizeof(*r)); r->id = ZFCP_REC_DBF_ID_THREAD; memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE); r->u.thread.total = total; r->u.thread.ready = ready; r->u.thread.running = running; - debug_event(adapter->rec_dbf, 6, r, sizeof(*r)); - spin_unlock_irqrestore(&adapter->rec_dbf_lock, flags); + debug_event(dbf->rec_dbf, 6, r, sizeof(*r)); + spin_unlock_irqrestore(&dbf->rec_dbf_lock, flags); } /** @@ -596,10 +602,11 @@ static void zfcp_rec_dbf_event_target(char *id2, void *ref, atomic_t *status, atomic_t *erp_count, u64 wwpn, u32 d_id, u64 fcp_lun) { 
- struct zfcp_rec_dbf_record *r = &adapter->rec_dbf_buf; + struct zfcp_dbf *dbf = adapter->dbf; + struct zfcp_rec_dbf_record *r = &dbf->rec_dbf_buf; unsigned long flags; - spin_lock_irqsave(&adapter->rec_dbf_lock, flags); + spin_lock_irqsave(&dbf->rec_dbf_lock, flags); memset(r, 0, sizeof(*r)); r->id = ZFCP_REC_DBF_ID_TARGET; memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE); @@ -609,8 +616,8 @@ static void zfcp_rec_dbf_event_target(char *id2, void *ref, r->u.target.d_id = d_id; r->u.target.fcp_lun = fcp_lun; r->u.target.erp_count = atomic_read(erp_count); - debug_event(adapter->rec_dbf, 3, r, sizeof(*r)); - spin_unlock_irqrestore(&adapter->rec_dbf_lock, flags); + debug_event(dbf->rec_dbf, 3, r, sizeof(*r)); + spin_unlock_irqrestore(&dbf->rec_dbf_lock, flags); } /** @@ -672,10 +679,11 @@ void zfcp_rec_dbf_event_trigger(char *id2, void *ref, u8 want, u8 need, void *action, struct zfcp_adapter *adapter, struct zfcp_port *port, struct zfcp_unit *unit) { - struct zfcp_rec_dbf_record *r = &adapter->rec_dbf_buf; + struct zfcp_dbf *dbf = adapter->dbf; + struct zfcp_rec_dbf_record *r = &dbf->rec_dbf_buf; unsigned long flags; - spin_lock_irqsave(&adapter->rec_dbf_lock, flags); + spin_lock_irqsave(&dbf->rec_dbf_lock, flags); memset(r, 0, sizeof(*r)); r->id = ZFCP_REC_DBF_ID_TRIGGER; memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE); @@ -692,8 +700,8 @@ void zfcp_rec_dbf_event_trigger(char *id2, void *ref, u8 want, u8 need, r->u.trigger.us = atomic_read(&unit->status); r->u.trigger.fcp_lun = unit->fcp_lun; } - debug_event(adapter->rec_dbf, action ? 1 : 4, r, sizeof(*r)); - spin_unlock_irqrestore(&adapter->rec_dbf_lock, flags); + debug_event(dbf->rec_dbf, action ? 1 : 4, r, sizeof(*r)); + spin_unlock_irqrestore(&dbf->rec_dbf_lock, flags); } /** @@ -704,10 +712,11 @@ void zfcp_rec_dbf_event_trigger(char *id2, void *ref, u8 want, u8 need, void zfcp_rec_dbf_event_action(char *id2, struct zfcp_erp_action *erp_action) { struct zfcp_adapter *adapter = erp_action->adapter; - struct zfcp_rec_dbf_record *r = &adapter->rec_dbf_buf; + struct zfcp_dbf *dbf = adapter->dbf; + struct zfcp_rec_dbf_record *r = &dbf->rec_dbf_buf; unsigned long flags; - spin_lock_irqsave(&adapter->rec_dbf_lock, flags); + spin_lock_irqsave(&dbf->rec_dbf_lock, flags); memset(r, 0, sizeof(*r)); r->id = ZFCP_REC_DBF_ID_ACTION; memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE); @@ -715,8 +724,8 @@ void zfcp_rec_dbf_event_action(char *id2, struct zfcp_erp_action *erp_action) r->u.action.status = erp_action->status; r->u.action.step = erp_action->step; r->u.action.fsf_req = (unsigned long)erp_action->fsf_req; - debug_event(adapter->rec_dbf, 5, r, sizeof(*r)); - spin_unlock_irqrestore(&adapter->rec_dbf_lock, flags); + debug_event(dbf->rec_dbf, 5, r, sizeof(*r)); + spin_unlock_irqrestore(&dbf->rec_dbf_lock, flags); } /** @@ -728,13 +737,14 @@ void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req) struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; struct zfcp_wka_port *wka_port = ct->wka_port; struct zfcp_adapter *adapter = wka_port->adapter; + struct zfcp_dbf *dbf = adapter->dbf; struct ct_hdr *hdr = sg_virt(ct->req); - struct zfcp_san_dbf_record *r = &adapter->san_dbf_buf; + struct zfcp_san_dbf_record *r = &dbf->san_dbf_buf; struct zfcp_san_dbf_record_ct_request *oct = &r->u.ct_req; int level = 3; unsigned long flags; - spin_lock_irqsave(&adapter->san_dbf_lock, flags); + spin_lock_irqsave(&dbf->san_dbf_lock, flags); memset(r, 0, sizeof(*r)); strncpy(r->tag, "octc", ZFCP_DBF_TAG_SIZE); r->fsf_reqid = fsf_req->req_id; @@ -749,10 +759,10 @@ void 
zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req) oct->max_res_size = hdr->max_res_size; oct->len = min((int)ct->req->length - (int)sizeof(struct ct_hdr), ZFCP_DBF_SAN_MAX_PAYLOAD); - debug_event(adapter->san_dbf, level, r, sizeof(*r)); - zfcp_dbf_hexdump(adapter->san_dbf, r, sizeof(*r), level, + debug_event(dbf->san_dbf, level, r, sizeof(*r)); + zfcp_dbf_hexdump(dbf->san_dbf, r, sizeof(*r), level, (void *)hdr + sizeof(struct ct_hdr), oct->len); - spin_unlock_irqrestore(&adapter->san_dbf_lock, flags); + spin_unlock_irqrestore(&dbf->san_dbf_lock, flags); } /** @@ -765,12 +775,13 @@ void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req) struct zfcp_wka_port *wka_port = ct->wka_port; struct zfcp_adapter *adapter = wka_port->adapter; struct ct_hdr *hdr = sg_virt(ct->resp); - struct zfcp_san_dbf_record *r = &adapter->san_dbf_buf; + struct zfcp_dbf *dbf = adapter->dbf; + struct zfcp_san_dbf_record *r = &dbf->san_dbf_buf; struct zfcp_san_dbf_record_ct_response *rct = &r->u.ct_resp; int level = 3; unsigned long flags; - spin_lock_irqsave(&adapter->san_dbf_lock, flags); + spin_lock_irqsave(&dbf->san_dbf_lock, flags); memset(r, 0, sizeof(*r)); strncpy(r->tag, "rctc", ZFCP_DBF_TAG_SIZE); r->fsf_reqid = fsf_req->req_id; @@ -785,10 +796,10 @@ void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req) rct->max_res_size = hdr->max_res_size; rct->len = min((int)ct->resp->length - (int)sizeof(struct ct_hdr), ZFCP_DBF_SAN_MAX_PAYLOAD); - debug_event(adapter->san_dbf, level, r, sizeof(*r)); - zfcp_dbf_hexdump(adapter->san_dbf, r, sizeof(*r), level, + debug_event(dbf->san_dbf, level, r, sizeof(*r)); + zfcp_dbf_hexdump(dbf->san_dbf, r, sizeof(*r), level, (void *)hdr + sizeof(struct ct_hdr), rct->len); - spin_unlock_irqrestore(&adapter->san_dbf_lock, flags); + spin_unlock_irqrestore(&dbf->san_dbf_lock, flags); } static void zfcp_san_dbf_event_els(const char *tag, int level, @@ -797,10 +808,11 @@ static void zfcp_san_dbf_event_els(const char *tag, int level, int buflen) { struct zfcp_adapter *adapter = fsf_req->adapter; - struct zfcp_san_dbf_record *rec = &adapter->san_dbf_buf; + struct zfcp_dbf *dbf = adapter->dbf; + struct zfcp_san_dbf_record *rec = &dbf->san_dbf_buf; unsigned long flags; - spin_lock_irqsave(&adapter->san_dbf_lock, flags); + spin_lock_irqsave(&dbf->san_dbf_lock, flags); memset(rec, 0, sizeof(*rec)); strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE); rec->fsf_reqid = fsf_req->req_id; @@ -808,10 +820,10 @@ static void zfcp_san_dbf_event_els(const char *tag, int level, rec->s_id = s_id; rec->d_id = d_id; rec->u.els.ls_code = ls_code; - debug_event(adapter->san_dbf, level, rec, sizeof(*rec)); - zfcp_dbf_hexdump(adapter->san_dbf, rec, sizeof(*rec), level, + debug_event(dbf->san_dbf, level, rec, sizeof(*rec)); + zfcp_dbf_hexdump(dbf->san_dbf, rec, sizeof(*rec), level, buffer, min(buflen, ZFCP_DBF_SAN_MAX_PAYLOAD)); - spin_unlock_irqrestore(&adapter->san_dbf_lock, flags); + spin_unlock_irqrestore(&dbf->san_dbf_lock, flags); } /** @@ -915,14 +927,15 @@ static void zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level, struct zfcp_fsf_req *fsf_req, unsigned long old_req_id) { - struct zfcp_scsi_dbf_record *rec = &adapter->scsi_dbf_buf; + struct zfcp_dbf *dbf = adapter->dbf; + struct zfcp_scsi_dbf_record *rec = &dbf->scsi_dbf_buf; struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec; unsigned long flags; struct fcp_rsp_iu *fcp_rsp; char *fcp_rsp_info = NULL, *fcp_sns_info = NULL; int offset = 0, buflen = 0; - spin_lock_irqsave(&adapter->scsi_dbf_lock, flags); + 
spin_lock_irqsave(&dbf->scsi_dbf_lock, flags); do { memset(rec, 0, sizeof(*rec)); if (offset == 0) { @@ -981,9 +994,9 @@ static void zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level, memcpy(dump->data, fcp_sns_info + offset, dump->size); offset += dump->size; } - debug_event(adapter->scsi_dbf, level, rec, sizeof(*rec)); + debug_event(dbf->scsi_dbf, level, rec, sizeof(*rec)); } while (offset < buflen); - spin_unlock_irqrestore(&adapter->scsi_dbf_lock, flags); + spin_unlock_irqrestore(&dbf->scsi_dbf_lock, flags); } /** @@ -1087,6 +1100,22 @@ static struct debug_view zfcp_scsi_dbf_view = { NULL }; +static debug_info_t *zfcp_dbf_reg(const char *name, int level, + struct debug_view *view, int size) +{ + struct debug_info *d; + + d = debug_register(name, dbfsize, level, size); + if (!d) + return NULL; + + debug_register_view(d, &debug_hex_ascii_view); + debug_register_view(d, view); + debug_set_level(d, level); + + return d; +} + /** * zfcp_adapter_debug_register - registers debug feature for an adapter * @adapter: pointer to adapter for which debug features should be registered @@ -1095,52 +1124,56 @@ static struct debug_view zfcp_scsi_dbf_view = { int zfcp_adapter_debug_register(struct zfcp_adapter *adapter) { char dbf_name[DEBUG_MAX_NAME_LEN]; + struct zfcp_dbf *dbf; + + dbf = kmalloc(sizeof(struct zfcp_dbf), GFP_KERNEL); + if (!dbf) + return -ENOMEM; + + spin_lock_init(&dbf->hba_dbf_lock); + spin_lock_init(&dbf->san_dbf_lock); + spin_lock_init(&dbf->scsi_dbf_lock); + spin_lock_init(&dbf->rec_dbf_lock); /* debug feature area which records recovery activity */ sprintf(dbf_name, "zfcp_%s_rec", dev_name(&adapter->ccw_device->dev)); - adapter->rec_dbf = debug_register(dbf_name, dbfsize, 1, - sizeof(struct zfcp_rec_dbf_record)); - if (!adapter->rec_dbf) - goto failed; - debug_register_view(adapter->rec_dbf, &debug_hex_ascii_view); - debug_register_view(adapter->rec_dbf, &zfcp_rec_dbf_view); - debug_set_level(adapter->rec_dbf, 3); + dbf->rec_dbf = zfcp_dbf_reg(dbf_name, 3, &zfcp_rec_dbf_view, + sizeof(struct zfcp_rec_dbf_record)); + if (!dbf->rec_dbf) + goto fail_rec; /* debug feature area which records HBA (FSF and QDIO) conditions */ sprintf(dbf_name, "zfcp_%s_hba", dev_name(&adapter->ccw_device->dev)); - adapter->hba_dbf = debug_register(dbf_name, dbfsize, 1, - sizeof(struct zfcp_hba_dbf_record)); - if (!adapter->hba_dbf) - goto failed; - debug_register_view(adapter->hba_dbf, &debug_hex_ascii_view); - debug_register_view(adapter->hba_dbf, &zfcp_hba_dbf_view); - debug_set_level(adapter->hba_dbf, 3); + dbf->hba_dbf = zfcp_dbf_reg(dbf_name, 3, &zfcp_hba_dbf_view, + sizeof(struct zfcp_hba_dbf_record)); + if (!dbf->hba_dbf) + goto fail_hba; /* debug feature area which records SAN command failures and recovery */ sprintf(dbf_name, "zfcp_%s_san", dev_name(&adapter->ccw_device->dev)); - adapter->san_dbf = debug_register(dbf_name, dbfsize, 1, - sizeof(struct zfcp_san_dbf_record)); - if (!adapter->san_dbf) - goto failed; - debug_register_view(adapter->san_dbf, &debug_hex_ascii_view); - debug_register_view(adapter->san_dbf, &zfcp_san_dbf_view); - debug_set_level(adapter->san_dbf, 6); + dbf->san_dbf = zfcp_dbf_reg(dbf_name, 6, &zfcp_san_dbf_view, + sizeof(struct zfcp_san_dbf_record)); + if (!dbf->san_dbf) + goto fail_san; /* debug feature area which records SCSI command failures and recovery */ sprintf(dbf_name, "zfcp_%s_scsi", dev_name(&adapter->ccw_device->dev)); - adapter->scsi_dbf = debug_register(dbf_name, dbfsize, 1, - sizeof(struct zfcp_scsi_dbf_record)); - if (!adapter->scsi_dbf) - 
goto failed; - debug_register_view(adapter->scsi_dbf, &debug_hex_ascii_view); - debug_register_view(adapter->scsi_dbf, &zfcp_scsi_dbf_view); - debug_set_level(adapter->scsi_dbf, 3); + dbf->scsi_dbf = zfcp_dbf_reg(dbf_name, 3, &zfcp_scsi_dbf_view, + sizeof(struct zfcp_scsi_dbf_record)); + if (!dbf->scsi_dbf) + goto fail_scsi; + adapter->dbf = dbf; return 0; - failed: - zfcp_adapter_debug_unregister(adapter); - +fail_scsi: + debug_unregister(dbf->san_dbf); +fail_san: + debug_unregister(dbf->hba_dbf); +fail_hba: + debug_unregister(dbf->rec_dbf); +fail_rec: + kfree(dbf); return -ENOMEM; } @@ -1150,12 +1183,10 @@ int zfcp_adapter_debug_register(struct zfcp_adapter *adapter) */ void zfcp_adapter_debug_unregister(struct zfcp_adapter *adapter) { - debug_unregister(adapter->scsi_dbf); - debug_unregister(adapter->san_dbf); - debug_unregister(adapter->hba_dbf); - debug_unregister(adapter->rec_dbf); - adapter->scsi_dbf = NULL; - adapter->san_dbf = NULL; - adapter->hba_dbf = NULL; - adapter->rec_dbf = NULL; + debug_unregister(adapter->dbf->scsi_dbf); + debug_unregister(adapter->dbf->san_dbf); + debug_unregister(adapter->dbf->hba_dbf); + debug_unregister(adapter->dbf->rec_dbf); + kfree(adapter->dbf); + adapter->dbf = NULL; } diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h index a573f7344dd6..4cfd68fe8a26 100644 --- a/drivers/s390/scsi/zfcp_dbf.h +++ b/drivers/s390/scsi/zfcp_dbf.h @@ -2,7 +2,7 @@ * This file is part of the zfcp device driver for * FCP adapters for IBM System z9 and zSeries. * - * Copyright IBM Corp. 2008, 2008 + * Copyright IBM Corp. 2008, 2009 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -222,4 +222,19 @@ struct zfcp_scsi_dbf_record { u8 sns_info[ZFCP_DBF_SCSI_FCP_SNS_INFO]; } __attribute__ ((packed)); +struct zfcp_dbf { + debug_info_t *rec_dbf; + debug_info_t *hba_dbf; + debug_info_t *san_dbf; + debug_info_t *scsi_dbf; + spinlock_t rec_dbf_lock; + spinlock_t hba_dbf_lock; + spinlock_t san_dbf_lock; + spinlock_t scsi_dbf_lock; + struct zfcp_rec_dbf_record rec_dbf_buf; + struct zfcp_hba_dbf_record hba_dbf_buf; + struct zfcp_san_dbf_record san_dbf_buf; + struct zfcp_scsi_dbf_record scsi_dbf_buf; +}; + #endif /* ZFCP_DBF_H */ diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 49d0532bca1c..601f5a575def 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -37,10 +37,8 @@ #include #include #include -#include "zfcp_dbf.h" #include "zfcp_fsf.h" - /********************* GENERAL DEFINES *********************************/ #define REQUEST_LIST_SIZE 128 @@ -468,18 +466,7 @@ struct zfcp_adapter { u32 erp_low_mem_count; /* nr of erp actions waiting for memory */ struct zfcp_wka_ports *gs; /* generic services */ - debug_info_t *rec_dbf; - debug_info_t *hba_dbf; - debug_info_t *san_dbf; /* debug feature areas */ - debug_info_t *scsi_dbf; - spinlock_t rec_dbf_lock; - spinlock_t hba_dbf_lock; - spinlock_t san_dbf_lock; - spinlock_t scsi_dbf_lock; - struct zfcp_rec_dbf_record rec_dbf_buf; - struct zfcp_hba_dbf_record hba_dbf_buf; - struct zfcp_san_dbf_record san_dbf_buf; - struct zfcp_scsi_dbf_record scsi_dbf_buf; + struct zfcp_dbf *dbf; /* debug traces */ struct zfcp_adapter_mempool pool; /* Adapter memory pools */ struct qdio_initialize qdio_init_data; /* for qdio_establish */ struct fc_host_statistics *fc_stats; diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h index df7f232faba8..dcc7c1dbcf58 100644 
--- a/drivers/s390/scsi/zfcp_fsf.h +++ b/drivers/s390/scsi/zfcp_fsf.h @@ -3,13 +3,14 @@ * * Interface to the FSF support functions. * - * Copyright IBM Corporation 2002, 2008 + * Copyright IBM Corporation 2002, 2009 */ #ifndef FSF_H #define FSF_H #include +#include #define FSF_QTCB_CURRENT_VERSION 0x00000001 -- cgit v1.2.3 From dcd20e2316cdc333dfdee09649dbe3642eb30e75 Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 18 Aug 2009 15:43:08 +0200 Subject: [SCSI] zfcp: Only collect SCSI debug data for matching trace levels The default trace level is to only trace failed SCSI commands. Thus it is not necessary to collect trace data for most SCSI commands since it will be thrown away later. Restructure the SCSI trace infrastructure to first check the trace level in a inline function and only do the expensive data collection for matching trace levels. Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_dbf.c | 58 +++------------------------------------ drivers/s390/scsi/zfcp_dbf.h | 63 +++++++++++++++++++++++++++++++++++++++++++ drivers/s390/scsi/zfcp_ext.h | 11 +++----- drivers/s390/scsi/zfcp_fsf.c | 1 + drivers/s390/scsi/zfcp_scsi.c | 3 ++- 5 files changed, 73 insertions(+), 63 deletions(-) (limited to 'drivers/s390') diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index 995e6128b6c9..61776d490d13 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -921,13 +921,11 @@ static struct debug_view zfcp_san_dbf_view = { NULL }; -static void zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level, - struct zfcp_adapter *adapter, - struct scsi_cmnd *scsi_cmnd, - struct zfcp_fsf_req *fsf_req, - unsigned long old_req_id) +void _zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level, + struct zfcp_dbf *dbf, struct scsi_cmnd *scsi_cmnd, + struct zfcp_fsf_req *fsf_req, + unsigned long old_req_id) { - struct zfcp_dbf *dbf = adapter->dbf; struct zfcp_scsi_dbf_record *rec = &dbf->scsi_dbf_buf; struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec; unsigned long flags; @@ -999,54 +997,6 @@ static void zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level, spin_unlock_irqrestore(&dbf->scsi_dbf_lock, flags); } -/** - * zfcp_scsi_dbf_event_result - trace event for SCSI command completion - * @tag: tag indicating success or failure of SCSI command - * @level: trace level applicable for this event - * @adapter: adapter that has been used to issue the SCSI command - * @scsi_cmnd: SCSI command pointer - * @fsf_req: request used to issue SCSI command (might be NULL) - */ -void zfcp_scsi_dbf_event_result(const char *tag, int level, - struct zfcp_adapter *adapter, - struct scsi_cmnd *scsi_cmnd, - struct zfcp_fsf_req *fsf_req) -{ - zfcp_scsi_dbf_event("rslt", tag, level, adapter, scsi_cmnd, fsf_req, 0); -} - -/** - * zfcp_scsi_dbf_event_abort - trace event for SCSI command abort - * @tag: tag indicating success or failure of abort operation - * @adapter: adapter thas has been used to issue SCSI command to be aborted - * @scsi_cmnd: SCSI command to be aborted - * @new_fsf_req: request containing abort (might be NULL) - * @old_req_id: identifier of request containg SCSI command to be aborted - */ -void zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter, - struct scsi_cmnd *scsi_cmnd, - struct zfcp_fsf_req *new_fsf_req, - unsigned long old_req_id) -{ - zfcp_scsi_dbf_event("abrt", tag, 1, adapter, scsi_cmnd, new_fsf_req, - old_req_id); -} - -/** - * 
zfcp_scsi_dbf_event_devreset - trace event for Logical Unit or Target Reset - * @tag: tag indicating success or failure of reset operation - * @flag: indicates type of reset (Target Reset, Logical Unit Reset) - * @unit: unit that needs reset - * @scsi_cmnd: SCSI command which caused this error recovery - */ -void zfcp_scsi_dbf_event_devreset(const char *tag, u8 flag, - struct zfcp_unit *unit, - struct scsi_cmnd *scsi_cmnd) -{ - zfcp_scsi_dbf_event(flag == FCP_TARGET_RESET ? "trst" : "lrst", tag, 1, - unit->port->adapter, scsi_cmnd, NULL, 0); -} - static int zfcp_scsi_dbf_view_format(debug_info_t *id, struct debug_view *view, char *out_buf, const char *in_buf) { diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h index 4cfd68fe8a26..c2d5ef18b73a 100644 --- a/drivers/s390/scsi/zfcp_dbf.h +++ b/drivers/s390/scsi/zfcp_dbf.h @@ -22,7 +22,9 @@ #ifndef ZFCP_DBF_H #define ZFCP_DBF_H +#include "zfcp_ext.h" #include "zfcp_fsf.h" +#include "zfcp_def.h" #define ZFCP_DBF_TAG_SIZE 4 #define ZFCP_DBF_ID_SIZE 7 @@ -237,4 +239,65 @@ struct zfcp_dbf { struct zfcp_scsi_dbf_record scsi_dbf_buf; }; +static inline +void zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level, + struct zfcp_adapter *adapter, struct scsi_cmnd *scmd, + struct zfcp_fsf_req *req, unsigned long old_id) +{ + struct zfcp_dbf *dbf = adapter->dbf; + + if (level <= dbf->scsi_dbf->level) + _zfcp_scsi_dbf_event(tag, tag2, level, dbf, scmd, req, old_id); +} + +/** + * zfcp_scsi_dbf_event_result - trace event for SCSI command completion + * @tag: tag indicating success or failure of SCSI command + * @level: trace level applicable for this event + * @adapter: adapter that has been used to issue the SCSI command + * @scmd: SCSI command pointer + * @fsf_req: request used to issue SCSI command (might be NULL) + */ +static inline +void zfcp_scsi_dbf_event_result(const char *tag, int level, + struct zfcp_adapter *adapter, + struct scsi_cmnd *scmd, + struct zfcp_fsf_req *fsf_req) +{ + zfcp_scsi_dbf_event("rslt", tag, level, adapter, scmd, fsf_req, 0); +} + +/** + * zfcp_scsi_dbf_event_abort - trace event for SCSI command abort + * @tag: tag indicating success or failure of abort operation + * @adapter: adapter thas has been used to issue SCSI command to be aborted + * @scmd: SCSI command to be aborted + * @new_req: request containing abort (might be NULL) + * @old_id: identifier of request containg SCSI command to be aborted + */ +static inline +void zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter, + struct scsi_cmnd *scmd, + struct zfcp_fsf_req *new_req, + unsigned long old_id) +{ + zfcp_scsi_dbf_event("abrt", tag, 1, adapter, scmd, new_req, old_id); +} + +/** + * zfcp_scsi_dbf_event_devreset - trace event for Logical Unit or Target Reset + * @tag: tag indicating success or failure of reset operation + * @flag: indicates type of reset (Target Reset, Logical Unit Reset) + * @unit: unit that needs reset + * @scsi_cmnd: SCSI command which caused this error recovery + */ +static inline +void zfcp_scsi_dbf_event_devreset(const char *tag, u8 flag, + struct zfcp_unit *unit, + struct scsi_cmnd *scsi_cmnd) +{ + zfcp_scsi_dbf_event(flag == FCP_TARGET_RESET ? 
"trst" : "lrst", tag, 1, + unit->port->adapter, scsi_cmnd, NULL, 0); +} + #endif /* ZFCP_DBF_H */ diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 3044c6010306..28e76f5be1af 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -57,14 +57,9 @@ extern void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *); extern void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *); extern void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *); extern void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *); -extern void zfcp_scsi_dbf_event_result(const char *, int, struct zfcp_adapter *, - struct scsi_cmnd *, - struct zfcp_fsf_req *); -extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *, - struct scsi_cmnd *, struct zfcp_fsf_req *, - unsigned long); -extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *, - struct scsi_cmnd *); +extern void _zfcp_scsi_dbf_event(const char *, const char *, int, + struct zfcp_dbf *, struct scsi_cmnd *, + struct zfcp_fsf_req *, unsigned long); /* zfcp_erp.c */ extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, char *, diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 47795fbf081f..2635216bfd93 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -11,6 +11,7 @@ #include #include "zfcp_ext.h" +#include "zfcp_dbf.h" #define ZFCP_REQ_AUTO_CLEANUP 0x00000002 #define ZFCP_REQ_NO_QTCB 0x00000008 diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 54a7a7474aa5..0bd80a90426a 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -9,8 +9,9 @@ #define KMSG_COMPONENT "zfcp" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt -#include "zfcp_ext.h" #include +#include "zfcp_ext.h" +#include "zfcp_dbf.h" static unsigned int default_depth = 32; module_param_named(queue_depth, default_depth, uint, 0600); -- cgit v1.2.3 From 2e261af84cdb6a6008a9c361443e35ea646ec683 Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 18 Aug 2009 15:43:09 +0200 Subject: [SCSI] zfcp: Only collect FSF/HBA debug data for matching trace levels The default trace level is to only trace failed FSF commands. Thus it is not necessary to collect trace data for most FSF commands, since it will be thrown away later. Restructure the FSF/HBA trace infrastructure to first check the trace level in a inline function and only do the expensive data collection for matching trace levels. 
Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_dbf.c | 44 +++++++----------------------------- drivers/s390/scsi/zfcp_dbf.h | 53 ++++++++++++++++++++++++++++++++++++++++++++ drivers/s390/scsi/zfcp_ext.h | 9 +++++--- 3 files changed, 67 insertions(+), 39 deletions(-) (limited to 'drivers/s390') diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index 61776d490d13..5568440ec2fc 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -120,14 +120,10 @@ static int zfcp_dbf_view_header(debug_info_t *id, struct debug_view *view, return p - out_buf; } -/** - * zfcp_hba_dbf_event_fsf_response - trace event for request completion - * @fsf_req: request that has been completed - */ -void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) +void _zfcp_hba_dbf_event_fsf_response(const char *tag2, int level, + struct zfcp_fsf_req *fsf_req, + struct zfcp_dbf *dbf) { - struct zfcp_adapter *adapter = fsf_req->adapter; - struct zfcp_dbf *dbf = adapter->dbf; struct fsf_qtcb *qtcb = fsf_req->qtcb; union fsf_prot_status_qual *prot_status_qual = &qtcb->prefix.prot_status_qual; @@ -138,31 +134,12 @@ void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) struct zfcp_send_els *send_els; struct zfcp_hba_dbf_record *rec = &dbf->hba_dbf_buf; struct zfcp_hba_dbf_record_response *response = &rec->u.response; - int level; unsigned long flags; spin_lock_irqsave(&dbf->hba_dbf_lock, flags); memset(rec, 0, sizeof(*rec)); strncpy(rec->tag, "resp", ZFCP_DBF_TAG_SIZE); - - if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) && - (qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) { - strncpy(rec->tag2, "perr", ZFCP_DBF_TAG_SIZE); - level = 1; - } else if (qtcb->header.fsf_status != FSF_GOOD) { - strncpy(rec->tag2, "ferr", ZFCP_DBF_TAG_SIZE); - level = 1; - } else if ((fsf_req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) || - (fsf_req->fsf_command == FSF_QTCB_OPEN_LUN)) { - strncpy(rec->tag2, "open", ZFCP_DBF_TAG_SIZE); - level = 4; - } else if (qtcb->header.log_length) { - strncpy(rec->tag2, "qtcb", ZFCP_DBF_TAG_SIZE); - level = 5; - } else { - strncpy(rec->tag2, "norm", ZFCP_DBF_TAG_SIZE); - level = 6; - } + strncpy(rec->tag2, tag2, ZFCP_DBF_TAG_SIZE); response->fsf_command = fsf_req->fsf_command; response->fsf_reqid = fsf_req->req_id; @@ -241,14 +218,9 @@ void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) spin_unlock_irqrestore(&dbf->hba_dbf_lock, flags); } -/** - * zfcp_hba_dbf_event_fsf_unsol - trace event for an unsolicited status buffer - * @tag: tag indicating which kind of unsolicited status has been received - * @adapter: adapter that has issued the unsolicited status buffer - * @status_buffer: buffer containing payload of unsolicited status - */ -void zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter, - struct fsf_status_read_buffer *status_buffer) +void _zfcp_hba_dbf_event_fsf_unsol(const char *tag, int level, + struct zfcp_adapter *adapter, + struct fsf_status_read_buffer *status_buffer) { struct zfcp_dbf *dbf = adapter->dbf; struct zfcp_hba_dbf_record *rec = &dbf->hba_dbf_buf; @@ -296,7 +268,7 @@ void zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter, &status_buffer->payload, rec->u.status.payload_size); } - debug_event(dbf->hba_dbf, 2, rec, sizeof(*rec)); + debug_event(dbf->hba_dbf, level, rec, sizeof(*rec)); spin_unlock_irqrestore(&dbf->hba_dbf_lock, flags); } diff --git a/drivers/s390/scsi/zfcp_dbf.h 
b/drivers/s390/scsi/zfcp_dbf.h index c2d5ef18b73a..bceaff449033 100644 --- a/drivers/s390/scsi/zfcp_dbf.h +++ b/drivers/s390/scsi/zfcp_dbf.h @@ -239,6 +239,59 @@ struct zfcp_dbf { struct zfcp_scsi_dbf_record scsi_dbf_buf; }; +static inline +void zfcp_hba_dbf_event_fsf_resp(const char *tag2, int level, + struct zfcp_fsf_req *req, struct zfcp_dbf *dbf) +{ + if (level <= dbf->hba_dbf->level) + _zfcp_hba_dbf_event_fsf_response(tag2, level, req, dbf); +} + +/** + * zfcp_hba_dbf_event_fsf_response - trace event for request completion + * @fsf_req: request that has been completed + */ +static inline void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *req) +{ + struct zfcp_dbf *dbf = req->adapter->dbf; + struct fsf_qtcb *qtcb = req->qtcb; + + if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) && + (qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) { + zfcp_hba_dbf_event_fsf_resp("perr", 1, req, dbf); + + } else if (qtcb->header.fsf_status != FSF_GOOD) { + zfcp_hba_dbf_event_fsf_resp("ferr", 1, req, dbf); + + } else if ((req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) || + (req->fsf_command == FSF_QTCB_OPEN_LUN)) { + zfcp_hba_dbf_event_fsf_resp("open", 4, req, dbf); + + } else if (qtcb->header.log_length) { + zfcp_hba_dbf_event_fsf_resp("qtcb", 5, req, dbf); + + } else { + zfcp_hba_dbf_event_fsf_resp("norm", 6, req, dbf); + } + } + +/** + * zfcp_hba_dbf_event_fsf_unsol - trace event for an unsolicited status buffer + * @tag: tag indicating which kind of unsolicited status has been received + * @adapter: adapter that has issued the unsolicited status buffer + * @status_buffer: buffer containing payload of unsolicited status + */ +static inline +void zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter, + struct fsf_status_read_buffer *buf) +{ + struct zfcp_dbf *dbf = adapter->dbf; + int level = 2; + + if (level <= dbf->hba_dbf->level) + _zfcp_hba_dbf_event_fsf_unsol(tag, level, adapter, buf); +} + static inline void zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level, struct zfcp_adapter *adapter, struct scsi_cmnd *scmd, diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 28e76f5be1af..eeed322f32d0 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -45,9 +45,12 @@ extern void zfcp_rec_dbf_event_trigger(char *, void *, u8, u8, void *, struct zfcp_adapter *, struct zfcp_port *, struct zfcp_unit *); extern void zfcp_rec_dbf_event_action(char *, struct zfcp_erp_action *); -extern void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *); -extern void zfcp_hba_dbf_event_fsf_unsol(const char *, struct zfcp_adapter *, - struct fsf_status_read_buffer *); +extern void _zfcp_hba_dbf_event_fsf_response(const char *, int level, + struct zfcp_fsf_req *, + struct zfcp_dbf *dbf); +extern void _zfcp_hba_dbf_event_fsf_unsol(const char *, int level, + struct zfcp_adapter *, + struct fsf_status_read_buffer *); extern void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *, unsigned int, int, int); extern void zfcp_hba_dbf_event_berr(struct zfcp_adapter *, -- cgit v1.2.3 From 44f09f73766a97d9c1ff8bf787cfe6b932eabc2c Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 18 Aug 2009 15:43:10 +0200 Subject: [SCSI] zfcp: Remove useless assignment Using a bitwise OR to not set anything at all is pointless so remove the useless statement. 
Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_fsf.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/s390') diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 2635216bfd93..c023db864dc5 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -832,7 +832,6 @@ int zfcp_fsf_status_read(struct zfcp_adapter *adapter) } sbale = zfcp_qdio_sbale_req(req); - sbale[0].flags |= SBAL_FLAGS0_TYPE_STATUS; sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY; req->sbale_curr = 2; -- cgit v1.2.3 From 14e242ea55a8b807dc1fb7654941caf68a20cd81 Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 18 Aug 2009 15:43:11 +0200 Subject: [SCSI] zfcp: Only issue one test link command per port When the FCP channel returns a series of commands with the error status "test link", zfcp will send a series of ELS ADISC commands. This is technically no problem, but it is enough to only issue one test command per remote port. So, track whether a ELS ADISC command is already pending, and do not send a new one if there is already a pending command. Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_def.h | 1 + drivers/s390/scsi/zfcp_fc.c | 9 +++++++++ 2 files changed, 10 insertions(+) (limited to 'drivers/s390') diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 601f5a575def..c1becfc1e7fe 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -232,6 +232,7 @@ struct zfcp_ls_adisc { /* remote port status */ #define ZFCP_STATUS_PORT_PHYS_OPEN 0x00000001 +#define ZFCP_STATUS_PORT_LINK_TEST 0x00000002 /* well known address (WKA) port status*/ enum zfcp_wka_status { diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 47daebfa7e59..94c13bd32b5f 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -404,6 +404,7 @@ static void zfcp_fc_adisc_handler(unsigned long data) /* port is good, unblock rport without going through erp */ zfcp_scsi_schedule_rport_register(port); out: + atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status); zfcp_port_put(port); kfree(adisc); } @@ -450,13 +451,21 @@ void zfcp_fc_link_test_work(struct work_struct *work) port->rport_task = RPORT_DEL; zfcp_scsi_rport_work(&port->rport_work); + /* only issue one test command at one time per port */ + if (atomic_read(&port->status) & ZFCP_STATUS_PORT_LINK_TEST) + goto out; + + atomic_set_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status); + retval = zfcp_fc_adisc(port); if (retval == 0) return; /* send of ADISC was not possible */ + atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status); zfcp_erp_port_forced_reopen(port, 0, "fcltwk1", NULL); +out: zfcp_port_put(port); } -- cgit v1.2.3 From 55c770fa11d21456e02dc7afb9a37404da9c7b4c Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 18 Aug 2009 15:43:12 +0200 Subject: [SCSI] zfcp: Implicitly close all wka ports An adapter shutdown implicitly closes all open ports. Make sure to mark all WKA ports as offline, not only the directory server. Also make sure that no pending wka port work is running when the adapter is being removed. 
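The second part follows the usual workqueue teardown rule: cancel and wait for
any queued or running work item before the object embedding it disappears. A
minimal generic sketch of that rule (illustrative names, not the zfcp code):

  #include <linux/workqueue.h>
  #include <linux/slab.h>

  struct widget {
          struct delayed_work work;
  };

  static void widget_destroy(struct widget *w)
  {
          cancel_delayed_work_sync(&w->work);     /* waits for a running handler */
          kfree(w);                               /* only now safe to free */
  }
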
Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 1 + drivers/s390/scsi/zfcp_erp.c | 2 +- drivers/s390/scsi/zfcp_ext.h | 4 +--- drivers/s390/scsi/zfcp_fc.c | 11 ++++++++++- 4 files changed, 13 insertions(+), 5 deletions(-) (limited to 'drivers/s390') diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 870fe79bc7ea..7aba6840243f 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -555,6 +555,7 @@ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter) cancel_work_sync(&adapter->scan_work); cancel_work_sync(&adapter->stat_work); + zfcp_fc_wka_ports_force_offline(adapter->gs); zfcp_adapter_scsi_unregister(adapter); sysfs_remove_group(&adapter->ccw_device->dev.kobj, &zfcp_sysfs_adapter_attrs); diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index c75d6f35cb5f..39e4dd15453f 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -714,7 +714,7 @@ static void zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *act) zfcp_qdio_close(adapter); zfcp_fsf_req_dismiss_all(adapter); adapter->fsf_req_seq_no = 0; - zfcp_fc_wka_port_force_offline(&adapter->gs->ds); + zfcp_fc_wka_ports_force_offline(adapter->gs); /* all ports and units are closed */ zfcp_erp_modify_adapter_status(adapter, "erascl1", NULL, ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR); diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index eeed322f32d0..1a66695f11a2 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -104,12 +104,10 @@ extern int zfcp_fc_ns_gid_pn(struct zfcp_erp_action *); extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *); extern void zfcp_test_link(struct zfcp_port *); extern void zfcp_fc_link_test_work(struct work_struct *); -extern void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *); +extern void zfcp_fc_wka_ports_force_offline(struct zfcp_wka_ports *); extern void zfcp_fc_wka_ports_init(struct zfcp_adapter *); extern int zfcp_fc_execute_els_fc_job(struct fc_bsg_job *); extern int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *); -extern void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *); - /* zfcp_fsf.c */ extern int zfcp_fsf_open_port(struct zfcp_erp_action *); diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 94c13bd32b5f..6d0fef92567b 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -132,7 +132,7 @@ static void zfcp_fc_wka_port_init(struct zfcp_wka_port *wka_port, u32 d_id, INIT_DELAYED_WORK(&wka_port->work, zfcp_wka_port_offline); } -void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *wka) +static void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *wka) { cancel_delayed_work_sync(&wka->work); mutex_lock(&wka->mutex); @@ -140,6 +140,15 @@ void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *wka) mutex_unlock(&wka->mutex); } +void zfcp_fc_wka_ports_force_offline(struct zfcp_wka_ports *gs) +{ + zfcp_fc_wka_port_force_offline(&gs->ms); + zfcp_fc_wka_port_force_offline(&gs->ts); + zfcp_fc_wka_port_force_offline(&gs->ds); + zfcp_fc_wka_port_force_offline(&gs->as); + zfcp_fc_wka_port_force_offline(&gs->ks); +} + void zfcp_fc_wka_ports_init(struct zfcp_adapter *adapter) { struct zfcp_wka_ports *gs = adapter->gs; -- cgit v1.2.3 From bd63eaf4b8d783e6033930e377e516169abcadc4 Mon Sep 17 00:00:00 2001 From: Swen Schillig Date: Tue, 18 Aug 2009 15:43:13 +0200 Subject: [SCSI] zfcp: fix layering 
oddities between zfcp_fsf and zfcp_qdio There is no need for the QDIO layer to have knowledge or do things wich are done better by the FSF layer and vice versa. Straighten a few things to improve vividness. Signed-off-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_ext.h | 2 +- drivers/s390/scsi/zfcp_fsf.c | 100 +++++++++++++++++++++++++++++------------- drivers/s390/scsi/zfcp_qdio.c | 45 +------------------ 3 files changed, 73 insertions(+), 74 deletions(-) (limited to 'drivers/s390') diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 1a66695f11a2..6a3727bdb386 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -133,11 +133,11 @@ extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *, extern int zfcp_fsf_send_els(struct zfcp_send_els *); extern int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *, struct scsi_cmnd *); -extern void zfcp_fsf_req_complete(struct zfcp_fsf_req *); extern void zfcp_fsf_req_free(struct zfcp_fsf_req *); extern struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *, u8); extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long, struct zfcp_unit *); +extern void zfcp_fsf_reqid_check(struct zfcp_adapter *, int); /* zfcp_qdio.c */ extern int zfcp_qdio_allocate(struct zfcp_adapter *); diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index c023db864dc5..7ca2995aaf68 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -122,35 +122,6 @@ void zfcp_fsf_req_free(struct zfcp_fsf_req *req) } } -/** - * zfcp_fsf_req_dismiss_all - dismiss all fsf requests - * @adapter: pointer to struct zfcp_adapter - * - * Never ever call this without shutting down the adapter first. - * Otherwise the adapter would continue using and corrupting s390 storage. - * Included BUG_ON() call to ensure this is done. - * ERP is supposed to be the only user of this function. - */ -void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter) -{ - struct zfcp_fsf_req *req, *tmp; - unsigned long flags; - LIST_HEAD(remove_queue); - unsigned int i; - - BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP); - spin_lock_irqsave(&adapter->req_list_lock, flags); - for (i = 0; i < REQUEST_LIST_SIZE; i++) - list_splice_init(&adapter->req_list[i], &remove_queue); - spin_unlock_irqrestore(&adapter->req_list_lock, flags); - - list_for_each_entry_safe(req, tmp, &remove_queue, list) { - list_del(&req->list); - req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; - zfcp_fsf_req_complete(req); - } -} - static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req) { struct fsf_status_read_buffer *sr_buf = req->data; @@ -459,7 +430,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req) * is called to process the completion status and trigger further * events related to the FSF request. */ -void zfcp_fsf_req_complete(struct zfcp_fsf_req *req) +static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req) { if (unlikely(req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) { zfcp_fsf_status_read_handler(req); @@ -492,6 +463,35 @@ void zfcp_fsf_req_complete(struct zfcp_fsf_req *req) wake_up(&req->completion_wq); } +/** + * zfcp_fsf_req_dismiss_all - dismiss all fsf requests + * @adapter: pointer to struct zfcp_adapter + * + * Never ever call this without shutting down the adapter first. + * Otherwise the adapter would continue using and corrupting s390 storage. + * Included BUG_ON() call to ensure this is done. 
+ * ERP is supposed to be the only user of this function. + */ +void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter) +{ + struct zfcp_fsf_req *req, *tmp; + unsigned long flags; + LIST_HEAD(remove_queue); + unsigned int i; + + BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP); + spin_lock_irqsave(&adapter->req_list_lock, flags); + for (i = 0; i < REQUEST_LIST_SIZE; i++) + list_splice_init(&adapter->req_list[i], &remove_queue); + spin_unlock_irqrestore(&adapter->req_list_lock, flags); + + list_for_each_entry_safe(req, tmp, &remove_queue, list) { + list_del(&req->list); + req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; + zfcp_fsf_req_complete(req); + } +} + static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req) { struct fsf_qtcb_bottom_config *bottom; @@ -2578,3 +2578,43 @@ out: } return ERR_PTR(retval); } + +/** + * zfcp_fsf_reqid_check - validate req_id contained in SBAL returned by QDIO + * @adapter: pointer to struct zfcp_adapter + * @sbal_idx: response queue index of SBAL to be processed + */ +void zfcp_fsf_reqid_check(struct zfcp_adapter *adapter, int sbal_idx) +{ + struct qdio_buffer *sbal = adapter->resp_q.sbal[sbal_idx]; + struct qdio_buffer_element *sbale; + struct zfcp_fsf_req *fsf_req; + unsigned long flags, req_id; + int idx; + + for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) { + + sbale = &sbal->element[idx]; + req_id = (unsigned long) sbale->addr; + spin_lock_irqsave(&adapter->req_list_lock, flags); + fsf_req = zfcp_reqlist_find(adapter, req_id); + + if (!fsf_req) + /* + * Unknown request means that we have potentially memory + * corruption and must stop the machine immediately. + */ + panic("error: unknown req_id (%lx) on adapter %s.\n", + req_id, dev_name(&adapter->ccw_device->dev)); + + list_del(&fsf_req->list); + spin_unlock_irqrestore(&adapter->req_list_lock, flags); + + fsf_req->sbal_response = sbal_idx; + fsf_req->qdio_inb_usage = atomic_read(&adapter->resp_q.count); + zfcp_fsf_req_complete(fsf_req); + + if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY)) + break; + } +} diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index e0a215309df0..2e9b3a9cebd9 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c @@ -112,31 +112,6 @@ static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err, wake_up(&adapter->request_wq); } -static void zfcp_qdio_reqid_check(struct zfcp_adapter *adapter, - unsigned long req_id, int sbal_idx) -{ - struct zfcp_fsf_req *fsf_req; - unsigned long flags; - - spin_lock_irqsave(&adapter->req_list_lock, flags); - fsf_req = zfcp_reqlist_find(adapter, req_id); - - if (!fsf_req) - /* - * Unknown request means that we have potentially memory - * corruption and must stop the machine immediatly. 
- */ - panic("error: unknown request id (%lx) on adapter %s.\n", - req_id, dev_name(&adapter->ccw_device->dev)); - - zfcp_reqlist_remove(adapter, fsf_req); - spin_unlock_irqrestore(&adapter->req_list_lock, flags); - - fsf_req->sbal_response = sbal_idx; - fsf_req->qdio_inb_usage = atomic_read(&adapter->resp_q.count); - zfcp_fsf_req_complete(fsf_req); -} - static void zfcp_qdio_resp_put_back(struct zfcp_adapter *adapter, int processed) { struct zfcp_qdio_queue *queue = &adapter->resp_q; @@ -163,9 +138,7 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err, unsigned long parm) { struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm; - struct zfcp_qdio_queue *queue = &adapter->resp_q; - struct qdio_buffer_element *sbale; - int sbal_idx, sbale_idx, sbal_no; + int sbal_idx, sbal_no; if (unlikely(qdio_err)) { zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count); @@ -179,22 +152,8 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err, */ for (sbal_no = 0; sbal_no < count; sbal_no++) { sbal_idx = (first + sbal_no) % QDIO_MAX_BUFFERS_PER_Q; - /* go through all SBALEs of SBAL */ - for (sbale_idx = 0; sbale_idx < QDIO_MAX_ELEMENTS_PER_BUFFER; - sbale_idx++) { - sbale = zfcp_qdio_sbale(queue, sbal_idx, sbale_idx); - zfcp_qdio_reqid_check(adapter, - (unsigned long) sbale->addr, - sbal_idx); - if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY)) - break; - }; - - if (unlikely(!(sbale->flags & SBAL_FLAGS_LAST_ENTRY))) - dev_warn(&adapter->ccw_device->dev, - "A QDIO protocol error occurred, " - "operations continue\n"); + zfcp_fsf_reqid_check(adapter, sbal_idx); } /* -- cgit v1.2.3 From 058b8647892ed49ba6a0d2c0966a72e20e2e69ff Mon Sep 17 00:00:00 2001 From: Swen Schillig Date: Tue, 18 Aug 2009 15:43:14 +0200 Subject: [SCSI] zfcp: Replace fsf_req wait_queue with completion The combination wait_queue/wakeup in conjunction with the flag ZFCP_STATUS_FSFREQ_COMPLETED to signal the completion of an fsfreq was not race-safe and can be better solved by a completion. 
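For reference, a completion couples the event and the wait in a single object,
so there is no window between setting a flag and waking a waiter. A minimal
sketch of the API with illustrative names (not the zfcp request handling):

  #include <linux/completion.h>

  struct my_req {
          struct completion done;
          int result;
  };

  static void submit_and_wait(struct my_req *req)
  {
          init_completion(&req->done);
          /* ... hand req over to the hardware/interrupt path ... */
          wait_for_completion(&req->done);        /* sleeps until complete() ran */
  }

  static void completion_side(struct my_req *req) /* e.g. interrupt context */
  {
          req->result = 0;
          complete(&req->done);                   /* wakes the waiter, race-free */
  }
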
Signed-off-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_def.h | 3 +-- drivers/s390/scsi/zfcp_erp.c | 3 +-- drivers/s390/scsi/zfcp_fsf.c | 26 ++++++-------------------- drivers/s390/scsi/zfcp_scsi.c | 6 ++---- 4 files changed, 10 insertions(+), 28 deletions(-) (limited to 'drivers/s390') diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index c1becfc1e7fe..944f67754eed 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -248,7 +248,6 @@ enum zfcp_wka_status { /* FSF request status (this does not have a common part) */ #define ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT 0x00000002 -#define ZFCP_STATUS_FSFREQ_COMPLETED 0x00000004 #define ZFCP_STATUS_FSFREQ_ERROR 0x00000008 #define ZFCP_STATUS_FSFREQ_CLEANUP 0x00000010 #define ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED 0x00000040 @@ -532,7 +531,7 @@ struct zfcp_fsf_req { u8 sbale_curr; /* current SBALE during creation of request */ u8 sbal_response; /* SBAL used in interrupt */ - wait_queue_head_t completion_wq; /* can be used by a routine + struct completion completion; /* can be used by a routine to wait for completion */ u32 status; /* status of this request */ u32 fsf_command; /* FSF Command copy */ diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 39e4dd15453f..a377e2f91251 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -485,8 +485,7 @@ static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act) } if (act->status & ZFCP_STATUS_ERP_TIMEDOUT) zfcp_rec_dbf_event_action("erscf_2", act); - if (act->fsf_req->status & (ZFCP_STATUS_FSFREQ_COMPLETED | - ZFCP_STATUS_FSFREQ_DISMISSED)) + if (act->fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED) act->fsf_req = NULL; } else act->fsf_req = NULL; diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 7ca2995aaf68..ed06a1d17b73 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -444,23 +444,11 @@ static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req) if (req->erp_action) zfcp_erp_notify(req->erp_action, 0); - req->status |= ZFCP_STATUS_FSFREQ_COMPLETED; if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP)) zfcp_fsf_req_free(req); else - /* notify initiator waiting for the requests completion */ - /* - * FIXME: Race! We must not access fsf_req here as it might have been - * cleaned up already due to the set ZFCP_STATUS_FSFREQ_COMPLETED - * flag. It's an improbable case. But, we have the same paranoia for - * the cleanup flag already. - * Might better be handled using complete()? 
- * (setting the flag and doing wakeup ought to be atomic - * with regard to checking the flag as long as waitqueue is - * part of the to be released structure) - */ - wake_up(&req->completion_wq); + complete(&req->completion); } /** @@ -733,7 +721,7 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter, INIT_LIST_HEAD(&req->list); init_timer(&req->timer); - init_waitqueue_head(&req->completion_wq); + init_completion(&req->completion); req->adapter = adapter; req->fsf_command = fsf_cmd; @@ -1309,8 +1297,7 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter, retval = zfcp_fsf_req_send(req); spin_unlock_bh(&adapter->req_q_lock); if (!retval) - wait_event(req->completion_wq, - req->status & ZFCP_STATUS_FSFREQ_COMPLETED); + wait_for_completion(&req->completion); zfcp_fsf_req_free(req); return retval; @@ -1405,8 +1392,8 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter, spin_unlock_bh(&adapter->req_q_lock); if (!retval) - wait_event(req->completion_wq, - req->status & ZFCP_STATUS_FSFREQ_COMPLETED); + wait_for_completion(&req->completion); + zfcp_fsf_req_free(req); return retval; @@ -2572,8 +2559,7 @@ out: spin_unlock_bh(&adapter->req_q_lock); if (!retval) { - wait_event(req->completion_wq, - req->status & ZFCP_STATUS_FSFREQ_COMPLETED); + wait_for_completion(&req->completion); return req; } return ERR_PTR(retval); diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 0bd80a90426a..0de059161b35 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -206,8 +206,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) if (!abrt_req) return FAILED; - wait_event(abrt_req->completion_wq, - abrt_req->status & ZFCP_STATUS_FSFREQ_COMPLETED); + wait_for_completion(&abrt_req->completion); if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) dbf_tag = "okay"; @@ -246,8 +245,7 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags) if (!fsf_req) return FAILED; - wait_event(fsf_req->completion_wq, - fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED); + wait_for_completion(&fsf_req->completion); if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) { zfcp_scsi_dbf_event_devreset("fail", tm_flags, unit, scpnt); -- cgit v1.2.3 From a4623c467ff76f9258555d44d68371e10c5406c2 Mon Sep 17 00:00:00 2001 From: Swen Schillig Date: Tue, 18 Aug 2009 15:43:15 +0200 Subject: [SCSI] zfcp: Improve request allocation through mempools Remove the special case for NO_QTCB requests and optimize the mempool and cache processing for fsfreqs. Especially use seperate mempools for the zfcp_fsf_req and zfcp_qtcb structs. 
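As a hedged illustration of the allocation scheme described above (placeholder names, simplified error handling, not the driver's code): request headers come from a small kmalloc-backed mempool, while the hardware-aligned QTCBs come from a slab cache, rounded up to a power-of-two alignment as in the patch, wrapped in its own mempool.

#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/log2.h>

static struct kmem_cache *example_qtcb_cache;
static mempool_t *example_req_pool, *example_qtcb_pool;

static int example_create_pools(size_t req_size, size_t qtcb_size)
{
	/* slab cache aligned to the next power of two for hardware buffers */
	example_qtcb_cache = kmem_cache_create("example_qtcb", qtcb_size,
					       roundup_pow_of_two(qtcb_size),
					       0, NULL);
	if (!example_qtcb_cache)
		return -ENOMEM;

	/* emergency pool for the request header itself */
	example_req_pool = mempool_create_kmalloc_pool(1, req_size);
	/* emergency pool for QTCBs, backed by the slab cache above */
	example_qtcb_pool = mempool_create_slab_pool(3, example_qtcb_cache);

	if (example_req_pool && example_qtcb_pool)
		return 0;

	if (example_req_pool)
		mempool_destroy(example_req_pool);
	if (example_qtcb_pool)
		mempool_destroy(example_qtcb_pool);
	kmem_cache_destroy(example_qtcb_cache);
	return -ENOMEM;
}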
Signed-off-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 98 +++++++++++++++++++++++++------------------- drivers/s390/scsi/zfcp_def.h | 31 ++++++++------ drivers/s390/scsi/zfcp_fc.c | 19 +++------ drivers/s390/scsi/zfcp_fsf.c | 92 ++++++++++++++++++++++------------------- 4 files changed, 127 insertions(+), 113 deletions(-) (limited to 'drivers/s390') diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 7aba6840243f..de623292277b 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -42,6 +42,12 @@ static char *init_device; module_param_named(device, init_device, charp, 0400); MODULE_PARM_DESC(device, "specify initial device"); +static struct kmem_cache *zfcp_cache_hw_align(const char *name, + unsigned long size) +{ + return kmem_cache_create(name, size, roundup_pow_of_two(size), 0, NULL); +} + static int zfcp_reqlist_alloc(struct zfcp_adapter *adapter) { int idx; @@ -110,14 +116,6 @@ out_adapter: return; } -static struct kmem_cache *zfcp_cache_create(int size, char *name) -{ - int align = 1; - while ((size - align) > 0) - align <<= 1; - return kmem_cache_create(name , size, align, 0, NULL); -} - static void __init zfcp_init_device_setup(char *devstr) { char *token; @@ -158,18 +156,23 @@ static int __init zfcp_module_init(void) { int retval = -ENOMEM; - zfcp_data.fsf_req_qtcb_cache = zfcp_cache_create( - sizeof(struct zfcp_fsf_req_qtcb), "zfcp_fsf"); - if (!zfcp_data.fsf_req_qtcb_cache) + zfcp_data.gpn_ft_cache = zfcp_cache_hw_align("zfcp_gpn", + sizeof(struct ct_iu_gpn_ft_req)); + if (!zfcp_data.gpn_ft_cache) goto out; - zfcp_data.sr_buffer_cache = zfcp_cache_create( - sizeof(struct fsf_status_read_buffer), "zfcp_sr"); + zfcp_data.qtcb_cache = zfcp_cache_hw_align("zfcp_qtcb", + sizeof(struct fsf_qtcb)); + if (!zfcp_data.qtcb_cache) + goto out_qtcb_cache; + + zfcp_data.sr_buffer_cache = zfcp_cache_hw_align("zfcp_sr", + sizeof(struct fsf_status_read_buffer)); if (!zfcp_data.sr_buffer_cache) goto out_sr_cache; - zfcp_data.gid_pn_cache = zfcp_cache_create( - sizeof(struct zfcp_gid_pn_data), "zfcp_gid"); + zfcp_data.gid_pn_cache = zfcp_cache_hw_align("zfcp_gid", + sizeof(struct zfcp_gid_pn_data)); if (!zfcp_data.gid_pn_cache) goto out_gid_cache; @@ -209,7 +212,9 @@ out_transport: out_gid_cache: kmem_cache_destroy(zfcp_data.sr_buffer_cache); out_sr_cache: - kmem_cache_destroy(zfcp_data.fsf_req_qtcb_cache); + kmem_cache_destroy(zfcp_data.qtcb_cache); +out_qtcb_cache: + kmem_cache_destroy(zfcp_data.gpn_ft_cache); out: return retval; } @@ -354,36 +359,41 @@ void zfcp_unit_dequeue(struct zfcp_unit *unit) static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) { /* must only be called with zfcp_data.config_sema taken */ - adapter->pool.fsf_req_erp = - mempool_create_slab_pool(1, zfcp_data.fsf_req_qtcb_cache); - if (!adapter->pool.fsf_req_erp) + adapter->pool.erp_req = + mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req)); + if (!adapter->pool.erp_req) return -ENOMEM; - adapter->pool.fsf_req_scsi = - mempool_create_slab_pool(1, zfcp_data.fsf_req_qtcb_cache); - if (!adapter->pool.fsf_req_scsi) + adapter->pool.scsi_req = + mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req)); + if (!adapter->pool.scsi_req) return -ENOMEM; - adapter->pool.fsf_req_abort = - mempool_create_slab_pool(1, zfcp_data.fsf_req_qtcb_cache); - if (!adapter->pool.fsf_req_abort) + adapter->pool.scsi_abort = + mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req)); + if 
(!adapter->pool.scsi_abort) return -ENOMEM; - adapter->pool.fsf_req_status_read = + adapter->pool.status_read_req = mempool_create_kmalloc_pool(FSF_STATUS_READS_RECOM, sizeof(struct zfcp_fsf_req)); - if (!adapter->pool.fsf_req_status_read) + if (!adapter->pool.status_read_req) + return -ENOMEM; + + adapter->pool.qtcb_pool = + mempool_create_slab_pool(3, zfcp_data.qtcb_cache); + if (!adapter->pool.qtcb_pool) return -ENOMEM; - adapter->pool.data_status_read = + adapter->pool.status_read_data = mempool_create_slab_pool(FSF_STATUS_READS_RECOM, zfcp_data.sr_buffer_cache); - if (!adapter->pool.data_status_read) + if (!adapter->pool.status_read_data) return -ENOMEM; - adapter->pool.data_gid_pn = + adapter->pool.gid_pn_data = mempool_create_slab_pool(1, zfcp_data.gid_pn_cache); - if (!adapter->pool.data_gid_pn) + if (!adapter->pool.gid_pn_data) return -ENOMEM; return 0; @@ -392,18 +402,20 @@ static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter) { /* zfcp_data.config_sema must be held */ - if (adapter->pool.fsf_req_erp) - mempool_destroy(adapter->pool.fsf_req_erp); - if (adapter->pool.fsf_req_scsi) - mempool_destroy(adapter->pool.fsf_req_scsi); - if (adapter->pool.fsf_req_abort) - mempool_destroy(adapter->pool.fsf_req_abort); - if (adapter->pool.fsf_req_status_read) - mempool_destroy(adapter->pool.fsf_req_status_read); - if (adapter->pool.data_status_read) - mempool_destroy(adapter->pool.data_status_read); - if (adapter->pool.data_gid_pn) - mempool_destroy(adapter->pool.data_gid_pn); + if (adapter->pool.erp_req) + mempool_destroy(adapter->pool.erp_req); + if (adapter->pool.scsi_req) + mempool_destroy(adapter->pool.scsi_req); + if (adapter->pool.scsi_abort) + mempool_destroy(adapter->pool.scsi_abort); + if (adapter->pool.qtcb_pool) + mempool_destroy(adapter->pool.qtcb_pool); + if (adapter->pool.status_read_req) + mempool_destroy(adapter->pool.status_read_req); + if (adapter->pool.status_read_data) + mempool_destroy(adapter->pool.status_read_data); + if (adapter->pool.gid_pn_data) + mempool_destroy(adapter->pool.gid_pn_data); } /** diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 944f67754eed..1e27ed5d90e0 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -264,12 +264,13 @@ struct zfcp_fsf_req; /* holds various memory pools of an adapter */ struct zfcp_adapter_mempool { - mempool_t *fsf_req_erp; - mempool_t *fsf_req_scsi; - mempool_t *fsf_req_abort; - mempool_t *fsf_req_status_read; - mempool_t *data_status_read; - mempool_t *data_gid_pn; + mempool_t *erp_req; + mempool_t *scsi_req; + mempool_t *scsi_abort; + mempool_t *status_read_req; + mempool_t *status_read_data; + mempool_t *gid_pn_data; + mempool_t *qtcb_pool; }; /* @@ -303,6 +304,15 @@ struct ct_iu_gid_pn_resp { u32 d_id; } __attribute__ ((packed)); +struct ct_iu_gpn_ft_req { + struct ct_hdr header; + u8 flags; + u8 domain_id_scope; + u8 area_id_scope; + u8 fc4_type; +} __attribute__ ((packed)); + + /** * struct zfcp_send_ct - used to pass parameters to function zfcp_fsf_send_ct * @wka_port: port where the request is sent to @@ -559,18 +569,13 @@ struct zfcp_data { lists */ struct semaphore config_sema; /* serialises configuration changes */ - struct kmem_cache *fsf_req_qtcb_cache; + struct kmem_cache *gpn_ft_cache; + struct kmem_cache *qtcb_cache; struct kmem_cache *sr_buffer_cache; struct kmem_cache *gid_pn_cache; struct workqueue_struct *work_queue; }; -/* struct used by memory pools for 
fsf_requests */ -struct zfcp_fsf_req_qtcb { - struct zfcp_fsf_req fsf_req; - struct fsf_qtcb qtcb; -}; - /********************** ZFCP SPECIFIC DEFINES ********************************/ #define ZFCP_SET 0x00000100 diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 6d0fef92567b..acadcd3c276a 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -25,14 +25,6 @@ static u32 rscn_range_mask[] = { [RSCN_FABRIC_ADDRESS] = 0x000000, }; -struct ct_iu_gpn_ft_req { - struct ct_hdr header; - u8 flags; - u8 domain_id_scope; - u8 area_id_scope; - u8 fc4_type; -} __attribute__ ((packed)); - struct gpn_ft_resp_acc { u8 control; u8 port_id[3]; @@ -322,8 +314,7 @@ int static zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action, init_completion(&compl_rec.done); compl_rec.handler = zfcp_fc_ns_gid_pn_eval; compl_rec.handler_data = (unsigned long) gid_pn; - ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.fsf_req_erp, - erp_action); + ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.erp_req, erp_action); if (!ret) wait_for_completion(&compl_rec.done); return ret; @@ -340,7 +331,7 @@ int zfcp_fc_ns_gid_pn(struct zfcp_erp_action *erp_action) struct zfcp_gid_pn_data *gid_pn; struct zfcp_adapter *adapter = erp_action->adapter; - gid_pn = mempool_alloc(adapter->pool.data_gid_pn, GFP_ATOMIC); + gid_pn = mempool_alloc(adapter->pool.gid_pn_data, GFP_ATOMIC); if (!gid_pn) return -ENOMEM; @@ -354,7 +345,7 @@ int zfcp_fc_ns_gid_pn(struct zfcp_erp_action *erp_action) zfcp_wka_port_put(&adapter->gs->ds); out: - mempool_free(gid_pn, adapter->pool.data_gid_pn); + mempool_free(gid_pn, adapter->pool.gid_pn_data); return ret; } @@ -497,7 +488,7 @@ static void zfcp_free_sg_env(struct zfcp_gpn_ft *gpn_ft, int buf_num) { struct scatterlist *sg = &gpn_ft->sg_req; - kfree(sg_virt(sg)); /* free request buffer */ + kmem_cache_free(zfcp_data.gpn_ft_cache, sg_virt(sg)); zfcp_sg_free_table(gpn_ft->sg_resp, buf_num); kfree(gpn_ft); @@ -512,7 +503,7 @@ static struct zfcp_gpn_ft *zfcp_alloc_sg_env(int buf_num) if (!gpn_ft) return NULL; - req = kzalloc(sizeof(struct ct_iu_gpn_ft_req), GFP_KERNEL); + req = kmem_cache_alloc(zfcp_data.gpn_ft_cache, GFP_KERNEL); if (!req) { kfree(gpn_ft); gpn_ft = NULL; diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index ed06a1d17b73..96c580ee7509 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -14,7 +14,6 @@ #include "zfcp_dbf.h" #define ZFCP_REQ_AUTO_CLEANUP 0x00000002 -#define ZFCP_REQ_NO_QTCB 0x00000008 static void zfcp_fsf_request_timeout_handler(unsigned long data) { @@ -112,14 +111,15 @@ static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req) void zfcp_fsf_req_free(struct zfcp_fsf_req *req) { if (likely(req->pool)) { + if (likely(req->qtcb)) + mempool_free(req->qtcb, req->adapter->pool.qtcb_pool); mempool_free(req, req->pool); return; } - if (req->qtcb) { - kmem_cache_free(zfcp_data.fsf_req_qtcb_cache, req); - return; - } + if (likely(req->qtcb)) + kmem_cache_free(zfcp_data.qtcb_cache, req->qtcb); + kfree(req); } static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req) @@ -251,7 +251,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req) if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) { zfcp_hba_dbf_event_fsf_unsol("dism", adapter, sr_buf); - mempool_free(sr_buf, adapter->pool.data_status_read); + mempool_free(sr_buf, adapter->pool.status_read_data); zfcp_fsf_req_free(req); return; } @@ -303,7 +303,7 @@ static void zfcp_fsf_status_read_handler(struct 
zfcp_fsf_req *req) break; } - mempool_free(sr_buf, adapter->pool.data_status_read); + mempool_free(sr_buf, adapter->pool.status_read_data); zfcp_fsf_req_free(req); atomic_inc(&adapter->stat_miss); @@ -669,34 +669,37 @@ static int zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter) return -EIO; } -static struct zfcp_fsf_req *zfcp_fsf_alloc_noqtcb(mempool_t *pool) +static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool) { struct zfcp_fsf_req *req; - req = mempool_alloc(pool, GFP_ATOMIC); - if (!req) + + if (likely(pool)) + req = mempool_alloc(pool, GFP_ATOMIC); + else + req = kmalloc(sizeof(*req), GFP_ATOMIC); + + if (unlikely(!req)) return NULL; + memset(req, 0, sizeof(*req)); req->pool = pool; return req; } -static struct zfcp_fsf_req *zfcp_fsf_alloc_qtcb(mempool_t *pool) +static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool) { - struct zfcp_fsf_req_qtcb *qtcb; + struct fsf_qtcb *qtcb; if (likely(pool)) qtcb = mempool_alloc(pool, GFP_ATOMIC); else - qtcb = kmem_cache_alloc(zfcp_data.fsf_req_qtcb_cache, - GFP_ATOMIC); + qtcb = kmem_cache_alloc(zfcp_data.qtcb_cache, GFP_ATOMIC); + if (unlikely(!qtcb)) return NULL; memset(qtcb, 0, sizeof(*qtcb)); - qtcb->fsf_req.qtcb = &qtcb->qtcb; - qtcb->fsf_req.pool = pool; - - return &qtcb->fsf_req; + return qtcb; } static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter, @@ -704,14 +707,8 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter, mempool_t *pool) { struct qdio_buffer_element *sbale; - - struct zfcp_fsf_req *req; struct zfcp_qdio_queue *req_q = &adapter->req_q; - - if (req_flags & ZFCP_REQ_NO_QTCB) - req = zfcp_fsf_alloc_noqtcb(pool); - else - req = zfcp_fsf_alloc_qtcb(pool); + struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool); if (unlikely(!req)) return ERR_PTR(-ENOMEM); @@ -735,7 +732,17 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter, sbale[0].addr = (void *) req->req_id; sbale[0].flags |= SBAL_FLAGS0_COMMAND; - if (likely(req->qtcb)) { + if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) { + if (likely(pool)) + req->qtcb = zfcp_qtcb_alloc(adapter->pool.qtcb_pool); + else + req->qtcb = zfcp_qtcb_alloc(NULL); + + if (unlikely(!req->qtcb)) { + zfcp_fsf_req_free(req); + return ERR_PTR(-ENOMEM); + } + req->qtcb->prefix.req_seq_no = req->adapter->fsf_req_seq_no; req->qtcb->prefix.req_id = req->req_id; req->qtcb->prefix.ulp_info = 26; @@ -811,9 +818,8 @@ int zfcp_fsf_status_read(struct zfcp_adapter *adapter) if (zfcp_fsf_req_sbal_get(adapter)) goto out; - req = zfcp_fsf_req_create(adapter, FSF_QTCB_UNSOLICITED_STATUS, - ZFCP_REQ_NO_QTCB, - adapter->pool.fsf_req_status_read); + req = zfcp_fsf_req_create(adapter, FSF_QTCB_UNSOLICITED_STATUS, 0, + adapter->pool.status_read_req); if (IS_ERR(req)) { retval = PTR_ERR(req); goto out; @@ -823,7 +829,7 @@ int zfcp_fsf_status_read(struct zfcp_adapter *adapter) sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY; req->sbale_curr = 2; - sr_buf = mempool_alloc(adapter->pool.data_status_read, GFP_ATOMIC); + sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC); if (!sr_buf) { retval = -ENOMEM; goto failed_buf; @@ -841,7 +847,7 @@ int zfcp_fsf_status_read(struct zfcp_adapter *adapter) goto out; failed_req_send: - mempool_free(sr_buf, adapter->pool.data_status_read); + mempool_free(sr_buf, adapter->pool.status_read_data); failed_buf: zfcp_fsf_req_free(req); zfcp_hba_dbf_event_fsf_unsol("fail", adapter, NULL); @@ -919,7 +925,7 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id, if 
(zfcp_fsf_req_sbal_get(adapter)) goto out; req = zfcp_fsf_req_create(adapter, FSF_QTCB_ABORT_FCP_CMND, - 0, adapter->pool.fsf_req_abort); + 0, adapter->pool.scsi_abort); if (IS_ERR(req)) { req = NULL; goto out; @@ -1231,7 +1237,7 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action) req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_CONFIG_DATA, ZFCP_REQ_AUTO_CLEANUP, - adapter->pool.fsf_req_erp); + adapter->pool.erp_req); if (IS_ERR(req)) { retval = PTR_ERR(req); goto out; @@ -1327,7 +1333,7 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action) goto out; req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, ZFCP_REQ_AUTO_CLEANUP, - adapter->pool.fsf_req_erp); + adapter->pool.erp_req); if (IS_ERR(req)) { retval = PTR_ERR(req); goto out; @@ -1497,7 +1503,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action) req = zfcp_fsf_req_create(adapter, FSF_QTCB_OPEN_PORT_WITH_DID, ZFCP_REQ_AUTO_CLEANUP, - adapter->pool.fsf_req_erp); + adapter->pool.erp_req); if (IS_ERR(req)) { retval = PTR_ERR(req); goto out; @@ -1566,7 +1572,7 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action) req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PORT, ZFCP_REQ_AUTO_CLEANUP, - adapter->pool.fsf_req_erp); + adapter->pool.erp_req); if (IS_ERR(req)) { retval = PTR_ERR(req); goto out; @@ -1643,7 +1649,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_wka_port *wka_port) req = zfcp_fsf_req_create(adapter, FSF_QTCB_OPEN_PORT_WITH_DID, ZFCP_REQ_AUTO_CLEANUP, - adapter->pool.fsf_req_erp); + adapter->pool.erp_req); if (unlikely(IS_ERR(req))) { retval = PTR_ERR(req); goto out; @@ -1697,7 +1703,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_wka_port *wka_port) req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PORT, ZFCP_REQ_AUTO_CLEANUP, - adapter->pool.fsf_req_erp); + adapter->pool.erp_req); if (unlikely(IS_ERR(req))) { retval = PTR_ERR(req); goto out; @@ -1788,7 +1794,7 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action) req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PHYSICAL_PORT, ZFCP_REQ_AUTO_CLEANUP, - adapter->pool.fsf_req_erp); + adapter->pool.erp_req); if (IS_ERR(req)) { retval = PTR_ERR(req); goto out; @@ -1960,7 +1966,7 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action) req = zfcp_fsf_req_create(adapter, FSF_QTCB_OPEN_LUN, ZFCP_REQ_AUTO_CLEANUP, - adapter->pool.fsf_req_erp); + adapter->pool.erp_req); if (IS_ERR(req)) { retval = PTR_ERR(req); goto out; @@ -2045,7 +2051,7 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action) goto out; req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_LUN, ZFCP_REQ_AUTO_CLEANUP, - adapter->pool.fsf_req_erp); + adapter->pool.erp_req); if (IS_ERR(req)) { retval = PTR_ERR(req); goto out; @@ -2349,7 +2355,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, } req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, ZFCP_REQ_AUTO_CLEANUP, - adapter->pool.fsf_req_scsi); + adapter->pool.scsi_req); if (IS_ERR(req)) { retval = PTR_ERR(req); goto out; @@ -2460,7 +2466,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags) if (zfcp_fsf_req_sbal_get(adapter)) goto out; req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, 0, - adapter->pool.fsf_req_scsi); + adapter->pool.scsi_req); if (IS_ERR(req)) { req = NULL; goto out; -- cgit v1.2.3 From 09a46c6e34ba152169b7400d266d2efb4c391a43 Mon Sep 17 00:00:00 2001 From: Swen Schillig Date: Tue, 18 Aug 2009 15:43:16 +0200 Subject: [SCSI] zfcp: Remove the useless ZFCP_REQ_AUTO_CLEANUP flag The flag ZFCP_REQ_AUTO_CLEANUP 
was useless as the ZFCP_STATUS_FSFREQ_CLEANUP flag is there for exactly the same purpose. Signed-off-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_fsf.c | 80 +++++++++++++++++++++++++------------------- 1 file changed, 45 insertions(+), 35 deletions(-) (limited to 'drivers/s390') diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 96c580ee7509..70a978a14f2a 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -13,8 +13,6 @@ #include "zfcp_ext.h" #include "zfcp_dbf.h" -#define ZFCP_REQ_AUTO_CLEANUP 0x00000002 - static void zfcp_fsf_request_timeout_handler(unsigned long data) { struct zfcp_adapter *adapter = (struct zfcp_adapter *) data; @@ -703,8 +701,7 @@ static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool) } static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter, - u32 fsf_cmd, int req_flags, - mempool_t *pool) + u32 fsf_cmd, mempool_t *pool) { struct qdio_buffer_element *sbale; struct zfcp_qdio_queue *req_q = &adapter->req_q; @@ -761,9 +758,6 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter, return ERR_PTR(-EIO); } - if (likely(req_flags & ZFCP_REQ_AUTO_CLEANUP)) - req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - return req; } @@ -818,7 +812,7 @@ int zfcp_fsf_status_read(struct zfcp_adapter *adapter) if (zfcp_fsf_req_sbal_get(adapter)) goto out; - req = zfcp_fsf_req_create(adapter, FSF_QTCB_UNSOLICITED_STATUS, 0, + req = zfcp_fsf_req_create(adapter, FSF_QTCB_UNSOLICITED_STATUS, adapter->pool.status_read_req); if (IS_ERR(req)) { retval = PTR_ERR(req); @@ -925,7 +919,7 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id, if (zfcp_fsf_req_sbal_get(adapter)) goto out; req = zfcp_fsf_req_create(adapter, FSF_QTCB_ABORT_FCP_CMND, - 0, adapter->pool.scsi_abort); + adapter->pool.scsi_abort); if (IS_ERR(req)) { req = NULL; goto out; @@ -1081,13 +1075,14 @@ int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool, if (zfcp_fsf_req_sbal_get(adapter)) goto out; - req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_GENERIC, - ZFCP_REQ_AUTO_CLEANUP, pool); + req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_GENERIC, pool); + if (IS_ERR(req)) { ret = PTR_ERR(req); goto out; } + req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; ret = zfcp_fsf_setup_ct_els_sbals(req, ct->req, ct->resp, FSF_MAX_SBALS_PER_REQ); if (ret) @@ -1189,13 +1184,15 @@ int zfcp_fsf_send_els(struct zfcp_send_els *els) spin_lock_bh(&adapter->req_q_lock); if (zfcp_fsf_req_sbal_get(adapter)) goto out; - req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_ELS, - ZFCP_REQ_AUTO_CLEANUP, NULL); + + req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_ELS, NULL); + if (IS_ERR(req)) { ret = PTR_ERR(req); goto out; } + req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; ret = zfcp_fsf_setup_ct_els_sbals(req, els->req, els->resp, 2); if (ret) @@ -1234,15 +1231,16 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action) spin_lock_bh(&adapter->req_q_lock); if (zfcp_fsf_req_sbal_get(adapter)) goto out; - req = zfcp_fsf_req_create(adapter, - FSF_QTCB_EXCHANGE_CONFIG_DATA, - ZFCP_REQ_AUTO_CLEANUP, + + req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_CONFIG_DATA, adapter->pool.erp_req); + if (IS_ERR(req)) { retval = PTR_ERR(req); goto out; } + req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; sbale = zfcp_qdio_sbale_req(req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -1278,8 +1276,8 @@ int 
zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter, if (zfcp_fsf_req_sbal_get(adapter)) goto out_unlock; - req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_CONFIG_DATA, - 0, NULL); + req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_CONFIG_DATA, NULL); + if (IS_ERR(req)) { retval = PTR_ERR(req); goto out_unlock; @@ -1331,14 +1329,16 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action) spin_lock_bh(&adapter->req_q_lock); if (zfcp_fsf_req_sbal_get(adapter)) goto out; + req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, - ZFCP_REQ_AUTO_CLEANUP, adapter->pool.erp_req); + if (IS_ERR(req)) { retval = PTR_ERR(req); goto out; } + req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; sbale = zfcp_qdio_sbale_req(req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -1378,8 +1378,8 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter, if (zfcp_fsf_req_sbal_get(adapter)) goto out_unlock; - req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, 0, - NULL); + req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, NULL); + if (IS_ERR(req)) { retval = PTR_ERR(req); goto out_unlock; @@ -1500,15 +1500,15 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action) if (zfcp_fsf_req_sbal_get(adapter)) goto out; - req = zfcp_fsf_req_create(adapter, - FSF_QTCB_OPEN_PORT_WITH_DID, - ZFCP_REQ_AUTO_CLEANUP, + req = zfcp_fsf_req_create(adapter, FSF_QTCB_OPEN_PORT_WITH_DID, adapter->pool.erp_req); + if (IS_ERR(req)) { retval = PTR_ERR(req); goto out; } + req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; sbale = zfcp_qdio_sbale_req(req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -1571,13 +1571,14 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action) goto out; req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PORT, - ZFCP_REQ_AUTO_CLEANUP, adapter->pool.erp_req); + if (IS_ERR(req)) { retval = PTR_ERR(req); goto out; } + req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; sbale = zfcp_qdio_sbale_req(req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -1646,15 +1647,15 @@ int zfcp_fsf_open_wka_port(struct zfcp_wka_port *wka_port) if (zfcp_fsf_req_sbal_get(adapter)) goto out; - req = zfcp_fsf_req_create(adapter, - FSF_QTCB_OPEN_PORT_WITH_DID, - ZFCP_REQ_AUTO_CLEANUP, + req = zfcp_fsf_req_create(adapter, FSF_QTCB_OPEN_PORT_WITH_DID, adapter->pool.erp_req); + if (unlikely(IS_ERR(req))) { retval = PTR_ERR(req); goto out; } + req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; sbale = zfcp_qdio_sbale_req(req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -1702,13 +1703,14 @@ int zfcp_fsf_close_wka_port(struct zfcp_wka_port *wka_port) goto out; req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PORT, - ZFCP_REQ_AUTO_CLEANUP, adapter->pool.erp_req); + if (unlikely(IS_ERR(req))) { retval = PTR_ERR(req); goto out; } + req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; sbale = zfcp_qdio_sbale_req(req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -1793,13 +1795,14 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action) goto out; req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PHYSICAL_PORT, - ZFCP_REQ_AUTO_CLEANUP, adapter->pool.erp_req); + if (IS_ERR(req)) { retval = PTR_ERR(req); goto out; } + req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; sbale = zfcp_qdio_sbale_req(req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -1965,13 
+1968,14 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action) goto out; req = zfcp_fsf_req_create(adapter, FSF_QTCB_OPEN_LUN, - ZFCP_REQ_AUTO_CLEANUP, adapter->pool.erp_req); + if (IS_ERR(req)) { retval = PTR_ERR(req); goto out; } + req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; sbale = zfcp_qdio_sbale_req(req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -2049,14 +2053,16 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action) spin_lock_bh(&adapter->req_q_lock); if (zfcp_fsf_req_sbal_get(adapter)) goto out; + req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_LUN, - ZFCP_REQ_AUTO_CLEANUP, adapter->pool.erp_req); + if (IS_ERR(req)) { retval = PTR_ERR(req); goto out; } + req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; sbale = zfcp_qdio_sbale_req(req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -2353,14 +2359,16 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, atomic_inc(&adapter->qdio_outb_full); goto out; } + req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, - ZFCP_REQ_AUTO_CLEANUP, adapter->pool.scsi_req); + if (IS_ERR(req)) { retval = PTR_ERR(req); goto out; } + req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; zfcp_unit_get(unit); req->unit = unit; req->data = scsi_cmnd; @@ -2465,8 +2473,10 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags) spin_lock_bh(&adapter->req_q_lock); if (zfcp_fsf_req_sbal_get(adapter)) goto out; - req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, 0, + + req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, adapter->pool.scsi_req); + if (IS_ERR(req)) { req = NULL; goto out; @@ -2537,7 +2547,7 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter, if (zfcp_fsf_req_sbal_get(adapter)) goto out; - req = zfcp_fsf_req_create(adapter, fsf_cfdc->command, 0, NULL); + req = zfcp_fsf_req_create(adapter, fsf_cfdc->command, NULL); if (IS_ERR(req)) { retval = -EPERM; goto out; -- cgit v1.2.3 From 4544683a4b1d4e65ccca8c736bac56a195a5206b Mon Sep 17 00:00:00 2001 From: Swen Schillig Date: Tue, 18 Aug 2009 15:43:17 +0200 Subject: [SCSI] zfcp: Move workqueue to adapter struct Remove the global driver work queue and replace it with a workqueue local to the adapter. The usage of this workqueue makes this the correct place for the structure. In addition multiple adapters won't block each other due to the serialization of the queued work. 
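A small sketch of the per-adapter workqueue pattern described above (placeholder struct and field names, not the zfcp definitions): each adapter owns a single-threaded queue named after its device, so queued work for one adapter never waits behind another adapter's work.

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct example_adapter {
	struct workqueue_struct *work_queue;
	struct work_struct stat_work;
};

static int example_setup_work_queue(struct example_adapter *a,
				    const char *devname)
{
	char name[32];

	snprintf(name, sizeof(name), "example_q_%s", devname);
	a->work_queue = create_singlethread_workqueue(name);
	return a->work_queue ? 0 : -ENOMEM;
}

static void example_destroy_work_queue(struct example_adapter *a)
{
	if (a->work_queue)
		destroy_workqueue(a->work_queue);
	a->work_queue = NULL;
}

/* producers then queue onto the adapter-local queue, e.g.:
 *	queue_work(a->work_queue, &a->stat_work);
 */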
Signed-off-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 29 +++++++++++++++++++++++++++-- drivers/s390/scsi/zfcp_def.h | 2 +- drivers/s390/scsi/zfcp_erp.c | 2 +- drivers/s390/scsi/zfcp_fc.c | 2 +- drivers/s390/scsi/zfcp_fsf.c | 2 +- drivers/s390/scsi/zfcp_scsi.c | 5 +++-- 6 files changed, 34 insertions(+), 8 deletions(-) (limited to 'drivers/s390') diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index de623292277b..f785cbc7520d 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -176,8 +176,6 @@ static int __init zfcp_module_init(void) if (!zfcp_data.gid_pn_cache) goto out_gid_cache; - zfcp_data.work_queue = create_singlethread_workqueue("zfcp_wq"); - sema_init(&zfcp_data.config_sema, 1); rwlock_init(&zfcp_data.config_lock); @@ -458,6 +456,27 @@ static void zfcp_print_sl(struct seq_file *m, struct service_level *sl) adapter->fsf_lic_version); } +static int zfcp_setup_adapter_work_queue(struct zfcp_adapter *adapter) +{ + char name[TASK_COMM_LEN]; + + snprintf(name, sizeof(name), "zfcp_q_%s", + dev_name(&adapter->ccw_device->dev)); + adapter->work_queue = create_singlethread_workqueue(name); + + if (adapter->work_queue) + return 0; + return -ENOMEM; +} + +static void zfcp_destroy_adapter_work_queue(struct zfcp_adapter *adapter) +{ + if (adapter->work_queue) + destroy_workqueue(adapter->work_queue); + adapter->work_queue = NULL; + +} + /** * zfcp_adapter_enqueue - enqueue a new adapter to the list * @ccw_device: pointer to the struct cc_device @@ -504,6 +523,9 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) if (zfcp_adapter_debug_register(adapter)) goto debug_register_failed; + if (zfcp_setup_adapter_work_queue(adapter)) + goto work_queue_failed; + init_waitqueue_head(&adapter->remove_wq); init_waitqueue_head(&adapter->erp_thread_wqh); init_waitqueue_head(&adapter->erp_done_wqh); @@ -543,6 +565,8 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) return 0; sysfs_failed: + zfcp_destroy_adapter_work_queue(adapter); +work_queue_failed: zfcp_adapter_debug_unregister(adapter); debug_register_failed: dev_set_drvdata(&ccw_device->dev, NULL); @@ -579,6 +603,7 @@ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter) if (!retval) return; + zfcp_destroy_adapter_work_queue(adapter); zfcp_adapter_debug_unregister(adapter); zfcp_qdio_free(adapter); zfcp_free_low_mem_buffers(adapter); diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 1e27ed5d90e0..2715a103e5a8 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -485,6 +485,7 @@ struct zfcp_adapter { struct work_struct scan_work; struct service_level service_level; atomic_t qdio_outb_full; /* queue full incidents */ + struct workqueue_struct *work_queue; }; struct zfcp_port { @@ -573,7 +574,6 @@ struct zfcp_data { struct kmem_cache *qtcb_cache; struct kmem_cache *sr_buffer_cache; struct kmem_cache *gid_pn_cache; - struct workqueue_struct *work_queue; }; /********************** ZFCP SPECIFIC DEFINES ********************************/ diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index a377e2f91251..50e5fbe2252a 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -875,7 +875,7 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act) return zfcp_erp_open_ptp_port(act); if (!port->d_id) { zfcp_port_get(port); - if (!queue_work(zfcp_data.work_queue, + if (!queue_work(adapter->work_queue, 
&port->gid_pn_work)) zfcp_port_put(port); return ZFCP_ERP_CONTINUES; diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index acadcd3c276a..8921e16fdab7 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -480,7 +480,7 @@ out: void zfcp_test_link(struct zfcp_port *port) { zfcp_port_get(port); - if (!queue_work(zfcp_data.work_queue, &port->test_link_work)) + if (!queue_work(port->adapter->work_queue, &port->test_link_work)) zfcp_port_put(port); } diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 70a978a14f2a..5b73f989a629 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -305,7 +305,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req) zfcp_fsf_req_free(req); atomic_inc(&adapter->stat_miss); - queue_work(zfcp_data.work_queue, &adapter->stat_work); + queue_work(adapter->work_queue, &adapter->stat_work); } static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req) diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 0de059161b35..2e13d41269a4 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -572,7 +572,7 @@ void zfcp_scsi_schedule_rport_register(struct zfcp_port *port) zfcp_port_get(port); port->rport_task = RPORT_ADD; - if (!queue_work(zfcp_data.work_queue, &port->rport_work)) + if (!queue_work(port->adapter->work_queue, &port->rport_work)) zfcp_port_put(port); } @@ -581,7 +581,8 @@ void zfcp_scsi_schedule_rport_block(struct zfcp_port *port) zfcp_port_get(port); port->rport_task = RPORT_DEL; - if (port->rport && queue_work(zfcp_data.work_queue, &port->rport_work)) + if (port->rport && queue_work(port->adapter->work_queue, + &port->rport_work)) return; zfcp_port_put(port); -- cgit v1.2.3 From 42428f747a8a0db9c6de03e105932316defad65d Mon Sep 17 00:00:00 2001 From: Swen Schillig Date: Tue, 18 Aug 2009 15:43:18 +0200 Subject: [SCSI] zfcp: Separate qdio attributes from zfcp_fsf_req Split all qdio related attributes out of zfcp_fsf_req and put it in new structure. 
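The split can be illustrated with a stripped-down sketch (field subset only; the struct layout follows the patch, the surrounding code is hypothetical): the qdio bookkeeping lives in its own struct, embedded in the request, and the qdio helpers take the adapter plus that sub-struct rather than the whole FSF request.

#include <linux/types.h>

/* qdio-only bookkeeping, grouped so the qdio layer never needs the full
 * request definition */
struct example_queue_req {
	u8  sbal_number;	/* SBALs used by this request */
	u8  sbal_first;		/* first SBAL of the request */
	u8  sbal_last;		/* last SBAL of the request */
	u16 qdio_outb_usage;	/* outbound queue fill level at send time */
};

struct example_fsf_req {
	struct example_queue_req queue_req;	/* embedded qdio state */
	u32 status;
	/* ... FSF-layer fields ... */
};

/* qdio helpers then take (adapter, queue_req) instead of the request:
 *	zfcp_qdio_send(adapter, &req->queue_req);
 */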
Signed-off-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_dbf.c | 6 +-- drivers/s390/scsi/zfcp_def.h | 84 +++++++++++++++++++++++++------------- drivers/s390/scsi/zfcp_ext.h | 11 +++-- drivers/s390/scsi/zfcp_fsf.c | 79 ++++++++++++++++++----------------- drivers/s390/scsi/zfcp_qdio.c | 95 +++++++++++++++++++++++-------------------- 5 files changed, 160 insertions(+), 115 deletions(-) (limited to 'drivers/s390') diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index 5568440ec2fc..fc7f3d66fe37 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -152,9 +152,9 @@ void _zfcp_hba_dbf_event_fsf_response(const char *tag2, int level, memcpy(response->fsf_status_qual, fsf_status_qual, FSF_STATUS_QUALIFIER_SIZE); response->fsf_req_status = fsf_req->status; - response->sbal_first = fsf_req->sbal_first; - response->sbal_last = fsf_req->sbal_last; - response->sbal_response = fsf_req->sbal_response; + response->sbal_first = fsf_req->queue_req.sbal_first; + response->sbal_last = fsf_req->queue_req.sbal_last; + response->sbal_response = fsf_req->queue_req.sbal_response; response->pool = fsf_req->pool != NULL; response->erp_action = (unsigned long)fsf_req->erp_action; diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 2715a103e5a8..a04bdfd4d2f6 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -529,36 +529,64 @@ struct zfcp_unit { struct work_struct scsi_work; }; -/* FSF request */ +/** + * struct zfcp_queue_req - queue related values for a request + * @sbal_number: number of free SBALs + * @sbal_first: first SBAL for this request + * @sbal_last: last SBAL for this request + * @sbal_limit: last possible SBAL for this request + * @sbale_curr: current SBALE at creation of this request + * @sbal_response: SBAL used in interrupt + * @qdio_outb_usage: usage of outbound queue + * @qdio_inb_usage: usage of inbound queue + */ +struct zfcp_queue_req { + u8 sbal_number; + u8 sbal_first; + u8 sbal_last; + u8 sbal_limit; + u8 sbale_curr; + u8 sbal_response; + u16 qdio_outb_usage; + u16 qdio_inb_usage; +}; + +/** + * struct zfcp_fsf_req - basic FSF request structure + * @list: list of FSF requests + * @req_id: unique request ID + * @adapter: adapter this request belongs to + * @queue_req: queue related values + * @completion: used to signal the completion of the request + * @status: status of the request + * @fsf_command: FSF command issued + * @qtcb: associated QTCB + * @seq_no: sequence number of this request + * @data: private data + * @timer: timer data of this request + * @erp_action: reference to erp action if request issued on behalf of ERP + * @pool: reference to memory pool if used for this request + * @issued: time when request was send (STCK) + * @unit: reference to unit if this request is a SCSI request + * @handler: handler which should be called to process response + */ struct zfcp_fsf_req { - struct list_head list; /* list of FSF requests */ - unsigned long req_id; /* unique request ID */ - struct zfcp_adapter *adapter; /* adapter request belongs to */ - u8 sbal_number; /* nr of SBALs free for use */ - u8 sbal_first; /* first SBAL for this request */ - u8 sbal_last; /* last SBAL for this request */ - u8 sbal_limit; /* last possible SBAL for - this reuest */ - u8 sbale_curr; /* current SBALE during creation - of request */ - u8 sbal_response; /* SBAL used in interrupt */ - struct completion completion; /* can be used by a routine - to 
wait for completion */ - u32 status; /* status of this request */ - u32 fsf_command; /* FSF Command copy */ - struct fsf_qtcb *qtcb; /* address of associated QTCB */ - u32 seq_no; /* Sequence number of request */ - void *data; /* private data of request */ - struct timer_list timer; /* used for erp or scsi er */ - struct zfcp_erp_action *erp_action; /* used if this request is - issued on behalf of erp */ - mempool_t *pool; /* used if request was alloacted - from emergency pool */ - unsigned long long issued; /* request sent time (STCK) */ - struct zfcp_unit *unit; + struct list_head list; + unsigned long req_id; + struct zfcp_adapter *adapter; + struct zfcp_queue_req queue_req; + struct completion completion; + u32 status; + u32 fsf_command; + struct fsf_qtcb *qtcb; + u32 seq_no; + void *data; + struct timer_list timer; + struct zfcp_erp_action *erp_action; + mempool_t *pool; + unsigned long long issued; + struct zfcp_unit *unit; void (*handler)(struct zfcp_fsf_req *); - u16 qdio_outb_usage;/* usage of outbound queue */ - u16 qdio_inb_usage; /* usage of inbound queue */ }; /* driver data */ diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 6a3727bdb386..d11c0f44dad3 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -142,10 +142,13 @@ extern void zfcp_fsf_reqid_check(struct zfcp_adapter *, int); /* zfcp_qdio.c */ extern int zfcp_qdio_allocate(struct zfcp_adapter *); extern void zfcp_qdio_free(struct zfcp_adapter *); -extern int zfcp_qdio_send(struct zfcp_fsf_req *); -extern struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_fsf_req *); -extern struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_fsf_req *); -extern int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *, unsigned long, +extern int zfcp_qdio_send(struct zfcp_adapter *, struct zfcp_queue_req *); +extern struct qdio_buffer_element + *zfcp_qdio_sbale_req(struct zfcp_adapter *, struct zfcp_queue_req *); +extern struct qdio_buffer_element + *zfcp_qdio_sbale_curr(struct zfcp_adapter *, struct zfcp_queue_req *); +extern int zfcp_qdio_sbals_from_sg(struct zfcp_adapter *, + struct zfcp_queue_req *, unsigned long, struct scatterlist *, int); extern int zfcp_qdio_open(struct zfcp_adapter *); extern void zfcp_qdio_close(struct zfcp_adapter *); diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 5b73f989a629..e88b7804780b 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -720,12 +720,12 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter, req->adapter = adapter; req->fsf_command = fsf_cmd; req->req_id = adapter->req_no; - req->sbal_number = 1; - req->sbal_first = req_q->first; - req->sbal_last = req_q->first; - req->sbale_curr = 1; + req->queue_req.sbal_number = 1; + req->queue_req.sbal_first = req_q->first; + req->queue_req.sbal_last = req_q->first; + req->queue_req.sbale_curr = 1; - sbale = zfcp_qdio_sbale_req(req); + sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); sbale[0].addr = (void *) req->req_id; sbale[0].flags |= SBAL_FLAGS0_COMMAND; @@ -774,9 +774,9 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req) list_add_tail(&req->list, &adapter->req_list[idx]); spin_unlock_irqrestore(&adapter->req_list_lock, flags); - req->qdio_outb_usage = atomic_read(&adapter->req_q.count); + req->queue_req.qdio_outb_usage = atomic_read(&adapter->req_q.count); req->issued = get_clock(); - if (zfcp_qdio_send(req)) { + if (zfcp_qdio_send(adapter, &req->queue_req)) { del_timer(&req->timer); 
spin_lock_irqsave(&adapter->req_list_lock, flags); /* lookup request again, list might have changed */ @@ -819,9 +819,9 @@ int zfcp_fsf_status_read(struct zfcp_adapter *adapter) goto out; } - sbale = zfcp_qdio_sbale_req(req); + sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY; - req->sbale_curr = 2; + req->queue_req.sbale_curr = 2; sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC); if (!sr_buf) { @@ -830,7 +830,7 @@ int zfcp_fsf_status_read(struct zfcp_adapter *adapter) } memset(sr_buf, 0, sizeof(*sr_buf)); req->data = sr_buf; - sbale = zfcp_qdio_sbale_curr(req); + sbale = zfcp_qdio_sbale_curr(adapter, &req->queue_req); sbale->addr = (void *) sr_buf; sbale->length = sizeof(*sr_buf); @@ -929,7 +929,7 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id, ZFCP_STATUS_COMMON_UNBLOCKED))) goto out_error_free; - sbale = zfcp_qdio_sbale_req(req); + sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -1023,8 +1023,10 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, struct scatterlist *sg_resp, int max_sbals) { - struct qdio_buffer_element *sbale = zfcp_qdio_sbale_req(req); - u32 feat = req->adapter->adapter_features; + struct zfcp_adapter *adapter = req->adapter; + struct qdio_buffer_element *sbale = zfcp_qdio_sbale_req(adapter, + &req->queue_req); + u32 feat = adapter->adapter_features; int bytes; if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS)) { @@ -1041,14 +1043,16 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, return 0; } - bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ, + bytes = zfcp_qdio_sbals_from_sg(adapter, &req->queue_req, + SBAL_FLAGS0_TYPE_WRITE_READ, sg_req, max_sbals); if (bytes <= 0) return -EIO; req->qtcb->bottom.support.req_buf_length = bytes; - req->sbale_curr = ZFCP_LAST_SBALE_PER_SBAL; + req->queue_req.sbale_curr = ZFCP_LAST_SBALE_PER_SBAL; - bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ, + bytes = zfcp_qdio_sbals_from_sg(adapter, &req->queue_req, + SBAL_FLAGS0_TYPE_WRITE_READ, sg_resp, max_sbals); if (bytes <= 0) return -EIO; @@ -1241,7 +1245,7 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action) } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - sbale = zfcp_qdio_sbale_req(req); + sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -1283,7 +1287,7 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter, goto out_unlock; } - sbale = zfcp_qdio_sbale_req(req); + sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; req->handler = zfcp_fsf_exchange_config_data_handler; @@ -1339,7 +1343,7 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action) } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - sbale = zfcp_qdio_sbale_req(req); + sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -1388,7 +1392,7 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter, if (data) req->data = data; - sbale = zfcp_qdio_sbale_req(req); + sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -1509,7 +1513,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action 
*erp_action) } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - sbale = zfcp_qdio_sbale_req(req); + sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -1579,7 +1583,7 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action) } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - sbale = zfcp_qdio_sbale_req(req); + sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -1656,7 +1660,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_wka_port *wka_port) } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - sbale = zfcp_qdio_sbale_req(req); + sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -1711,7 +1715,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_wka_port *wka_port) } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - sbale = zfcp_qdio_sbale_req(req); + sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -1803,7 +1807,7 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action) } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - sbale = zfcp_qdio_sbale_req(req); + sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -1976,7 +1980,7 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action) } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - sbale = zfcp_qdio_sbale_req(req); + sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -2063,7 +2067,7 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action) } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - sbale = zfcp_qdio_sbale_req(req); + sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -2140,8 +2144,8 @@ static void zfcp_fsf_trace_latency(struct zfcp_fsf_req *fsf_req) } if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR) trace.flags |= ZFCP_BLK_REQ_ERROR; - trace.inb_usage = fsf_req->qdio_inb_usage; - trace.outb_usage = fsf_req->qdio_outb_usage; + trace.inb_usage = fsf_req->queue_req.qdio_inb_usage; + trace.outb_usage = fsf_req->queue_req.qdio_outb_usage; blk_add_driver_data(req->q, req, &trace, sizeof(trace)); } @@ -2420,11 +2424,11 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) + fcp_cmnd_iu->add_fcp_cdb_length + sizeof(u32); - real_bytes = zfcp_qdio_sbals_from_sg(req, sbtype, + real_bytes = zfcp_qdio_sbals_from_sg(adapter, &req->queue_req, sbtype, scsi_sglist(scsi_cmnd), FSF_MAX_SBALS_PER_REQ); if (unlikely(real_bytes < 0)) { - if (req->sbal_number >= FSF_MAX_SBALS_PER_REQ) { + if (req->queue_req.sbal_number >= FSF_MAX_SBALS_PER_REQ) { dev_err(&adapter->ccw_device->dev, "Oversize data package, unit 0x%016Lx " "on port 0x%016Lx closed\n", @@ -2492,7 +2496,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags) req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) + sizeof(u32); - sbale = zfcp_qdio_sbale_req(req); + sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -2555,15 +2559,15 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter 
*adapter, req->handler = zfcp_fsf_control_file_handler; - sbale = zfcp_qdio_sbale_req(req); + sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); sbale[0].flags |= direction; bottom = &req->qtcb->bottom.support; bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE; bottom->option = fsf_cfdc->option; - bytes = zfcp_qdio_sbals_from_sg(req, direction, fsf_cfdc->sg, - FSF_MAX_SBALS_PER_REQ); + bytes = zfcp_qdio_sbals_from_sg(adapter, &req->queue_req, direction, + fsf_cfdc->sg, FSF_MAX_SBALS_PER_REQ); if (bytes != ZFCP_CFDC_MAX_SIZE) { zfcp_fsf_req_free(req); goto out; @@ -2612,8 +2616,9 @@ void zfcp_fsf_reqid_check(struct zfcp_adapter *adapter, int sbal_idx) list_del(&fsf_req->list); spin_unlock_irqrestore(&adapter->req_list_lock, flags); - fsf_req->sbal_response = sbal_idx; - fsf_req->qdio_inb_usage = atomic_read(&adapter->resp_q.count); + fsf_req->queue_req.sbal_response = sbal_idx; + fsf_req->queue_req.qdio_inb_usage = + atomic_read(&adapter->resp_q.count); zfcp_fsf_req_complete(fsf_req); if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY)) diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index 2e9b3a9cebd9..e118874976f0 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c @@ -3,7 +3,7 @@ * * Setup and helper functions to access QDIO. * - * Copyright IBM Corporation 2002, 2008 + * Copyright IBM Corporation 2002, 2009 */ #define KMSG_COMPONENT "zfcp" @@ -165,12 +165,14 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err, /** * zfcp_qdio_sbale_req - return ptr to SBALE of req_q for a struct zfcp_fsf_req - * @fsf_req: pointer to struct fsf_req + * @adapter: pointer to struct zfcp_adapter + * @q_rec: pointer to struct zfcp_queue_rec * Returns: pointer to qdio_buffer_element (SBALE) structure */ -struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_fsf_req *req) +struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_adapter *adapter, + struct zfcp_queue_req *q_req) { - return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last, 0); + return zfcp_qdio_sbale(&adapter->req_q, q_req->sbal_last, 0); } /** @@ -178,74 +180,80 @@ struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_fsf_req *req) * @fsf_req: pointer to struct fsf_req * Returns: pointer to qdio_buffer_element (SBALE) structure */ -struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_fsf_req *req) +struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_adapter *adapter, + struct zfcp_queue_req *q_req) { - return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last, - req->sbale_curr); + return zfcp_qdio_sbale(&adapter->req_q, q_req->sbal_last, + q_req->sbale_curr); } -static void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals) +static void zfcp_qdio_sbal_limit(struct zfcp_adapter *adapter, + struct zfcp_queue_req *q_req, int max_sbals) { - int count = atomic_read(&fsf_req->adapter->req_q.count); + int count = atomic_read(&adapter->req_q.count); count = min(count, max_sbals); - fsf_req->sbal_limit = (fsf_req->sbal_first + count - 1) + q_req->sbal_limit = (q_req->sbal_first + count - 1) % QDIO_MAX_BUFFERS_PER_Q; } static struct qdio_buffer_element * -zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) +zfcp_qdio_sbal_chain(struct zfcp_adapter *adapter, struct zfcp_queue_req *q_req, + unsigned long sbtype) { struct qdio_buffer_element *sbale; /* set last entry flag in current SBALE of current SBAL */ - sbale = zfcp_qdio_sbale_curr(fsf_req); + sbale = zfcp_qdio_sbale_curr(adapter, q_req); 
sbale->flags |= SBAL_FLAGS_LAST_ENTRY; /* don't exceed last allowed SBAL */ - if (fsf_req->sbal_last == fsf_req->sbal_limit) + if (q_req->sbal_last == q_req->sbal_limit) return NULL; /* set chaining flag in first SBALE of current SBAL */ - sbale = zfcp_qdio_sbale_req(fsf_req); + sbale = zfcp_qdio_sbale_req(adapter, q_req); sbale->flags |= SBAL_FLAGS0_MORE_SBALS; /* calculate index of next SBAL */ - fsf_req->sbal_last++; - fsf_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q; + q_req->sbal_last++; + q_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q; /* keep this requests number of SBALs up-to-date */ - fsf_req->sbal_number++; + q_req->sbal_number++; /* start at first SBALE of new SBAL */ - fsf_req->sbale_curr = 0; + q_req->sbale_curr = 0; /* set storage-block type for new SBAL */ - sbale = zfcp_qdio_sbale_curr(fsf_req); + sbale = zfcp_qdio_sbale_curr(adapter, q_req); sbale->flags |= sbtype; return sbale; } static struct qdio_buffer_element * -zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) +zfcp_qdio_sbale_next(struct zfcp_adapter *adapter, struct zfcp_queue_req *q_req, + unsigned int sbtype) { - if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL) - return zfcp_qdio_sbal_chain(fsf_req, sbtype); - fsf_req->sbale_curr++; - return zfcp_qdio_sbale_curr(fsf_req); + if (q_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL) + return zfcp_qdio_sbal_chain(adapter, q_req, sbtype); + q_req->sbale_curr++; + return zfcp_qdio_sbale_curr(adapter, q_req); } -static void zfcp_qdio_undo_sbals(struct zfcp_fsf_req *fsf_req) +static void zfcp_qdio_undo_sbals(struct zfcp_adapter *adapter, + struct zfcp_queue_req *q_req) { - struct qdio_buffer **sbal = fsf_req->adapter->req_q.sbal; - int first = fsf_req->sbal_first; - int last = fsf_req->sbal_last; + struct qdio_buffer **sbal = adapter->req_q.sbal; + int first = q_req->sbal_first; + int last = q_req->sbal_last; int count = (last - first + QDIO_MAX_BUFFERS_PER_Q) % QDIO_MAX_BUFFERS_PER_Q + 1; zfcp_qdio_zero_sbals(sbal, first, count); } -static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req, +static int zfcp_qdio_fill_sbals(struct zfcp_adapter *adapter, + struct zfcp_queue_req *q_req, unsigned int sbtype, void *start_addr, unsigned int total_length) { @@ -256,10 +264,10 @@ static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req, /* split segment up */ for (addr = start_addr, remaining = total_length; remaining > 0; addr += length, remaining -= length) { - sbale = zfcp_qdio_sbale_next(fsf_req, sbtype); + sbale = zfcp_qdio_sbale_next(adapter, q_req, sbtype); if (!sbale) { - atomic_inc(&fsf_req->adapter->qdio_outb_full); - zfcp_qdio_undo_sbals(fsf_req); + atomic_inc(&adapter->qdio_outb_full); + zfcp_qdio_undo_sbals(adapter, q_req); return -EINVAL; } @@ -281,29 +289,31 @@ static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req, * @max_sbals: upper bound for number of SBALs to be used * Returns: number of bytes, or error (negativ) */ -int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, - struct scatterlist *sg, int max_sbals) +int zfcp_qdio_sbals_from_sg(struct zfcp_adapter *adapter, + struct zfcp_queue_req *q_req, + unsigned long sbtype, struct scatterlist *sg, + int max_sbals) { struct qdio_buffer_element *sbale; int retval, bytes = 0; /* figure out last allowed SBAL */ - zfcp_qdio_sbal_limit(fsf_req, max_sbals); + zfcp_qdio_sbal_limit(adapter, q_req, max_sbals); /* set storage-block type for this request */ - sbale = zfcp_qdio_sbale_req(fsf_req); + sbale = zfcp_qdio_sbale_req(adapter, q_req); sbale->flags |= sbtype; 
for (; sg; sg = sg_next(sg)) { - retval = zfcp_qdio_fill_sbals(fsf_req, sbtype, sg_virt(sg), - sg->length); + retval = zfcp_qdio_fill_sbals(adapter, q_req, sbtype, + sg_virt(sg), sg->length); if (retval < 0) return retval; bytes += sg->length; } /* assume that no other SBALEs are to follow in the same SBAL */ - sbale = zfcp_qdio_sbale_curr(fsf_req); + sbale = zfcp_qdio_sbale_curr(adapter, q_req); sbale->flags |= SBAL_FLAGS_LAST_ENTRY; return bytes; @@ -314,12 +324,11 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, * @fsf_req: pointer to struct zfcp_fsf_req * Returns: 0 on success, error otherwise */ -int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req) +int zfcp_qdio_send(struct zfcp_adapter *adapter, struct zfcp_queue_req *q_req) { - struct zfcp_adapter *adapter = fsf_req->adapter; struct zfcp_qdio_queue *req_q = &adapter->req_q; - int first = fsf_req->sbal_first; - int count = fsf_req->sbal_number; + int first = q_req->sbal_first; + int count = q_req->sbal_number; int retval; unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT; -- cgit v1.2.3 From 564e1c86c810f9ccfe4300afa402815e3db4886d Mon Sep 17 00:00:00 2001 From: Swen Schillig Date: Tue, 18 Aug 2009 15:43:19 +0200 Subject: [SCSI] zfcp: Move qdio related data out of zfcp_adapter The zfcp_adapter structure was growing over time to a size of almost one memory page. To reduce the size of the data structure and to seperate different layers, put all qdio related data in the new zfcp_qdio data structure. Signed-off-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 20 ++- drivers/s390/scsi/zfcp_dbf.c | 6 +- drivers/s390/scsi/zfcp_def.h | 34 +++-- drivers/s390/scsi/zfcp_erp.c | 8 +- drivers/s390/scsi/zfcp_ext.h | 26 ++-- drivers/s390/scsi/zfcp_fsf.c | 299 +++++++++++++++++++++-------------------- drivers/s390/scsi/zfcp_qdio.c | 224 +++++++++++++++--------------- drivers/s390/scsi/zfcp_scsi.c | 6 +- drivers/s390/scsi/zfcp_sysfs.c | 16 +-- 9 files changed, 343 insertions(+), 296 deletions(-) (limited to 'drivers/s390') diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index f785cbc7520d..ed7211ef04eb 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -428,7 +428,7 @@ static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter) int zfcp_status_read_refill(struct zfcp_adapter *adapter) { while (atomic_read(&adapter->stat_miss) > 0) - if (zfcp_fsf_status_read(adapter)) { + if (zfcp_fsf_status_read(adapter->qdio)) { if (atomic_read(&adapter->stat_miss) >= 16) { zfcp_erp_adapter_reopen(adapter, 0, "axsref1", NULL); @@ -507,11 +507,16 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) return -ENOMEM; } + adapter->qdio = kzalloc(sizeof(struct zfcp_qdio), GFP_KERNEL); + if (!adapter->qdio) + goto qdio_mem_failed; + + adapter->qdio->adapter = adapter; ccw_device->handler = NULL; adapter->ccw_device = ccw_device; atomic_set(&adapter->refcount, 0); - if (zfcp_qdio_allocate(adapter)) + if (zfcp_qdio_allocate(adapter->qdio, ccw_device)) goto qdio_allocate_failed; if (zfcp_allocate_low_mem_buffers(adapter)) @@ -536,8 +541,8 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) spin_lock_init(&adapter->req_list_lock); - spin_lock_init(&adapter->req_q_lock); - spin_lock_init(&adapter->qdio_stat_lock); + spin_lock_init(&adapter->qdio->req_q_lock); + spin_lock_init(&adapter->qdio->stat_lock); rwlock_init(&adapter->erp_lock); rwlock_init(&adapter->abort_lock); @@ -574,7 +579,9 @@ 
debug_register_failed: failed_low_mem_buffers: zfcp_free_low_mem_buffers(adapter); qdio_allocate_failed: - zfcp_qdio_free(adapter); + zfcp_qdio_free(adapter->qdio); + kfree(adapter->qdio); +qdio_mem_failed: kfree(adapter); return -ENOMEM; } @@ -605,12 +612,13 @@ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter) zfcp_destroy_adapter_work_queue(adapter); zfcp_adapter_debug_unregister(adapter); - zfcp_qdio_free(adapter); + zfcp_qdio_free(adapter->qdio); zfcp_free_low_mem_buffers(adapter); kfree(adapter->req_list); kfree(adapter->fc_stats); kfree(adapter->stats_reset_data); kfree(adapter->gs); + kfree(adapter->qdio); kfree(adapter); } diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index fc7f3d66fe37..3179b08bda6a 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -274,16 +274,16 @@ void _zfcp_hba_dbf_event_fsf_unsol(const char *tag, int level, /** * zfcp_hba_dbf_event_qdio - trace event for QDIO related failure - * @adapter: adapter affected by this QDIO related event + * @qdio: qdio structure affected by this QDIO related event * @qdio_error: as passed by qdio module * @sbal_index: first buffer with error condition, as passed by qdio module * @sbal_count: number of buffers affected, as passed by qdio module */ -void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, +void zfcp_hba_dbf_event_qdio(struct zfcp_qdio *qdio, unsigned int qdio_error, int sbal_index, int sbal_count) { - struct zfcp_dbf *dbf = adapter->dbf; + struct zfcp_dbf *dbf = qdio->adapter->dbf; struct zfcp_hba_dbf_record *r = &dbf->hba_dbf_buf; unsigned long flags; diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index a04bdfd4d2f6..bac5c497eab5 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -428,6 +428,29 @@ struct zfcp_latencies { spinlock_t lock; }; +/** struct zfcp_qdio - basic QDIO data structure + * @resp_q: response queue + * @req_q: request queue + * @stat_lock: lock to protect req_q_util and req_q_time + * @req_q_lock; lock to serialize access to request queue + * @req_q_time: time of last fill level change + * @req_q_util: used for accounting + * @req_q_full: queue full incidents + * @req_q_wq: used to wait for SBAL availability + * @adapter: adapter used in conjunction with this QDIO structure + */ +struct zfcp_qdio { + struct zfcp_qdio_queue resp_q; + struct zfcp_qdio_queue req_q; + spinlock_t stat_lock; + spinlock_t req_q_lock; + ktime_t req_q_time; + u64 req_q_util; + atomic_t req_q_full; + wait_queue_head_t req_q_wq; + struct zfcp_adapter *adapter; +}; + struct zfcp_adapter { atomic_t refcount; /* reference count */ wait_queue_head_t remove_wq; /* can be used to wait for @@ -436,6 +459,7 @@ struct zfcp_adapter { u64 peer_wwpn; /* P2P peer WWPN */ u32 peer_d_id; /* P2P peer D_ID */ struct ccw_device *ccw_device; /* S/390 ccw device */ + struct zfcp_qdio *qdio; u32 hydra_version; /* Hydra version */ u32 fsf_lic_version; u32 adapter_features; /* FCP channel features */ @@ -447,15 +471,7 @@ struct zfcp_adapter { unsigned long req_no; /* unique FSF req number */ struct list_head *req_list; /* list of pending reqs */ spinlock_t req_list_lock; /* request list lock */ - struct zfcp_qdio_queue req_q; /* request queue */ - spinlock_t req_q_lock; /* for operations on queue */ - ktime_t req_q_time; /* time of last fill level change */ - u64 req_q_util; /* for accounting */ - spinlock_t qdio_stat_lock; u32 fsf_req_seq_no; /* FSF cmnd seq number */ - wait_queue_head_t request_wq; /* can be used to wait for - 
more avaliable SBALs */ - struct zfcp_qdio_queue resp_q; /* response queue */ rwlock_t abort_lock; /* Protects against SCSI stack abort/command completion races */ @@ -478,13 +494,11 @@ struct zfcp_adapter { struct zfcp_wka_ports *gs; /* generic services */ struct zfcp_dbf *dbf; /* debug traces */ struct zfcp_adapter_mempool pool; /* Adapter memory pools */ - struct qdio_initialize qdio_init_data; /* for qdio_establish */ struct fc_host_statistics *fc_stats; struct fsf_qtcb_bottom_port *stats_reset_data; unsigned long stats_reset; struct work_struct scan_work; struct service_level service_level; - atomic_t qdio_outb_full; /* queue full incidents */ struct workqueue_struct *work_queue; }; diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 50e5fbe2252a..feda1db56b23 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -603,9 +603,11 @@ static void zfcp_erp_wakeup(struct zfcp_adapter *adapter) static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *act) { - if (zfcp_qdio_open(act->adapter)) + struct zfcp_qdio *qdio = act->adapter->qdio; + + if (zfcp_qdio_open(qdio)) return ZFCP_ERP_FAILED; - init_waitqueue_head(&act->adapter->request_wq); + init_waitqueue_head(&qdio->req_q_wq); atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &act->adapter->status); return ZFCP_ERP_SUCCEEDED; } @@ -710,7 +712,7 @@ static void zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *act) struct zfcp_adapter *adapter = act->adapter; /* close queues to ensure that buffers are not accessed by adapter */ - zfcp_qdio_close(adapter); + zfcp_qdio_close(adapter->qdio); zfcp_fsf_req_dismiss_all(adapter); adapter->fsf_req_seq_no = 0; zfcp_fc_wka_ports_force_offline(adapter->gs); diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index d11c0f44dad3..e97947d2f2ed 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -51,7 +51,7 @@ extern void _zfcp_hba_dbf_event_fsf_response(const char *, int level, extern void _zfcp_hba_dbf_event_fsf_unsol(const char *, int level, struct zfcp_adapter *, struct fsf_status_read_buffer *); -extern void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *, unsigned int, int, +extern void zfcp_hba_dbf_event_qdio(struct zfcp_qdio *, unsigned int, int, int); extern void zfcp_hba_dbf_event_berr(struct zfcp_adapter *, struct zfcp_fsf_req *); @@ -118,15 +118,15 @@ extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *); extern int zfcp_fsf_open_unit(struct zfcp_erp_action *); extern int zfcp_fsf_close_unit(struct zfcp_erp_action *); extern int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *); -extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *, +extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *, struct fsf_qtcb_bottom_config *); extern int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *); -extern int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *, +extern int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *, struct fsf_qtcb_bottom_port *); extern struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *, struct zfcp_fsf_cfdc *); extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *); -extern int zfcp_fsf_status_read(struct zfcp_adapter *); +extern int zfcp_fsf_status_read(struct zfcp_qdio *); extern int zfcp_status_read_refill(struct zfcp_adapter *adapter); extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *, struct zfcp_erp_action *); @@ -137,21 +137,21 @@ extern void zfcp_fsf_req_free(struct zfcp_fsf_req 
*); extern struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *, u8); extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long, struct zfcp_unit *); -extern void zfcp_fsf_reqid_check(struct zfcp_adapter *, int); +extern void zfcp_fsf_reqid_check(struct zfcp_qdio *, int); /* zfcp_qdio.c */ -extern int zfcp_qdio_allocate(struct zfcp_adapter *); -extern void zfcp_qdio_free(struct zfcp_adapter *); -extern int zfcp_qdio_send(struct zfcp_adapter *, struct zfcp_queue_req *); +extern int zfcp_qdio_allocate(struct zfcp_qdio *, struct ccw_device *); +extern void zfcp_qdio_free(struct zfcp_qdio *); +extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_queue_req *); extern struct qdio_buffer_element - *zfcp_qdio_sbale_req(struct zfcp_adapter *, struct zfcp_queue_req *); + *zfcp_qdio_sbale_req(struct zfcp_qdio *, struct zfcp_queue_req *); extern struct qdio_buffer_element - *zfcp_qdio_sbale_curr(struct zfcp_adapter *, struct zfcp_queue_req *); -extern int zfcp_qdio_sbals_from_sg(struct zfcp_adapter *, + *zfcp_qdio_sbale_curr(struct zfcp_qdio *, struct zfcp_queue_req *); +extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *, struct zfcp_queue_req *, unsigned long, struct scatterlist *, int); -extern int zfcp_qdio_open(struct zfcp_adapter *); -extern void zfcp_qdio_close(struct zfcp_adapter *); +extern int zfcp_qdio_open(struct zfcp_qdio *); +extern void zfcp_qdio_close(struct zfcp_qdio *); /* zfcp_scsi.c */ extern struct zfcp_data zfcp_data; diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index e88b7804780b..b9a16e4b48b4 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -637,33 +637,34 @@ static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req) } } -static int zfcp_fsf_sbal_check(struct zfcp_adapter *adapter) +static int zfcp_fsf_sbal_check(struct zfcp_qdio *qdio) { - struct zfcp_qdio_queue *req_q = &adapter->req_q; + struct zfcp_qdio_queue *req_q = &qdio->req_q; - spin_lock_bh(&adapter->req_q_lock); + spin_lock_bh(&qdio->req_q_lock); if (atomic_read(&req_q->count)) return 1; - spin_unlock_bh(&adapter->req_q_lock); + spin_unlock_bh(&qdio->req_q_lock); return 0; } -static int zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter) +static int zfcp_fsf_req_sbal_get(struct zfcp_qdio *qdio) { + struct zfcp_adapter *adapter = qdio->adapter; long ret; - spin_unlock_bh(&adapter->req_q_lock); - ret = wait_event_interruptible_timeout(adapter->request_wq, - zfcp_fsf_sbal_check(adapter), 5 * HZ); + spin_unlock_bh(&qdio->req_q_lock); + ret = wait_event_interruptible_timeout(qdio->req_q_wq, + zfcp_fsf_sbal_check(qdio), 5 * HZ); if (ret > 0) return 0; if (!ret) { - atomic_inc(&adapter->qdio_outb_full); + atomic_inc(&qdio->req_q_full); /* assume hanging outbound queue, try queue recovery */ zfcp_erp_adapter_reopen(adapter, 0, "fsrsg_1", NULL); } - spin_lock_bh(&adapter->req_q_lock); + spin_lock_bh(&qdio->req_q_lock); return -EIO; } @@ -700,11 +701,12 @@ static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool) return qtcb; } -static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter, +static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio, u32 fsf_cmd, mempool_t *pool) { struct qdio_buffer_element *sbale; - struct zfcp_qdio_queue *req_q = &adapter->req_q; + struct zfcp_qdio_queue *req_q = &qdio->req_q; + struct zfcp_adapter *adapter = qdio->adapter; struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool); if (unlikely(!req)) @@ -725,7 +727,7 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct 
zfcp_adapter *adapter, req->queue_req.sbal_last = req_q->first; req->queue_req.sbale_curr = 1; - sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); + sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); sbale[0].addr = (void *) req->req_id; sbale[0].flags |= SBAL_FLAGS0_COMMAND; @@ -740,7 +742,7 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter, return ERR_PTR(-ENOMEM); } - req->qtcb->prefix.req_seq_no = req->adapter->fsf_req_seq_no; + req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no; req->qtcb->prefix.req_id = req->req_id; req->qtcb->prefix.ulp_info = 26; req->qtcb->prefix.qtcb_type = fsf_qtcb_type[req->fsf_command]; @@ -764,6 +766,7 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter, static int zfcp_fsf_req_send(struct zfcp_fsf_req *req) { struct zfcp_adapter *adapter = req->adapter; + struct zfcp_qdio *qdio = adapter->qdio; unsigned long flags; int idx; int with_qtcb = (req->qtcb != NULL); @@ -774,9 +777,9 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req) list_add_tail(&req->list, &adapter->req_list[idx]); spin_unlock_irqrestore(&adapter->req_list_lock, flags); - req->queue_req.qdio_outb_usage = atomic_read(&adapter->req_q.count); + req->queue_req.qdio_outb_usage = atomic_read(&qdio->req_q.count); req->issued = get_clock(); - if (zfcp_qdio_send(adapter, &req->queue_req)) { + if (zfcp_qdio_send(qdio, &req->queue_req)) { del_timer(&req->timer); spin_lock_irqsave(&adapter->req_list_lock, flags); /* lookup request again, list might have changed */ @@ -801,25 +804,26 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req) * @req_flags: request flags * Returns: 0 on success, ERROR otherwise */ -int zfcp_fsf_status_read(struct zfcp_adapter *adapter) +int zfcp_fsf_status_read(struct zfcp_qdio *qdio) { + struct zfcp_adapter *adapter = qdio->adapter; struct zfcp_fsf_req *req; struct fsf_status_read_buffer *sr_buf; struct qdio_buffer_element *sbale; int retval = -EIO; - spin_lock_bh(&adapter->req_q_lock); - if (zfcp_fsf_req_sbal_get(adapter)) + spin_lock_bh(&qdio->req_q_lock); + if (zfcp_fsf_req_sbal_get(qdio)) goto out; - req = zfcp_fsf_req_create(adapter, FSF_QTCB_UNSOLICITED_STATUS, + req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS, adapter->pool.status_read_req); if (IS_ERR(req)) { retval = PTR_ERR(req); goto out; } - sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); + sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY; req->queue_req.sbale_curr = 2; @@ -830,7 +834,7 @@ int zfcp_fsf_status_read(struct zfcp_adapter *adapter) } memset(sr_buf, 0, sizeof(*sr_buf)); req->data = sr_buf; - sbale = zfcp_qdio_sbale_curr(adapter, &req->queue_req); + sbale = zfcp_qdio_sbale_curr(qdio, &req->queue_req); sbale->addr = (void *) sr_buf; sbale->length = sizeof(*sr_buf); @@ -846,7 +850,7 @@ failed_buf: zfcp_fsf_req_free(req); zfcp_hba_dbf_event_fsf_unsol("fail", adapter, NULL); out: - spin_unlock_bh(&adapter->req_q_lock); + spin_unlock_bh(&qdio->req_q_lock); return retval; } @@ -913,13 +917,13 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id, { struct qdio_buffer_element *sbale; struct zfcp_fsf_req *req = NULL; - struct zfcp_adapter *adapter = unit->port->adapter; + struct zfcp_qdio *qdio = unit->port->adapter->qdio; - spin_lock_bh(&adapter->req_q_lock); - if (zfcp_fsf_req_sbal_get(adapter)) + spin_lock_bh(&qdio->req_q_lock); + if (zfcp_fsf_req_sbal_get(qdio)) goto out; - req = zfcp_fsf_req_create(adapter, FSF_QTCB_ABORT_FCP_CMND, - 
adapter->pool.scsi_abort); + req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND, + qdio->adapter->pool.scsi_abort); if (IS_ERR(req)) { req = NULL; goto out; @@ -929,7 +933,7 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id, ZFCP_STATUS_COMMON_UNBLOCKED))) goto out_error_free; - sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); + sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -947,7 +951,7 @@ out_error_free: zfcp_fsf_req_free(req); req = NULL; out: - spin_unlock_bh(&adapter->req_q_lock); + spin_unlock_bh(&qdio->req_q_lock); return req; } @@ -1024,7 +1028,7 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, int max_sbals) { struct zfcp_adapter *adapter = req->adapter; - struct qdio_buffer_element *sbale = zfcp_qdio_sbale_req(adapter, + struct qdio_buffer_element *sbale = zfcp_qdio_sbale_req(adapter->qdio, &req->queue_req); u32 feat = adapter->adapter_features; int bytes; @@ -1043,7 +1047,7 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, return 0; } - bytes = zfcp_qdio_sbals_from_sg(adapter, &req->queue_req, + bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->queue_req, SBAL_FLAGS0_TYPE_WRITE_READ, sg_req, max_sbals); if (bytes <= 0) @@ -1051,7 +1055,7 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, req->qtcb->bottom.support.req_buf_length = bytes; req->queue_req.sbale_curr = ZFCP_LAST_SBALE_PER_SBAL; - bytes = zfcp_qdio_sbals_from_sg(adapter, &req->queue_req, + bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->queue_req, SBAL_FLAGS0_TYPE_WRITE_READ, sg_resp, max_sbals); if (bytes <= 0) @@ -1071,15 +1075,15 @@ int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool, struct zfcp_erp_action *erp_action) { struct zfcp_wka_port *wka_port = ct->wka_port; - struct zfcp_adapter *adapter = wka_port->adapter; + struct zfcp_qdio *qdio = wka_port->adapter->qdio; struct zfcp_fsf_req *req; int ret = -EIO; - spin_lock_bh(&adapter->req_q_lock); - if (zfcp_fsf_req_sbal_get(adapter)) + spin_lock_bh(&qdio->req_q_lock); + if (zfcp_fsf_req_sbal_get(qdio)) goto out; - req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_GENERIC, pool); + req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC, pool); if (IS_ERR(req)) { ret = PTR_ERR(req); @@ -1118,7 +1122,7 @@ failed_send: if (erp_action) erp_action->fsf_req = NULL; out: - spin_unlock_bh(&adapter->req_q_lock); + spin_unlock_bh(&qdio->req_q_lock); return ret; } @@ -1181,15 +1185,15 @@ skip_fsfstatus: int zfcp_fsf_send_els(struct zfcp_send_els *els) { struct zfcp_fsf_req *req; - struct zfcp_adapter *adapter = els->adapter; + struct zfcp_qdio *qdio = els->adapter->qdio; struct fsf_qtcb_bottom_support *bottom; int ret = -EIO; - spin_lock_bh(&adapter->req_q_lock); - if (zfcp_fsf_req_sbal_get(adapter)) + spin_lock_bh(&qdio->req_q_lock); + if (zfcp_fsf_req_sbal_get(qdio)) goto out; - req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_ELS, NULL); + req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS, NULL); if (IS_ERR(req)) { ret = PTR_ERR(req); @@ -1221,7 +1225,7 @@ int zfcp_fsf_send_els(struct zfcp_send_els *els) failed_send: zfcp_fsf_req_free(req); out: - spin_unlock_bh(&adapter->req_q_lock); + spin_unlock_bh(&qdio->req_q_lock); return ret; } @@ -1229,15 +1233,15 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action) { struct qdio_buffer_element *sbale; struct zfcp_fsf_req *req; - struct zfcp_adapter *adapter = erp_action->adapter; + struct zfcp_qdio *qdio = 
erp_action->adapter->qdio; int retval = -EIO; - spin_lock_bh(&adapter->req_q_lock); - if (zfcp_fsf_req_sbal_get(adapter)) + spin_lock_bh(&qdio->req_q_lock); + if (zfcp_fsf_req_sbal_get(qdio)) goto out; - req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_CONFIG_DATA, - adapter->pool.erp_req); + req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA, + qdio->adapter->pool.erp_req); if (IS_ERR(req)) { retval = PTR_ERR(req); @@ -1245,7 +1249,7 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action) } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); + sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -1265,29 +1269,29 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action) erp_action->fsf_req = NULL; } out: - spin_unlock_bh(&adapter->req_q_lock); + spin_unlock_bh(&qdio->req_q_lock); return retval; } -int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter, +int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio, struct fsf_qtcb_bottom_config *data) { struct qdio_buffer_element *sbale; struct zfcp_fsf_req *req = NULL; int retval = -EIO; - spin_lock_bh(&adapter->req_q_lock); - if (zfcp_fsf_req_sbal_get(adapter)) + spin_lock_bh(&qdio->req_q_lock); + if (zfcp_fsf_req_sbal_get(qdio)) goto out_unlock; - req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_CONFIG_DATA, NULL); + req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA, NULL); if (IS_ERR(req)) { retval = PTR_ERR(req); goto out_unlock; } - sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); + sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; req->handler = zfcp_fsf_exchange_config_data_handler; @@ -1303,7 +1307,7 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter, zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); retval = zfcp_fsf_req_send(req); - spin_unlock_bh(&adapter->req_q_lock); + spin_unlock_bh(&qdio->req_q_lock); if (!retval) wait_for_completion(&req->completion); @@ -1311,7 +1315,7 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter, return retval; out_unlock: - spin_unlock_bh(&adapter->req_q_lock); + spin_unlock_bh(&qdio->req_q_lock); return retval; } @@ -1322,20 +1326,20 @@ out_unlock: */ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action) { + struct zfcp_qdio *qdio = erp_action->adapter->qdio; struct qdio_buffer_element *sbale; struct zfcp_fsf_req *req; - struct zfcp_adapter *adapter = erp_action->adapter; int retval = -EIO; - if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) + if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) return -EOPNOTSUPP; - spin_lock_bh(&adapter->req_q_lock); - if (zfcp_fsf_req_sbal_get(adapter)) + spin_lock_bh(&qdio->req_q_lock); + if (zfcp_fsf_req_sbal_get(qdio)) goto out; - req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, - adapter->pool.erp_req); + req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA, + qdio->adapter->pool.erp_req); if (IS_ERR(req)) { retval = PTR_ERR(req); @@ -1343,7 +1347,7 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action) } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); + sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= 
SBAL_FLAGS_LAST_ENTRY; @@ -1358,31 +1362,31 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action) erp_action->fsf_req = NULL; } out: - spin_unlock_bh(&adapter->req_q_lock); + spin_unlock_bh(&qdio->req_q_lock); return retval; } /** * zfcp_fsf_exchange_port_data_sync - request information about local port - * @adapter: pointer to struct zfcp_adapter + * @qdio: pointer to struct zfcp_qdio * @data: pointer to struct fsf_qtcb_bottom_port * Returns: 0 on success, error otherwise */ -int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter, +int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio, struct fsf_qtcb_bottom_port *data) { struct qdio_buffer_element *sbale; struct zfcp_fsf_req *req = NULL; int retval = -EIO; - if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) + if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)) return -EOPNOTSUPP; - spin_lock_bh(&adapter->req_q_lock); - if (zfcp_fsf_req_sbal_get(adapter)) + spin_lock_bh(&qdio->req_q_lock); + if (zfcp_fsf_req_sbal_get(qdio)) goto out_unlock; - req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, NULL); + req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA, NULL); if (IS_ERR(req)) { retval = PTR_ERR(req); @@ -1392,14 +1396,14 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter, if (data) req->data = data; - sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); + sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; req->handler = zfcp_fsf_exchange_port_data_handler; zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); retval = zfcp_fsf_req_send(req); - spin_unlock_bh(&adapter->req_q_lock); + spin_unlock_bh(&qdio->req_q_lock); if (!retval) wait_for_completion(&req->completion); @@ -1409,7 +1413,7 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter, return retval; out_unlock: - spin_unlock_bh(&adapter->req_q_lock); + spin_unlock_bh(&qdio->req_q_lock); return retval; } @@ -1495,17 +1499,17 @@ out: int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action) { struct qdio_buffer_element *sbale; - struct zfcp_adapter *adapter = erp_action->adapter; - struct zfcp_fsf_req *req; + struct zfcp_qdio *qdio = erp_action->adapter->qdio; struct zfcp_port *port = erp_action->port; + struct zfcp_fsf_req *req; int retval = -EIO; - spin_lock_bh(&adapter->req_q_lock); - if (zfcp_fsf_req_sbal_get(adapter)) + spin_lock_bh(&qdio->req_q_lock); + if (zfcp_fsf_req_sbal_get(qdio)) goto out; - req = zfcp_fsf_req_create(adapter, FSF_QTCB_OPEN_PORT_WITH_DID, - adapter->pool.erp_req); + req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID, + qdio->adapter->pool.erp_req); if (IS_ERR(req)) { retval = PTR_ERR(req); @@ -1513,7 +1517,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action) } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); + sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -1532,7 +1536,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action) zfcp_port_put(port); } out: - spin_unlock_bh(&adapter->req_q_lock); + spin_unlock_bh(&qdio->req_q_lock); return retval; } @@ -1566,16 +1570,16 @@ static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req) int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action) { struct qdio_buffer_element *sbale; - struct zfcp_adapter *adapter = erp_action->adapter; + 
struct zfcp_qdio *qdio = erp_action->adapter->qdio; struct zfcp_fsf_req *req; int retval = -EIO; - spin_lock_bh(&adapter->req_q_lock); - if (zfcp_fsf_req_sbal_get(adapter)) + spin_lock_bh(&qdio->req_q_lock); + if (zfcp_fsf_req_sbal_get(qdio)) goto out; - req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PORT, - adapter->pool.erp_req); + req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT, + qdio->adapter->pool.erp_req); if (IS_ERR(req)) { retval = PTR_ERR(req); @@ -1583,7 +1587,7 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action) } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); + sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -1600,7 +1604,7 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action) erp_action->fsf_req = NULL; } out: - spin_unlock_bh(&adapter->req_q_lock); + spin_unlock_bh(&qdio->req_q_lock); return retval; } @@ -1643,16 +1647,16 @@ out: int zfcp_fsf_open_wka_port(struct zfcp_wka_port *wka_port) { struct qdio_buffer_element *sbale; - struct zfcp_adapter *adapter = wka_port->adapter; + struct zfcp_qdio *qdio = wka_port->adapter->qdio; struct zfcp_fsf_req *req; int retval = -EIO; - spin_lock_bh(&adapter->req_q_lock); - if (zfcp_fsf_req_sbal_get(adapter)) + spin_lock_bh(&qdio->req_q_lock); + if (zfcp_fsf_req_sbal_get(qdio)) goto out; - req = zfcp_fsf_req_create(adapter, FSF_QTCB_OPEN_PORT_WITH_DID, - adapter->pool.erp_req); + req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID, + qdio->adapter->pool.erp_req); if (unlikely(IS_ERR(req))) { retval = PTR_ERR(req); @@ -1660,7 +1664,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_wka_port *wka_port) } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); + sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -1673,7 +1677,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_wka_port *wka_port) if (retval) zfcp_fsf_req_free(req); out: - spin_unlock_bh(&adapter->req_q_lock); + spin_unlock_bh(&qdio->req_q_lock); return retval; } @@ -1698,16 +1702,16 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req) int zfcp_fsf_close_wka_port(struct zfcp_wka_port *wka_port) { struct qdio_buffer_element *sbale; - struct zfcp_adapter *adapter = wka_port->adapter; + struct zfcp_qdio *qdio = wka_port->adapter->qdio; struct zfcp_fsf_req *req; int retval = -EIO; - spin_lock_bh(&adapter->req_q_lock); - if (zfcp_fsf_req_sbal_get(adapter)) + spin_lock_bh(&qdio->req_q_lock); + if (zfcp_fsf_req_sbal_get(qdio)) goto out; - req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PORT, - adapter->pool.erp_req); + req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT, + qdio->adapter->pool.erp_req); if (unlikely(IS_ERR(req))) { retval = PTR_ERR(req); @@ -1715,7 +1719,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_wka_port *wka_port) } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); + sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -1728,7 +1732,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_wka_port *wka_port) if (retval) zfcp_fsf_req_free(req); out: - spin_unlock_bh(&adapter->req_q_lock); + spin_unlock_bh(&qdio->req_q_lock); return retval; } @@ -1790,16 +1794,16 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req 
*req) int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action) { struct qdio_buffer_element *sbale; - struct zfcp_adapter *adapter = erp_action->adapter; + struct zfcp_qdio *qdio = erp_action->adapter->qdio; struct zfcp_fsf_req *req; int retval = -EIO; - spin_lock_bh(&adapter->req_q_lock); - if (zfcp_fsf_req_sbal_get(adapter)) + spin_lock_bh(&qdio->req_q_lock); + if (zfcp_fsf_req_sbal_get(qdio)) goto out; - req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PHYSICAL_PORT, - adapter->pool.erp_req); + req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT, + qdio->adapter->pool.erp_req); if (IS_ERR(req)) { retval = PTR_ERR(req); @@ -1807,7 +1811,7 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action) } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); + sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -1824,7 +1828,7 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action) erp_action->fsf_req = NULL; } out: - spin_unlock_bh(&adapter->req_q_lock); + spin_unlock_bh(&qdio->req_q_lock); return retval; } @@ -1964,14 +1968,15 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action) { struct qdio_buffer_element *sbale; struct zfcp_adapter *adapter = erp_action->adapter; + struct zfcp_qdio *qdio = adapter->qdio; struct zfcp_fsf_req *req; int retval = -EIO; - spin_lock_bh(&adapter->req_q_lock); - if (zfcp_fsf_req_sbal_get(adapter)) + spin_lock_bh(&qdio->req_q_lock); + if (zfcp_fsf_req_sbal_get(qdio)) goto out; - req = zfcp_fsf_req_create(adapter, FSF_QTCB_OPEN_LUN, + req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN, adapter->pool.erp_req); if (IS_ERR(req)) { @@ -1980,7 +1985,7 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action) } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); + sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -2001,7 +2006,7 @@ int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action) erp_action->fsf_req = NULL; } out: - spin_unlock_bh(&adapter->req_q_lock); + spin_unlock_bh(&qdio->req_q_lock); return retval; } @@ -2050,16 +2055,16 @@ static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req) int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action) { struct qdio_buffer_element *sbale; - struct zfcp_adapter *adapter = erp_action->adapter; + struct zfcp_qdio *qdio = erp_action->adapter->qdio; struct zfcp_fsf_req *req; int retval = -EIO; - spin_lock_bh(&adapter->req_q_lock); - if (zfcp_fsf_req_sbal_get(adapter)) + spin_lock_bh(&qdio->req_q_lock); + if (zfcp_fsf_req_sbal_get(qdio)) goto out; - req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_LUN, - adapter->pool.erp_req); + req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN, + qdio->adapter->pool.erp_req); if (IS_ERR(req)) { retval = PTR_ERR(req); @@ -2067,7 +2072,7 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action) } req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); + sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_READ; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -2085,7 +2090,7 @@ int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action) erp_action->fsf_req = NULL; } out: - spin_unlock_bh(&adapter->req_q_lock); + spin_unlock_bh(&qdio->req_q_lock); return retval; } @@ -2353,18 
+2358,19 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, unsigned int sbtype = SBAL_FLAGS0_TYPE_READ; int real_bytes, retval = -EIO; struct zfcp_adapter *adapter = unit->port->adapter; + struct zfcp_qdio *qdio = adapter->qdio; if (unlikely(!(atomic_read(&unit->status) & ZFCP_STATUS_COMMON_UNBLOCKED))) return -EBUSY; - spin_lock(&adapter->req_q_lock); - if (atomic_read(&adapter->req_q.count) <= 0) { - atomic_inc(&adapter->qdio_outb_full); + spin_lock(&qdio->req_q_lock); + if (atomic_read(&qdio->req_q.count) <= 0) { + atomic_inc(&qdio->req_q_full); goto out; } - req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, + req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND, adapter->pool.scsi_req); if (IS_ERR(req)) { @@ -2424,7 +2430,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) + fcp_cmnd_iu->add_fcp_cdb_length + sizeof(u32); - real_bytes = zfcp_qdio_sbals_from_sg(adapter, &req->queue_req, sbtype, + real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->queue_req, sbtype, scsi_sglist(scsi_cmnd), FSF_MAX_SBALS_PER_REQ); if (unlikely(real_bytes < 0)) { @@ -2453,7 +2459,7 @@ failed_scsi_cmnd: zfcp_fsf_req_free(req); scsi_cmnd->host_scribble = NULL; out: - spin_unlock(&adapter->req_q_lock); + spin_unlock(&qdio->req_q_lock); return retval; } @@ -2468,18 +2474,18 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags) struct qdio_buffer_element *sbale; struct zfcp_fsf_req *req = NULL; struct fcp_cmnd_iu *fcp_cmnd_iu; - struct zfcp_adapter *adapter = unit->port->adapter; + struct zfcp_qdio *qdio = unit->port->adapter->qdio; if (unlikely(!(atomic_read(&unit->status) & ZFCP_STATUS_COMMON_UNBLOCKED))) return NULL; - spin_lock_bh(&adapter->req_q_lock); - if (zfcp_fsf_req_sbal_get(adapter)) + spin_lock_bh(&qdio->req_q_lock); + if (zfcp_fsf_req_sbal_get(qdio)) goto out; - req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, - adapter->pool.scsi_req); + req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND, + qdio->adapter->pool.scsi_req); if (IS_ERR(req)) { req = NULL; @@ -2496,7 +2502,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags) req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) + sizeof(u32); - sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); + sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE; sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; @@ -2511,7 +2517,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags) zfcp_fsf_req_free(req); req = NULL; out: - spin_unlock_bh(&adapter->req_q_lock); + spin_unlock_bh(&qdio->req_q_lock); return req; } @@ -2529,6 +2535,7 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter, struct zfcp_fsf_cfdc *fsf_cfdc) { struct qdio_buffer_element *sbale; + struct zfcp_qdio *qdio = adapter->qdio; struct zfcp_fsf_req *req = NULL; struct fsf_qtcb_bottom_support *bottom; int direction, retval = -EIO, bytes; @@ -2547,11 +2554,11 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter, return ERR_PTR(-EINVAL); } - spin_lock_bh(&adapter->req_q_lock); - if (zfcp_fsf_req_sbal_get(adapter)) + spin_lock_bh(&qdio->req_q_lock); + if (zfcp_fsf_req_sbal_get(qdio)) goto out; - req = zfcp_fsf_req_create(adapter, fsf_cfdc->command, NULL); + req = zfcp_fsf_req_create(qdio, fsf_cfdc->command, NULL); if (IS_ERR(req)) { retval = -EPERM; goto out; @@ -2559,15 +2566,16 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct 
zfcp_adapter *adapter, req->handler = zfcp_fsf_control_file_handler; - sbale = zfcp_qdio_sbale_req(adapter, &req->queue_req); + sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); sbale[0].flags |= direction; bottom = &req->qtcb->bottom.support; bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE; bottom->option = fsf_cfdc->option; - bytes = zfcp_qdio_sbals_from_sg(adapter, &req->queue_req, direction, - fsf_cfdc->sg, FSF_MAX_SBALS_PER_REQ); + bytes = zfcp_qdio_sbals_from_sg(qdio, &req->queue_req, + direction, fsf_cfdc->sg, + FSF_MAX_SBALS_PER_REQ); if (bytes != ZFCP_CFDC_MAX_SIZE) { zfcp_fsf_req_free(req); goto out; @@ -2576,7 +2584,7 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter, zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); retval = zfcp_fsf_req_send(req); out: - spin_unlock_bh(&adapter->req_q_lock); + spin_unlock_bh(&qdio->req_q_lock); if (!retval) { wait_for_completion(&req->completion); @@ -2590,9 +2598,10 @@ out: * @adapter: pointer to struct zfcp_adapter * @sbal_idx: response queue index of SBAL to be processed */ -void zfcp_fsf_reqid_check(struct zfcp_adapter *adapter, int sbal_idx) +void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx) { - struct qdio_buffer *sbal = adapter->resp_q.sbal[sbal_idx]; + struct zfcp_adapter *adapter = qdio->adapter; + struct qdio_buffer *sbal = qdio->resp_q.sbal[sbal_idx]; struct qdio_buffer_element *sbale; struct zfcp_fsf_req *fsf_req; unsigned long flags, req_id; @@ -2618,7 +2627,7 @@ void zfcp_fsf_reqid_check(struct zfcp_adapter *adapter, int sbal_idx) fsf_req->queue_req.sbal_response = sbal_idx; fsf_req->queue_req.qdio_inb_usage = - atomic_read(&adapter->resp_q.count); + atomic_read(&qdio->resp_q.count); zfcp_fsf_req_complete(fsf_req); if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY)) diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index e118874976f0..0b3f634509bf 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c @@ -36,18 +36,18 @@ zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx) /** * zfcp_qdio_free - free memory used by request- and resposne queue - * @adapter: pointer to the zfcp_adapter structure + * @qdio: pointer to the zfcp_qdio structure */ -void zfcp_qdio_free(struct zfcp_adapter *adapter) +void zfcp_qdio_free(struct zfcp_qdio *qdio) { struct qdio_buffer **sbal_req, **sbal_resp; int p; - if (adapter->ccw_device) - qdio_free(adapter->ccw_device); + if (qdio->adapter->ccw_device) + qdio_free(qdio->adapter->ccw_device); - sbal_req = adapter->req_q.sbal; - sbal_resp = adapter->resp_q.sbal; + sbal_req = qdio->req_q.sbal; + sbal_resp = qdio->resp_q.sbal; for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) { free_page((unsigned long) sbal_req[p]); @@ -55,8 +55,10 @@ void zfcp_qdio_free(struct zfcp_adapter *adapter) } } -static void zfcp_qdio_handler_error(struct zfcp_adapter *adapter, char *id) +static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id) { + struct zfcp_adapter *adapter = qdio->adapter; + dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n"); zfcp_erp_adapter_reopen(adapter, @@ -75,47 +77,47 @@ static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt) } /* this needs to be called prior to updating the queue fill level */ -static void zfcp_qdio_account(struct zfcp_adapter *adapter) +static void zfcp_qdio_account(struct zfcp_qdio *qdio) { ktime_t now; s64 span; int free, used; - spin_lock(&adapter->qdio_stat_lock); + spin_lock(&qdio->stat_lock); now = 
ktime_get(); - span = ktime_us_delta(now, adapter->req_q_time); - free = max(0, atomic_read(&adapter->req_q.count)); + span = ktime_us_delta(now, qdio->req_q_time); + free = max(0, atomic_read(&qdio->req_q.count)); used = QDIO_MAX_BUFFERS_PER_Q - free; - adapter->req_q_util += used * span; - adapter->req_q_time = now; - spin_unlock(&adapter->qdio_stat_lock); + qdio->req_q_util += used * span; + qdio->req_q_time = now; + spin_unlock(&qdio->stat_lock); } static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err, int queue_no, int first, int count, unsigned long parm) { - struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm; - struct zfcp_qdio_queue *queue = &adapter->req_q; + struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm; + struct zfcp_qdio_queue *queue = &qdio->req_q; if (unlikely(qdio_err)) { - zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count); - zfcp_qdio_handler_error(adapter, "qdireq1"); + zfcp_hba_dbf_event_qdio(qdio, qdio_err, first, count); + zfcp_qdio_handler_error(qdio, "qdireq1"); return; } /* cleanup all SBALs being program-owned now */ zfcp_qdio_zero_sbals(queue->sbal, first, count); - zfcp_qdio_account(adapter); + zfcp_qdio_account(qdio); atomic_add(count, &queue->count); - wake_up(&adapter->request_wq); + wake_up(&qdio->req_q_wq); } -static void zfcp_qdio_resp_put_back(struct zfcp_adapter *adapter, int processed) +static void zfcp_qdio_resp_put_back(struct zfcp_qdio *qdio, int processed) { - struct zfcp_qdio_queue *queue = &adapter->resp_q; - struct ccw_device *cdev = adapter->ccw_device; + struct zfcp_qdio_queue *queue = &qdio->resp_q; + struct ccw_device *cdev = qdio->adapter->ccw_device; u8 count, start = queue->first; unsigned int retval; @@ -137,12 +139,12 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err, int queue_no, int first, int count, unsigned long parm) { - struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm; + struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm; int sbal_idx, sbal_no; if (unlikely(qdio_err)) { - zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count); - zfcp_qdio_handler_error(adapter, "qdires1"); + zfcp_hba_dbf_event_qdio(qdio, qdio_err, first, count); + zfcp_qdio_handler_error(qdio, "qdires1"); return; } @@ -153,26 +155,26 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err, for (sbal_no = 0; sbal_no < count; sbal_no++) { sbal_idx = (first + sbal_no) % QDIO_MAX_BUFFERS_PER_Q; /* go through all SBALEs of SBAL */ - zfcp_fsf_reqid_check(adapter, sbal_idx); + zfcp_fsf_reqid_check(qdio, sbal_idx); } /* * put range of SBALs back to response queue * (including SBALs which have already been free before) */ - zfcp_qdio_resp_put_back(adapter, count); + zfcp_qdio_resp_put_back(qdio, count); } /** * zfcp_qdio_sbale_req - return ptr to SBALE of req_q for a struct zfcp_fsf_req - * @adapter: pointer to struct zfcp_adapter + * @qdio: pointer to struct zfcp_qdio * @q_rec: pointer to struct zfcp_queue_rec * Returns: pointer to qdio_buffer_element (SBALE) structure */ -struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_adapter *adapter, +struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req) { - return zfcp_qdio_sbale(&adapter->req_q, q_req->sbal_last, 0); + return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last, 0); } /** @@ -180,30 +182,30 @@ struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_adapter *adapter, * @fsf_req: pointer to struct fsf_req * Returns: pointer to qdio_buffer_element 
(SBALE) structure */ -struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_adapter *adapter, +struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req) { - return zfcp_qdio_sbale(&adapter->req_q, q_req->sbal_last, + return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last, q_req->sbale_curr); } -static void zfcp_qdio_sbal_limit(struct zfcp_adapter *adapter, +static void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req, int max_sbals) { - int count = atomic_read(&adapter->req_q.count); + int count = atomic_read(&qdio->req_q.count); count = min(count, max_sbals); q_req->sbal_limit = (q_req->sbal_first + count - 1) % QDIO_MAX_BUFFERS_PER_Q; } static struct qdio_buffer_element * -zfcp_qdio_sbal_chain(struct zfcp_adapter *adapter, struct zfcp_queue_req *q_req, +zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req, unsigned long sbtype) { struct qdio_buffer_element *sbale; /* set last entry flag in current SBALE of current SBAL */ - sbale = zfcp_qdio_sbale_curr(adapter, q_req); + sbale = zfcp_qdio_sbale_curr(qdio, q_req); sbale->flags |= SBAL_FLAGS_LAST_ENTRY; /* don't exceed last allowed SBAL */ @@ -211,7 +213,7 @@ zfcp_qdio_sbal_chain(struct zfcp_adapter *adapter, struct zfcp_queue_req *q_req, return NULL; /* set chaining flag in first SBALE of current SBAL */ - sbale = zfcp_qdio_sbale_req(adapter, q_req); + sbale = zfcp_qdio_sbale_req(qdio, q_req); sbale->flags |= SBAL_FLAGS0_MORE_SBALS; /* calculate index of next SBAL */ @@ -225,26 +227,26 @@ zfcp_qdio_sbal_chain(struct zfcp_adapter *adapter, struct zfcp_queue_req *q_req, q_req->sbale_curr = 0; /* set storage-block type for new SBAL */ - sbale = zfcp_qdio_sbale_curr(adapter, q_req); + sbale = zfcp_qdio_sbale_curr(qdio, q_req); sbale->flags |= sbtype; return sbale; } static struct qdio_buffer_element * -zfcp_qdio_sbale_next(struct zfcp_adapter *adapter, struct zfcp_queue_req *q_req, +zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req, unsigned int sbtype) { if (q_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL) - return zfcp_qdio_sbal_chain(adapter, q_req, sbtype); + return zfcp_qdio_sbal_chain(qdio, q_req, sbtype); q_req->sbale_curr++; - return zfcp_qdio_sbale_curr(adapter, q_req); + return zfcp_qdio_sbale_curr(qdio, q_req); } -static void zfcp_qdio_undo_sbals(struct zfcp_adapter *adapter, +static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req) { - struct qdio_buffer **sbal = adapter->req_q.sbal; + struct qdio_buffer **sbal = qdio->req_q.sbal; int first = q_req->sbal_first; int last = q_req->sbal_last; int count = (last - first + QDIO_MAX_BUFFERS_PER_Q) % @@ -252,7 +254,7 @@ static void zfcp_qdio_undo_sbals(struct zfcp_adapter *adapter, zfcp_qdio_zero_sbals(sbal, first, count); } -static int zfcp_qdio_fill_sbals(struct zfcp_adapter *adapter, +static int zfcp_qdio_fill_sbals(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req, unsigned int sbtype, void *start_addr, unsigned int total_length) @@ -264,10 +266,10 @@ static int zfcp_qdio_fill_sbals(struct zfcp_adapter *adapter, /* split segment up */ for (addr = start_addr, remaining = total_length; remaining > 0; addr += length, remaining -= length) { - sbale = zfcp_qdio_sbale_next(adapter, q_req, sbtype); + sbale = zfcp_qdio_sbale_next(qdio, q_req, sbtype); if (!sbale) { - atomic_inc(&adapter->qdio_outb_full); - zfcp_qdio_undo_sbals(adapter, q_req); + atomic_inc(&qdio->req_q_full); + zfcp_qdio_undo_sbals(qdio, q_req); return -EINVAL; } @@ -289,7 
+291,7 @@ static int zfcp_qdio_fill_sbals(struct zfcp_adapter *adapter, * @max_sbals: upper bound for number of SBALs to be used * Returns: number of bytes, or error (negativ) */ -int zfcp_qdio_sbals_from_sg(struct zfcp_adapter *adapter, +int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req, unsigned long sbtype, struct scatterlist *sg, int max_sbals) @@ -298,14 +300,14 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_adapter *adapter, int retval, bytes = 0; /* figure out last allowed SBAL */ - zfcp_qdio_sbal_limit(adapter, q_req, max_sbals); + zfcp_qdio_sbal_limit(qdio, q_req, max_sbals); /* set storage-block type for this request */ - sbale = zfcp_qdio_sbale_req(adapter, q_req); + sbale = zfcp_qdio_sbale_req(qdio, q_req); sbale->flags |= sbtype; for (; sg; sg = sg_next(sg)) { - retval = zfcp_qdio_fill_sbals(adapter, q_req, sbtype, + retval = zfcp_qdio_fill_sbals(qdio, q_req, sbtype, sg_virt(sg), sg->length); if (retval < 0) return retval; @@ -313,7 +315,7 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_adapter *adapter, } /* assume that no other SBALEs are to follow in the same SBAL */ - sbale = zfcp_qdio_sbale_curr(adapter, q_req); + sbale = zfcp_qdio_sbale_curr(qdio, q_req); sbale->flags |= SBAL_FLAGS_LAST_ENTRY; return bytes; @@ -321,20 +323,22 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_adapter *adapter, /** * zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO - * @fsf_req: pointer to struct zfcp_fsf_req + * @qdio: pointer to struct zfcp_qdio + * @q_req: pointer to struct zfcp_queue_req * Returns: 0 on success, error otherwise */ -int zfcp_qdio_send(struct zfcp_adapter *adapter, struct zfcp_queue_req *q_req) +int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req) { - struct zfcp_qdio_queue *req_q = &adapter->req_q; + struct zfcp_qdio_queue *req_q = &qdio->req_q; int first = q_req->sbal_first; int count = q_req->sbal_number; int retval; unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT; - zfcp_qdio_account(adapter); + zfcp_qdio_account(qdio); - retval = do_QDIO(adapter->ccw_device, qdio_flags, 0, first, count); + retval = do_QDIO(qdio->adapter->ccw_device, qdio_flags, 0, first, + count); if (unlikely(retval)) { zfcp_qdio_zero_sbals(req_q->sbal, first, count); return retval; @@ -347,63 +351,69 @@ int zfcp_qdio_send(struct zfcp_adapter *adapter, struct zfcp_queue_req *q_req) return 0; } + +static void zfcp_qdio_setup_init_data(struct qdio_initialize *id, + struct zfcp_qdio *qdio) +{ + + id->cdev = qdio->adapter->ccw_device; + id->q_format = QDIO_ZFCP_QFMT; + memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8); + ASCEBC(id->adapter_name, 8); + id->qib_param_field_format = 0; + id->qib_param_field = NULL; + id->input_slib_elements = NULL; + id->output_slib_elements = NULL; + id->no_input_qs = 1; + id->no_output_qs = 1; + id->input_handler = zfcp_qdio_int_resp; + id->output_handler = zfcp_qdio_int_req; + id->int_parm = (unsigned long) qdio; + id->flags = QDIO_INBOUND_0COPY_SBALS | + QDIO_OUTBOUND_0COPY_SBALS | QDIO_USE_OUTBOUND_PCIS; + id->input_sbal_addr_array = (void **) (qdio->resp_q.sbal); + id->output_sbal_addr_array = (void **) (qdio->req_q.sbal); + +} /** * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data * @adapter: pointer to struct zfcp_adapter * Returns: -ENOMEM on memory allocation error or return value from * qdio_allocate */ -int zfcp_qdio_allocate(struct zfcp_adapter *adapter) +int zfcp_qdio_allocate(struct zfcp_qdio *qdio, struct ccw_device *ccw_dev) { - struct qdio_initialize *init_data; + struct 
qdio_initialize init_data; - if (zfcp_qdio_buffers_enqueue(adapter->req_q.sbal) || - zfcp_qdio_buffers_enqueue(adapter->resp_q.sbal)) + if (zfcp_qdio_buffers_enqueue(qdio->req_q.sbal) || + zfcp_qdio_buffers_enqueue(qdio->resp_q.sbal)) return -ENOMEM; - init_data = &adapter->qdio_init_data; - - init_data->cdev = adapter->ccw_device; - init_data->q_format = QDIO_ZFCP_QFMT; - memcpy(init_data->adapter_name, dev_name(&adapter->ccw_device->dev), 8); - ASCEBC(init_data->adapter_name, 8); - init_data->qib_param_field_format = 0; - init_data->qib_param_field = NULL; - init_data->input_slib_elements = NULL; - init_data->output_slib_elements = NULL; - init_data->no_input_qs = 1; - init_data->no_output_qs = 1; - init_data->input_handler = zfcp_qdio_int_resp; - init_data->output_handler = zfcp_qdio_int_req; - init_data->int_parm = (unsigned long) adapter; - init_data->flags = QDIO_INBOUND_0COPY_SBALS | - QDIO_OUTBOUND_0COPY_SBALS | QDIO_USE_OUTBOUND_PCIS; - init_data->input_sbal_addr_array = - (void **) (adapter->resp_q.sbal); - init_data->output_sbal_addr_array = - (void **) (adapter->req_q.sbal); - - return qdio_allocate(init_data); + zfcp_qdio_setup_init_data(&init_data, qdio); + + return qdio_allocate(&init_data); } /** * zfcp_close_qdio - close qdio queues for an adapter + * @qdio: pointer to structure zfcp_qdio */ -void zfcp_qdio_close(struct zfcp_adapter *adapter) +void zfcp_qdio_close(struct zfcp_qdio *qdio) { struct zfcp_qdio_queue *req_q; int first, count; - if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) + if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) return; /* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */ - req_q = &adapter->req_q; - spin_lock_bh(&adapter->req_q_lock); - atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); - spin_unlock_bh(&adapter->req_q_lock); + req_q = &qdio->req_q; + spin_lock_bh(&qdio->req_q_lock); + atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status); + spin_unlock_bh(&qdio->req_q_lock); - qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR); + qdio_shutdown(qdio->adapter->ccw_device, + QDIO_FLAG_CLEANUP_USING_CLEAR); /* cleanup used outbound sbals */ count = atomic_read(&req_q->count); @@ -414,50 +424,54 @@ void zfcp_qdio_close(struct zfcp_adapter *adapter) } req_q->first = 0; atomic_set(&req_q->count, 0); - adapter->resp_q.first = 0; - atomic_set(&adapter->resp_q.count, 0); + qdio->resp_q.first = 0; + atomic_set(&qdio->resp_q.count, 0); } /** * zfcp_qdio_open - prepare and initialize response queue - * @adapter: pointer to struct zfcp_adapter + * @qdio: pointer to struct zfcp_qdio * Returns: 0 on success, otherwise -EIO */ -int zfcp_qdio_open(struct zfcp_adapter *adapter) +int zfcp_qdio_open(struct zfcp_qdio *qdio) { struct qdio_buffer_element *sbale; + struct qdio_initialize init_data; + struct ccw_device *cdev = qdio->adapter->ccw_device; int cc; - if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP) + if (atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP) return -EIO; - if (qdio_establish(&adapter->qdio_init_data)) + zfcp_qdio_setup_init_data(&init_data, qdio); + + if (qdio_establish(&init_data)) goto failed_establish; - if (qdio_activate(adapter->ccw_device)) + if (qdio_activate(cdev)) goto failed_qdio; for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) { - sbale = &(adapter->resp_q.sbal[cc]->element[0]); + sbale = &(qdio->resp_q.sbal[cc]->element[0]); sbale->length = 0; sbale->flags = SBAL_FLAGS_LAST_ENTRY; sbale->addr = 
NULL; } - if (do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_INPUT, 0, 0, + if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q)) goto failed_qdio; /* set index of first avalable SBALS / number of available SBALS */ - adapter->req_q.first = 0; - atomic_set(&adapter->req_q.count, QDIO_MAX_BUFFERS_PER_Q); + qdio->req_q.first = 0; + atomic_set(&qdio->req_q.count, QDIO_MAX_BUFFERS_PER_Q); return 0; failed_qdio: - qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR); + qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); failed_establish: - dev_err(&adapter->ccw_device->dev, + dev_err(&cdev->dev, "Setting up the QDIO connection to the FCP adapter failed\n"); return -EIO; } diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 2e13d41269a4..4414720c87f6 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -225,7 +225,7 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags) { struct zfcp_unit *unit = scpnt->device->hostdata; struct zfcp_adapter *adapter = unit->port->adapter; - struct zfcp_fsf_req *fsf_req; + struct zfcp_fsf_req *fsf_req = NULL; int retval = SUCCESS; int retry = 3; @@ -429,7 +429,7 @@ static struct fc_host_statistics *zfcp_get_fc_host_stats(struct Scsi_Host *host) if (!data) return NULL; - ret = zfcp_fsf_exchange_port_data_sync(adapter, data); + ret = zfcp_fsf_exchange_port_data_sync(adapter->qdio, data); if (ret) { kfree(data); return NULL; @@ -458,7 +458,7 @@ static void zfcp_reset_fc_host_stats(struct Scsi_Host *shost) if (!data) return; - ret = zfcp_fsf_exchange_port_data_sync(adapter, data); + ret = zfcp_fsf_exchange_port_data_sync(adapter->qdio, data); if (ret) kfree(data); else { diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index 0fe5cce818cb..a6cf62636834 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c @@ -425,7 +425,7 @@ static ssize_t zfcp_sysfs_adapter_util_show(struct device *dev, if (!qtcb_port) return -ENOMEM; - retval = zfcp_fsf_exchange_port_data_sync(adapter, qtcb_port); + retval = zfcp_fsf_exchange_port_data_sync(adapter->qdio, qtcb_port); if (!retval) retval = sprintf(buf, "%u %u %u\n", qtcb_port->cp_util, qtcb_port->cb_util, qtcb_port->a_util); @@ -451,7 +451,7 @@ static int zfcp_sysfs_adapter_ex_config(struct device *dev, if (!qtcb_config) return -ENOMEM; - retval = zfcp_fsf_exchange_config_data_sync(adapter, qtcb_config); + retval = zfcp_fsf_exchange_config_data_sync(adapter->qdio, qtcb_config); if (!retval) *stat_inf = qtcb_config->stat_info; @@ -492,15 +492,15 @@ static ssize_t zfcp_sysfs_adapter_q_full_show(struct device *dev, char *buf) { struct Scsi_Host *scsi_host = class_to_shost(dev); - struct zfcp_adapter *adapter = - (struct zfcp_adapter *) scsi_host->hostdata[0]; + struct zfcp_qdio *qdio = + ((struct zfcp_adapter *) scsi_host->hostdata[0])->qdio; u64 util; - spin_lock_bh(&adapter->qdio_stat_lock); - util = adapter->req_q_util; - spin_unlock_bh(&adapter->qdio_stat_lock); + spin_lock_bh(&qdio->stat_lock); + util = qdio->req_q_util; + spin_unlock_bh(&qdio->stat_lock); - return sprintf(buf, "%d %llu\n", atomic_read(&adapter->qdio_outb_full), + return sprintf(buf, "%d %llu\n", atomic_read(&qdio->req_q_full), (unsigned long long)util); } static DEVICE_ATTR(queue_full, S_IRUGO, zfcp_sysfs_adapter_q_full_show, NULL); -- cgit v1.2.3 From 799b76d09aeee558d18c1f5b93e63f58f1d1fc11 Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 18 Aug 2009 15:43:20 +0200 Subject: [SCSI] zfcp: 
Decouple gid_pn requests from erp Don't let the erp wait for gid_pn requests to complete. Instead, queue the gid_pn work, exit erp and let the finished gid_pn work trigger a new port reopen. Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 9 +++++++-- drivers/s390/scsi/zfcp_def.h | 1 + drivers/s390/scsi/zfcp_erp.c | 28 +-------------------------- drivers/s390/scsi/zfcp_ext.h | 6 ++---- drivers/s390/scsi/zfcp_fc.c | 45 +++++++++++++++++++++++++++++++++----------- drivers/s390/scsi/zfcp_fsf.c | 14 ++------------ 6 files changed, 47 insertions(+), 56 deletions(-) (limited to 'drivers/s390') diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index ed7211ef04eb..572dcd67e713 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -362,6 +362,11 @@ static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) if (!adapter->pool.erp_req) return -ENOMEM; + adapter->pool.gid_pn_req = + mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req)); + if (!adapter->pool.gid_pn_req) + return -ENOMEM; + adapter->pool.scsi_req = mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req)); if (!adapter->pool.scsi_req) @@ -379,7 +384,7 @@ static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) return -ENOMEM; adapter->pool.qtcb_pool = - mempool_create_slab_pool(3, zfcp_data.qtcb_cache); + mempool_create_slab_pool(4, zfcp_data.qtcb_cache); if (!adapter->pool.qtcb_pool) return -ENOMEM; @@ -652,7 +657,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, init_waitqueue_head(&port->remove_wq); INIT_LIST_HEAD(&port->unit_list_head); - INIT_WORK(&port->gid_pn_work, zfcp_erp_port_strategy_open_lookup); + INIT_WORK(&port->gid_pn_work, zfcp_fc_port_did_lookup); INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work); INIT_WORK(&port->rport_work, zfcp_scsi_rport_work); diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index bac5c497eab5..9a8ff0553421 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -265,6 +265,7 @@ struct zfcp_fsf_req; /* holds various memory pools of an adapter */ struct zfcp_adapter_mempool { mempool_t *erp_req; + mempool_t *gid_pn_req; mempool_t *scsi_req; mempool_t *scsi_abort; mempool_t *status_read_req; diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index feda1db56b23..67297d2744fb 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -26,7 +26,6 @@ enum zfcp_erp_steps { ZFCP_ERP_STEP_FSF_XCONFIG = 0x0001, ZFCP_ERP_STEP_PHYS_PORT_CLOSING = 0x0010, ZFCP_ERP_STEP_PORT_CLOSING = 0x0100, - ZFCP_ERP_STEP_NAMESERVER_LOOKUP = 0x0400, ZFCP_ERP_STEP_PORT_OPENING = 0x0800, ZFCP_ERP_STEP_UNIT_CLOSING = 0x1000, ZFCP_ERP_STEP_UNIT_OPENING = 0x2000, @@ -842,27 +841,6 @@ static int zfcp_erp_open_ptp_port(struct zfcp_erp_action *act) return zfcp_erp_port_strategy_open_port(act); } -void zfcp_erp_port_strategy_open_lookup(struct work_struct *work) -{ - int retval; - struct zfcp_port *port = container_of(work, struct zfcp_port, - gid_pn_work); - - retval = zfcp_fc_ns_gid_pn(&port->erp_action); - if (!retval) { - port->erp_action.step = ZFCP_ERP_STEP_NAMESERVER_LOOKUP; - goto out; - } - if (retval == -ENOMEM) { - zfcp_erp_notify(&port->erp_action, ZFCP_STATUS_ERP_LOWMEM); - goto out; - } - /* all other error condtions */ - zfcp_erp_notify(&port->erp_action, 0); -out: - zfcp_port_put(port); -} - static int 
zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act) { struct zfcp_adapter *adapter = act->adapter; @@ -880,12 +858,8 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act) if (!queue_work(adapter->work_queue, &port->gid_pn_work)) zfcp_port_put(port); - return ZFCP_ERP_CONTINUES; + return ZFCP_ERP_EXIT; } - /* fall through */ - case ZFCP_ERP_STEP_NAMESERVER_LOOKUP: - if (!port->d_id) - return ZFCP_ERP_FAILED; return zfcp_erp_port_strategy_open_port(act); case ZFCP_ERP_STEP_PORT_OPENING: diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index e97947d2f2ed..7650cec7f71f 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -94,13 +94,12 @@ extern void zfcp_erp_unit_access_denied(struct zfcp_unit *, char *, void *); extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, char *, void *); extern void zfcp_erp_timeout_handler(unsigned long); -extern void zfcp_erp_port_strategy_open_lookup(struct work_struct *); /* zfcp_fc.c */ extern int zfcp_scan_ports(struct zfcp_adapter *); extern void _zfcp_scan_ports_later(struct work_struct *); extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *); -extern int zfcp_fc_ns_gid_pn(struct zfcp_erp_action *); +extern void zfcp_fc_port_did_lookup(struct work_struct *); extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *); extern void zfcp_test_link(struct zfcp_port *); extern void zfcp_fc_link_test_work(struct work_struct *); @@ -128,8 +127,7 @@ extern struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *, extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *); extern int zfcp_fsf_status_read(struct zfcp_qdio *); extern int zfcp_status_read_refill(struct zfcp_adapter *adapter); -extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *, - struct zfcp_erp_action *); +extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *); extern int zfcp_fsf_send_els(struct zfcp_send_els *); extern int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *, struct scsi_cmnd *); diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 8921e16fdab7..bc0c9f54d0d8 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -282,15 +282,15 @@ static void zfcp_fc_ns_gid_pn_eval(unsigned long data) port->d_id = ct_iu_resp->d_id & ZFCP_DID_MASK; } -int static zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action, +static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port, struct zfcp_gid_pn_data *gid_pn) { - struct zfcp_adapter *adapter = erp_action->adapter; + struct zfcp_adapter *adapter = port->adapter; struct zfcp_fc_ns_handler_data compl_rec; int ret; /* setup parameters for send generic command */ - gid_pn->port = erp_action->port; + gid_pn->port = port; gid_pn->ct.wka_port = &adapter->gs->ds; gid_pn->ct.handler = zfcp_fc_ns_handler; gid_pn->ct.handler_data = (unsigned long) &compl_rec; @@ -309,12 +309,12 @@ int static zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action, gid_pn->ct_iu_req.header.options = ZFCP_CT_SYNCHRONOUS; gid_pn->ct_iu_req.header.cmd_rsp_code = ZFCP_CT_GID_PN; gid_pn->ct_iu_req.header.max_res_size = ZFCP_CT_SIZE_ONE_PAGE / 4; - gid_pn->ct_iu_req.wwpn = erp_action->port->wwpn; + gid_pn->ct_iu_req.wwpn = port->wwpn; init_completion(&compl_rec.done); compl_rec.handler = zfcp_fc_ns_gid_pn_eval; compl_rec.handler_data = (unsigned long) gid_pn; - ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.erp_req, erp_action); + ret = zfcp_fsf_send_ct(&gid_pn->ct, 
adapter->pool.gid_pn_req); if (!ret) wait_for_completion(&compl_rec.done); return ret; @@ -322,14 +322,14 @@ int static zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action, /** * zfcp_fc_ns_gid_pn_request - initiate GID_PN nameserver request - * @erp_action: pointer to zfcp_erp_action where GID_PN request is needed + * @port: port where GID_PN request is needed * return: -ENOMEM on error, 0 otherwise */ -int zfcp_fc_ns_gid_pn(struct zfcp_erp_action *erp_action) +static int zfcp_fc_ns_gid_pn(struct zfcp_port *port) { int ret; struct zfcp_gid_pn_data *gid_pn; - struct zfcp_adapter *adapter = erp_action->adapter; + struct zfcp_adapter *adapter = port->adapter; gid_pn = mempool_alloc(adapter->pool.gid_pn_data, GFP_ATOMIC); if (!gid_pn) @@ -341,7 +341,7 @@ int zfcp_fc_ns_gid_pn(struct zfcp_erp_action *erp_action) if (ret) goto out; - ret = zfcp_fc_ns_gid_pn_request(erp_action, gid_pn); + ret = zfcp_fc_ns_gid_pn_request(port, gid_pn); zfcp_wka_port_put(&adapter->gs->ds); out: @@ -349,6 +349,29 @@ out: return ret; } +void zfcp_fc_port_did_lookup(struct work_struct *work) +{ + int ret; + struct zfcp_port *port = container_of(work, struct zfcp_port, + gid_pn_work); + + ret = zfcp_fc_ns_gid_pn(port); + if (ret) { + /* could not issue gid_pn for some reason */ + zfcp_erp_adapter_reopen(port->adapter, 0, "fcgpn_1", NULL); + goto out; + } + + if (!port->d_id) { + zfcp_erp_port_failed(port, "fcgpn_2", NULL); + goto out; + } + + zfcp_erp_port_reopen(port, 0, "fcgpn_3", NULL); +out: + zfcp_port_put(port); +} + /** * zfcp_fc_plogi_evaluate - evaluate PLOGI playload * @port: zfcp_port structure @@ -551,7 +574,7 @@ static int zfcp_scan_issue_gpn_ft(struct zfcp_gpn_ft *gpn_ft, init_completion(&compl_rec.done); compl_rec.handler = NULL; - ret = zfcp_fsf_send_ct(ct, NULL, NULL); + ret = zfcp_fsf_send_ct(ct, NULL); if (!ret) wait_for_completion(&compl_rec.done); return ret; @@ -840,7 +863,7 @@ int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *job) ct_fc_job->ct.completion = NULL; ct_fc_job->job = job; - ret = zfcp_fsf_send_ct(&ct_fc_job->ct, NULL, NULL); + ret = zfcp_fsf_send_ct(&ct_fc_job->ct, NULL); if (ret) { kfree(ct_fc_job); zfcp_wka_port_put(ct_fc_job->ct.wka_port); diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index b9a16e4b48b4..048f1a848f34 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -1069,10 +1069,8 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS) * @ct: pointer to struct zfcp_send_ct with data for request * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req - * @erp_action: if non-null the Generic Service request sent within ERP */ -int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool, - struct zfcp_erp_action *erp_action) +int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool) { struct zfcp_wka_port *wka_port = ct->wka_port; struct zfcp_qdio *qdio = wka_port->adapter->qdio; @@ -1103,13 +1101,7 @@ int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool, req->data = ct; zfcp_san_dbf_event_ct_request(req); - - if (erp_action) { - erp_action->fsf_req = req; - req->erp_action = erp_action; - zfcp_fsf_start_erp_timer(req); - } else - zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); + zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); ret = zfcp_fsf_req_send(req); if (ret) @@ -1119,8 +1111,6 @@ int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool, failed_send: zfcp_fsf_req_free(req); - if 
(erp_action) - erp_action->fsf_req = NULL; out: spin_unlock_bh(&qdio->req_q_lock); return ret; -- cgit v1.2.3 From 5771710bd5edfafcb8656f49b93690a6fae5a4d2 Mon Sep 17 00:00:00 2001 From: Swen Schillig Date: Tue, 18 Aug 2009 15:43:21 +0200 Subject: [SCSI] zfcp: Update dbf calls Change the dbf data and functions to use the zfcp_dbf prefix throughout the code. Also change the calls to dbf to use zfcp_dbf instead of zfcp_adapter. Signed-off-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 7 +- drivers/s390/scsi/zfcp_dbf.c | 408 ++++++++++++++++++++---------------------- drivers/s390/scsi/zfcp_dbf.h | 152 ++++++++-------- drivers/s390/scsi/zfcp_erp.c | 49 +++-- drivers/s390/scsi/zfcp_ext.h | 52 +++--- drivers/s390/scsi/zfcp_fc.c | 2 +- drivers/s390/scsi/zfcp_fsf.c | 24 +-- drivers/s390/scsi/zfcp_qdio.c | 6 +- drivers/s390/scsi/zfcp_scsi.c | 27 ++- 9 files changed, 347 insertions(+), 380 deletions(-) (limited to 'drivers/s390') diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 572dcd67e713..7a50f64c36bd 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -517,6 +517,7 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) goto qdio_mem_failed; adapter->qdio->adapter = adapter; + ccw_device->handler = NULL; adapter->ccw_device = ccw_device; atomic_set(&adapter->refcount, 0); @@ -530,7 +531,7 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) if (zfcp_reqlist_alloc(adapter)) goto failed_low_mem_buffers; - if (zfcp_adapter_debug_register(adapter)) + if (zfcp_dbf_adapter_register(adapter)) goto debug_register_failed; if (zfcp_setup_adapter_work_queue(adapter)) @@ -577,7 +578,7 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) sysfs_failed: zfcp_destroy_adapter_work_queue(adapter); work_queue_failed: - zfcp_adapter_debug_unregister(adapter); + zfcp_dbf_adapter_unregister(adapter->dbf); debug_register_failed: dev_set_drvdata(&ccw_device->dev, NULL); kfree(adapter->req_list); @@ -616,7 +617,7 @@ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter) return; zfcp_destroy_adapter_work_queue(adapter); - zfcp_adapter_debug_unregister(adapter); + zfcp_dbf_adapter_unregister(adapter->dbf); zfcp_qdio_free(adapter->qdio); zfcp_free_low_mem_buffers(adapter); kfree(adapter->req_list); diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index 3179b08bda6a..c066428b2878 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -120,9 +120,9 @@ static int zfcp_dbf_view_header(debug_info_t *id, struct debug_view *view, return p - out_buf; } -void _zfcp_hba_dbf_event_fsf_response(const char *tag2, int level, - struct zfcp_fsf_req *fsf_req, - struct zfcp_dbf *dbf) +void _zfcp_dbf_hba_fsf_response(const char *tag2, int level, + struct zfcp_fsf_req *fsf_req, + struct zfcp_dbf *dbf) { struct fsf_qtcb *qtcb = fsf_req->qtcb; union fsf_prot_status_qual *prot_status_qual = @@ -132,11 +132,11 @@ void _zfcp_hba_dbf_event_fsf_response(const char *tag2, int level, struct zfcp_port *port; struct zfcp_unit *unit; struct zfcp_send_els *send_els; - struct zfcp_hba_dbf_record *rec = &dbf->hba_dbf_buf; - struct zfcp_hba_dbf_record_response *response = &rec->u.response; + struct zfcp_dbf_hba_record *rec = &dbf->hba_buf; + struct zfcp_dbf_hba_record_response *response = &rec->u.response; unsigned long flags; - spin_lock_irqsave(&dbf->hba_dbf_lock, flags); + spin_lock_irqsave(&dbf->hba_lock, flags); memset(rec, 0, sizeof(*rec)); strncpy(rec->tag, 
"resp", ZFCP_DBF_TAG_SIZE); strncpy(rec->tag2, tag2, ZFCP_DBF_TAG_SIZE); @@ -203,7 +203,7 @@ void _zfcp_hba_dbf_event_fsf_response(const char *tag2, int level, break; } - debug_event(dbf->hba_dbf, level, rec, sizeof(*rec)); + debug_event(dbf->hba, level, rec, sizeof(*rec)); /* have fcp channel microcode fixed to use as little as possible */ if (fsf_req->fsf_command != FSF_QTCB_FCP_CMND) { @@ -211,27 +211,25 @@ void _zfcp_hba_dbf_event_fsf_response(const char *tag2, int level, char *buf = (char *)qtcb + qtcb->header.log_start; int len = qtcb->header.log_length; for (; len && !buf[len - 1]; len--); - zfcp_dbf_hexdump(dbf->hba_dbf, rec, sizeof(*rec), level, buf, + zfcp_dbf_hexdump(dbf->hba, rec, sizeof(*rec), level, buf, len); } - spin_unlock_irqrestore(&dbf->hba_dbf_lock, flags); + spin_unlock_irqrestore(&dbf->hba_lock, flags); } -void _zfcp_hba_dbf_event_fsf_unsol(const char *tag, int level, - struct zfcp_adapter *adapter, - struct fsf_status_read_buffer *status_buffer) +void _zfcp_dbf_hba_fsf_unsol(const char *tag, int level, struct zfcp_dbf *dbf, + struct fsf_status_read_buffer *status_buffer) { - struct zfcp_dbf *dbf = adapter->dbf; - struct zfcp_hba_dbf_record *rec = &dbf->hba_dbf_buf; + struct zfcp_dbf_hba_record *rec = &dbf->hba_buf; unsigned long flags; - spin_lock_irqsave(&dbf->hba_dbf_lock, flags); + spin_lock_irqsave(&dbf->hba_lock, flags); memset(rec, 0, sizeof(*rec)); strncpy(rec->tag, "stat", ZFCP_DBF_TAG_SIZE); strncpy(rec->tag2, tag, ZFCP_DBF_TAG_SIZE); - rec->u.status.failed = atomic_read(&adapter->stat_miss); + rec->u.status.failed = atomic_read(&dbf->adapter->stat_miss); if (status_buffer != NULL) { rec->u.status.status_type = status_buffer->status_type; rec->u.status.status_subtype = status_buffer->status_subtype; @@ -268,58 +266,54 @@ void _zfcp_hba_dbf_event_fsf_unsol(const char *tag, int level, &status_buffer->payload, rec->u.status.payload_size); } - debug_event(dbf->hba_dbf, level, rec, sizeof(*rec)); - spin_unlock_irqrestore(&dbf->hba_dbf_lock, flags); + debug_event(dbf->hba, level, rec, sizeof(*rec)); + spin_unlock_irqrestore(&dbf->hba_lock, flags); } /** - * zfcp_hba_dbf_event_qdio - trace event for QDIO related failure + * zfcp_dbf_hba_qdio - trace event for QDIO related failure * @qdio: qdio structure affected by this QDIO related event * @qdio_error: as passed by qdio module * @sbal_index: first buffer with error condition, as passed by qdio module * @sbal_count: number of buffers affected, as passed by qdio module */ -void zfcp_hba_dbf_event_qdio(struct zfcp_qdio *qdio, - unsigned int qdio_error, int sbal_index, - int sbal_count) +void zfcp_dbf_hba_qdio(struct zfcp_dbf *dbf, unsigned int qdio_error, + int sbal_index, int sbal_count) { - struct zfcp_dbf *dbf = qdio->adapter->dbf; - struct zfcp_hba_dbf_record *r = &dbf->hba_dbf_buf; + struct zfcp_dbf_hba_record *r = &dbf->hba_buf; unsigned long flags; - spin_lock_irqsave(&dbf->hba_dbf_lock, flags); + spin_lock_irqsave(&dbf->hba_lock, flags); memset(r, 0, sizeof(*r)); strncpy(r->tag, "qdio", ZFCP_DBF_TAG_SIZE); r->u.qdio.qdio_error = qdio_error; r->u.qdio.sbal_index = sbal_index; r->u.qdio.sbal_count = sbal_count; - debug_event(dbf->hba_dbf, 0, r, sizeof(*r)); - spin_unlock_irqrestore(&dbf->hba_dbf_lock, flags); + debug_event(dbf->hba, 0, r, sizeof(*r)); + spin_unlock_irqrestore(&dbf->hba_lock, flags); } /** - * zfcp_hba_dbf_event_berr - trace event for bit error threshold - * @adapter: adapter affected by this QDIO related event + * zfcp_dbf_hba_berr - trace event for bit error threshold + * @dbf: dbf structure 
affected by this QDIO related event * @req: fsf request */ -void zfcp_hba_dbf_event_berr(struct zfcp_adapter *adapter, - struct zfcp_fsf_req *req) +void zfcp_dbf_hba_berr(struct zfcp_dbf *dbf, struct zfcp_fsf_req *req) { - struct zfcp_dbf *dbf = adapter->dbf; - struct zfcp_hba_dbf_record *r = &dbf->hba_dbf_buf; + struct zfcp_dbf_hba_record *r = &dbf->hba_buf; struct fsf_status_read_buffer *sr_buf = req->data; struct fsf_bit_error_payload *err = &sr_buf->payload.bit_error; unsigned long flags; - spin_lock_irqsave(&dbf->hba_dbf_lock, flags); + spin_lock_irqsave(&dbf->hba_lock, flags); memset(r, 0, sizeof(*r)); strncpy(r->tag, "berr", ZFCP_DBF_TAG_SIZE); memcpy(&r->u.berr, err, sizeof(struct fsf_bit_error_payload)); - debug_event(dbf->hba_dbf, 0, r, sizeof(*r)); - spin_unlock_irqrestore(&dbf->hba_dbf_lock, flags); + debug_event(dbf->hba, 0, r, sizeof(*r)); + spin_unlock_irqrestore(&dbf->hba_lock, flags); } -static void zfcp_hba_dbf_view_response(char **p, - struct zfcp_hba_dbf_record_response *r) +static void zfcp_dbf_hba_view_response(char **p, + struct zfcp_dbf_hba_record_response *r) { struct timespec t; @@ -380,8 +374,8 @@ static void zfcp_hba_dbf_view_response(char **p, } } -static void zfcp_hba_dbf_view_status(char **p, - struct zfcp_hba_dbf_record_status *r) +static void zfcp_dbf_hba_view_status(char **p, + struct zfcp_dbf_hba_record_status *r) { zfcp_dbf_out(p, "failed", "0x%02x", r->failed); zfcp_dbf_out(p, "status_type", "0x%08x", r->status_type); @@ -393,14 +387,14 @@ static void zfcp_hba_dbf_view_status(char **p, r->payload_size); } -static void zfcp_hba_dbf_view_qdio(char **p, struct zfcp_hba_dbf_record_qdio *r) +static void zfcp_dbf_hba_view_qdio(char **p, struct zfcp_dbf_hba_record_qdio *r) { zfcp_dbf_out(p, "qdio_error", "0x%08x", r->qdio_error); zfcp_dbf_out(p, "sbal_index", "0x%02x", r->sbal_index); zfcp_dbf_out(p, "sbal_count", "0x%02x", r->sbal_count); } -static void zfcp_hba_dbf_view_berr(char **p, struct fsf_bit_error_payload *r) +static void zfcp_dbf_hba_view_berr(char **p, struct fsf_bit_error_payload *r) { zfcp_dbf_out(p, "link_failures", "%d", r->link_failure_error_count); zfcp_dbf_out(p, "loss_of_sync_err", "%d", r->loss_of_sync_error_count); @@ -424,10 +418,10 @@ static void zfcp_hba_dbf_view_berr(char **p, struct fsf_bit_error_payload *r) r->current_transmit_b2b_credit); } -static int zfcp_hba_dbf_view_format(debug_info_t *id, struct debug_view *view, +static int zfcp_dbf_hba_view_format(debug_info_t *id, struct debug_view *view, char *out_buf, const char *in_buf) { - struct zfcp_hba_dbf_record *r = (struct zfcp_hba_dbf_record *)in_buf; + struct zfcp_dbf_hba_record *r = (struct zfcp_dbf_hba_record *)in_buf; char *p = out_buf; if (strncmp(r->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0) @@ -438,45 +432,42 @@ static int zfcp_hba_dbf_view_format(debug_info_t *id, struct debug_view *view, zfcp_dbf_tag(&p, "tag2", r->tag2); if (strncmp(r->tag, "resp", ZFCP_DBF_TAG_SIZE) == 0) - zfcp_hba_dbf_view_response(&p, &r->u.response); + zfcp_dbf_hba_view_response(&p, &r->u.response); else if (strncmp(r->tag, "stat", ZFCP_DBF_TAG_SIZE) == 0) - zfcp_hba_dbf_view_status(&p, &r->u.status); + zfcp_dbf_hba_view_status(&p, &r->u.status); else if (strncmp(r->tag, "qdio", ZFCP_DBF_TAG_SIZE) == 0) - zfcp_hba_dbf_view_qdio(&p, &r->u.qdio); + zfcp_dbf_hba_view_qdio(&p, &r->u.qdio); else if (strncmp(r->tag, "berr", ZFCP_DBF_TAG_SIZE) == 0) - zfcp_hba_dbf_view_berr(&p, &r->u.berr); + zfcp_dbf_hba_view_berr(&p, &r->u.berr); if (strncmp(r->tag, "resp", ZFCP_DBF_TAG_SIZE) != 0) p += sprintf(p, "\n"); 
return p - out_buf; } -static struct debug_view zfcp_hba_dbf_view = { - "structured", - NULL, - &zfcp_dbf_view_header, - &zfcp_hba_dbf_view_format, - NULL, - NULL +static struct debug_view zfcp_dbf_hba_view = { + .name = "structured", + .header_proc = zfcp_dbf_view_header, + .format_proc = zfcp_dbf_hba_view_format, }; -static const char *zfcp_rec_dbf_tags[] = { +static const char *zfcp_dbf_rec_tags[] = { [ZFCP_REC_DBF_ID_THREAD] = "thread", [ZFCP_REC_DBF_ID_TARGET] = "target", [ZFCP_REC_DBF_ID_TRIGGER] = "trigger", [ZFCP_REC_DBF_ID_ACTION] = "action", }; -static int zfcp_rec_dbf_view_format(debug_info_t *id, struct debug_view *view, +static int zfcp_dbf_rec_view_format(debug_info_t *id, struct debug_view *view, char *buf, const char *_rec) { - struct zfcp_rec_dbf_record *r = (struct zfcp_rec_dbf_record *)_rec; + struct zfcp_dbf_rec_record *r = (struct zfcp_dbf_rec_record *)_rec; char *p = buf; char hint[ZFCP_DBF_ID_SIZE + 1]; memcpy(hint, r->id2, ZFCP_DBF_ID_SIZE); hint[ZFCP_DBF_ID_SIZE] = 0; - zfcp_dbf_outs(&p, "tag", zfcp_rec_dbf_tags[r->id]); + zfcp_dbf_outs(&p, "tag", zfcp_dbf_rec_tags[r->id]); zfcp_dbf_outs(&p, "hint", hint); switch (r->id) { case ZFCP_REC_DBF_ID_THREAD: @@ -514,25 +505,22 @@ static int zfcp_rec_dbf_view_format(debug_info_t *id, struct debug_view *view, return p - buf; } -static struct debug_view zfcp_rec_dbf_view = { - "structured", - NULL, - &zfcp_dbf_view_header, - &zfcp_rec_dbf_view_format, - NULL, - NULL +static struct debug_view zfcp_dbf_rec_view = { + .name = "structured", + .header_proc = zfcp_dbf_view_header, + .format_proc = zfcp_dbf_rec_view_format, }; /** - * zfcp_rec_dbf_event_thread - trace event related to recovery thread operation + * zfcp_dbf_rec_thread - trace event related to recovery thread operation * @id2: identifier for event - * @adapter: adapter + * @dbf: reference to dbf structure * This function assumes that the caller is holding erp_lock. */ -void zfcp_rec_dbf_event_thread(char *id2, struct zfcp_adapter *adapter) +void zfcp_dbf_rec_thread(char *id2, struct zfcp_dbf *dbf) { - struct zfcp_dbf *dbf = adapter->dbf; - struct zfcp_rec_dbf_record *r = &dbf->rec_dbf_buf; + struct zfcp_adapter *adapter = dbf->adapter; + struct zfcp_dbf_rec_record *r = &dbf->rec_buf; unsigned long flags = 0; struct list_head *entry; unsigned ready = 0, running = 0, total; @@ -543,42 +531,41 @@ void zfcp_rec_dbf_event_thread(char *id2, struct zfcp_adapter *adapter) running++; total = adapter->erp_total_count; - spin_lock_irqsave(&dbf->rec_dbf_lock, flags); + spin_lock_irqsave(&dbf->rec_lock, flags); memset(r, 0, sizeof(*r)); r->id = ZFCP_REC_DBF_ID_THREAD; memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE); r->u.thread.total = total; r->u.thread.ready = ready; r->u.thread.running = running; - debug_event(dbf->rec_dbf, 6, r, sizeof(*r)); - spin_unlock_irqrestore(&dbf->rec_dbf_lock, flags); + debug_event(dbf->rec, 6, r, sizeof(*r)); + spin_unlock_irqrestore(&dbf->rec_lock, flags); } /** - * zfcp_rec_dbf_event_thread - trace event related to recovery thread operation + * zfcp_dbf_rec_thread - trace event related to recovery thread operation * @id2: identifier for event * @adapter: adapter * This function assumes that the caller does not hold erp_lock. 
*/ -void zfcp_rec_dbf_event_thread_lock(char *id2, struct zfcp_adapter *adapter) +void zfcp_dbf_rec_thread_lock(char *id2, struct zfcp_dbf *dbf) { + struct zfcp_adapter *adapter = dbf->adapter; unsigned long flags; read_lock_irqsave(&adapter->erp_lock, flags); - zfcp_rec_dbf_event_thread(id2, adapter); + zfcp_dbf_rec_thread(id2, dbf); read_unlock_irqrestore(&adapter->erp_lock, flags); } -static void zfcp_rec_dbf_event_target(char *id2, void *ref, - struct zfcp_adapter *adapter, - atomic_t *status, atomic_t *erp_count, - u64 wwpn, u32 d_id, u64 fcp_lun) +static void zfcp_dbf_rec_target(char *id2, void *ref, struct zfcp_dbf *dbf, + atomic_t *status, atomic_t *erp_count, u64 wwpn, + u32 d_id, u64 fcp_lun) { - struct zfcp_dbf *dbf = adapter->dbf; - struct zfcp_rec_dbf_record *r = &dbf->rec_dbf_buf; + struct zfcp_dbf_rec_record *r = &dbf->rec_buf; unsigned long flags; - spin_lock_irqsave(&dbf->rec_dbf_lock, flags); + spin_lock_irqsave(&dbf->rec_lock, flags); memset(r, 0, sizeof(*r)); r->id = ZFCP_REC_DBF_ID_TARGET; memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE); @@ -588,56 +575,57 @@ static void zfcp_rec_dbf_event_target(char *id2, void *ref, r->u.target.d_id = d_id; r->u.target.fcp_lun = fcp_lun; r->u.target.erp_count = atomic_read(erp_count); - debug_event(dbf->rec_dbf, 3, r, sizeof(*r)); - spin_unlock_irqrestore(&dbf->rec_dbf_lock, flags); + debug_event(dbf->rec, 3, r, sizeof(*r)); + spin_unlock_irqrestore(&dbf->rec_lock, flags); } /** - * zfcp_rec_dbf_event_adapter - trace event for adapter state change + * zfcp_dbf_rec_adapter - trace event for adapter state change * @id: identifier for trigger of state change * @ref: additional reference (e.g. request) - * @adapter: adapter + * @dbf: reference to dbf structure */ -void zfcp_rec_dbf_event_adapter(char *id, void *ref, - struct zfcp_adapter *adapter) +void zfcp_dbf_rec_adapter(char *id, void *ref, struct zfcp_dbf *dbf) { - zfcp_rec_dbf_event_target(id, ref, adapter, &adapter->status, + struct zfcp_adapter *adapter = dbf->adapter; + + zfcp_dbf_rec_target(id, ref, dbf, &adapter->status, &adapter->erp_counter, 0, 0, 0); } /** - * zfcp_rec_dbf_event_port - trace event for port state change + * zfcp_dbf_rec_port - trace event for port state change * @id: identifier for trigger of state change * @ref: additional reference (e.g. request) * @port: port */ -void zfcp_rec_dbf_event_port(char *id, void *ref, struct zfcp_port *port) +void zfcp_dbf_rec_port(char *id, void *ref, struct zfcp_port *port) { - struct zfcp_adapter *adapter = port->adapter; + struct zfcp_dbf *dbf = port->adapter->dbf; - zfcp_rec_dbf_event_target(id, ref, adapter, &port->status, + zfcp_dbf_rec_target(id, ref, dbf, &port->status, &port->erp_counter, port->wwpn, port->d_id, 0); } /** - * zfcp_rec_dbf_event_unit - trace event for unit state change + * zfcp_dbf_rec_unit - trace event for unit state change * @id: identifier for trigger of state change * @ref: additional reference (e.g. 
request) * @unit: unit */ -void zfcp_rec_dbf_event_unit(char *id, void *ref, struct zfcp_unit *unit) +void zfcp_dbf_rec_unit(char *id, void *ref, struct zfcp_unit *unit) { struct zfcp_port *port = unit->port; - struct zfcp_adapter *adapter = port->adapter; + struct zfcp_dbf *dbf = port->adapter->dbf; - zfcp_rec_dbf_event_target(id, ref, adapter, &unit->status, + zfcp_dbf_rec_target(id, ref, dbf, &unit->status, &unit->erp_counter, port->wwpn, port->d_id, unit->fcp_lun); } /** - * zfcp_rec_dbf_event_trigger - trace event for triggered error recovery + * zfcp_dbf_rec_trigger - trace event for triggered error recovery * @id2: identifier for error recovery trigger * @ref: additional reference (e.g. request) * @want: originally requested error recovery action @@ -647,15 +635,15 @@ void zfcp_rec_dbf_event_unit(char *id, void *ref, struct zfcp_unit *unit) * @port: port * @unit: unit */ -void zfcp_rec_dbf_event_trigger(char *id2, void *ref, u8 want, u8 need, - void *action, struct zfcp_adapter *adapter, - struct zfcp_port *port, struct zfcp_unit *unit) +void zfcp_dbf_rec_trigger(char *id2, void *ref, u8 want, u8 need, void *action, + struct zfcp_adapter *adapter, struct zfcp_port *port, + struct zfcp_unit *unit) { struct zfcp_dbf *dbf = adapter->dbf; - struct zfcp_rec_dbf_record *r = &dbf->rec_dbf_buf; + struct zfcp_dbf_rec_record *r = &dbf->rec_buf; unsigned long flags; - spin_lock_irqsave(&dbf->rec_dbf_lock, flags); + spin_lock_irqsave(&dbf->rec_lock, flags); memset(r, 0, sizeof(*r)); r->id = ZFCP_REC_DBF_ID_TRIGGER; memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE); @@ -672,23 +660,22 @@ void zfcp_rec_dbf_event_trigger(char *id2, void *ref, u8 want, u8 need, r->u.trigger.us = atomic_read(&unit->status); r->u.trigger.fcp_lun = unit->fcp_lun; } - debug_event(dbf->rec_dbf, action ? 1 : 4, r, sizeof(*r)); - spin_unlock_irqrestore(&dbf->rec_dbf_lock, flags); + debug_event(dbf->rec, action ? 
1 : 4, r, sizeof(*r)); + spin_unlock_irqrestore(&dbf->rec_lock, flags); } /** - * zfcp_rec_dbf_event_action - trace event showing progress of recovery action + * zfcp_dbf_rec_action - trace event showing progress of recovery action * @id2: identifier * @erp_action: error recovery action struct pointer */ -void zfcp_rec_dbf_event_action(char *id2, struct zfcp_erp_action *erp_action) +void zfcp_dbf_rec_action(char *id2, struct zfcp_erp_action *erp_action) { - struct zfcp_adapter *adapter = erp_action->adapter; - struct zfcp_dbf *dbf = adapter->dbf; - struct zfcp_rec_dbf_record *r = &dbf->rec_dbf_buf; + struct zfcp_dbf *dbf = erp_action->adapter->dbf; + struct zfcp_dbf_rec_record *r = &dbf->rec_buf; unsigned long flags; - spin_lock_irqsave(&dbf->rec_dbf_lock, flags); + spin_lock_irqsave(&dbf->rec_lock, flags); memset(r, 0, sizeof(*r)); r->id = ZFCP_REC_DBF_ID_ACTION; memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE); @@ -696,27 +683,27 @@ void zfcp_rec_dbf_event_action(char *id2, struct zfcp_erp_action *erp_action) r->u.action.status = erp_action->status; r->u.action.step = erp_action->step; r->u.action.fsf_req = (unsigned long)erp_action->fsf_req; - debug_event(dbf->rec_dbf, 5, r, sizeof(*r)); - spin_unlock_irqrestore(&dbf->rec_dbf_lock, flags); + debug_event(dbf->rec, 5, r, sizeof(*r)); + spin_unlock_irqrestore(&dbf->rec_lock, flags); } /** - * zfcp_san_dbf_event_ct_request - trace event for issued CT request + * zfcp_dbf_san_ct_request - trace event for issued CT request * @fsf_req: request containing issued CT data */ -void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req) +void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *fsf_req) { struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; struct zfcp_wka_port *wka_port = ct->wka_port; struct zfcp_adapter *adapter = wka_port->adapter; struct zfcp_dbf *dbf = adapter->dbf; struct ct_hdr *hdr = sg_virt(ct->req); - struct zfcp_san_dbf_record *r = &dbf->san_dbf_buf; - struct zfcp_san_dbf_record_ct_request *oct = &r->u.ct_req; + struct zfcp_dbf_san_record *r = &dbf->san_buf; + struct zfcp_dbf_san_record_ct_request *oct = &r->u.ct_req; int level = 3; unsigned long flags; - spin_lock_irqsave(&dbf->san_dbf_lock, flags); + spin_lock_irqsave(&dbf->san_lock, flags); memset(r, 0, sizeof(*r)); strncpy(r->tag, "octc", ZFCP_DBF_TAG_SIZE); r->fsf_reqid = fsf_req->req_id; @@ -731,29 +718,29 @@ void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req) oct->max_res_size = hdr->max_res_size; oct->len = min((int)ct->req->length - (int)sizeof(struct ct_hdr), ZFCP_DBF_SAN_MAX_PAYLOAD); - debug_event(dbf->san_dbf, level, r, sizeof(*r)); - zfcp_dbf_hexdump(dbf->san_dbf, r, sizeof(*r), level, + debug_event(dbf->san, level, r, sizeof(*r)); + zfcp_dbf_hexdump(dbf->san, r, sizeof(*r), level, (void *)hdr + sizeof(struct ct_hdr), oct->len); - spin_unlock_irqrestore(&dbf->san_dbf_lock, flags); + spin_unlock_irqrestore(&dbf->san_lock, flags); } /** - * zfcp_san_dbf_event_ct_response - trace event for completion of CT request + * zfcp_dbf_san_ct_response - trace event for completion of CT request * @fsf_req: request containing CT response */ -void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req) +void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *fsf_req) { struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; struct zfcp_wka_port *wka_port = ct->wka_port; struct zfcp_adapter *adapter = wka_port->adapter; struct ct_hdr *hdr = sg_virt(ct->resp); struct zfcp_dbf *dbf = adapter->dbf; - struct zfcp_san_dbf_record *r = &dbf->san_dbf_buf; - 
struct zfcp_san_dbf_record_ct_response *rct = &r->u.ct_resp; + struct zfcp_dbf_san_record *r = &dbf->san_buf; + struct zfcp_dbf_san_record_ct_response *rct = &r->u.ct_resp; int level = 3; unsigned long flags; - spin_lock_irqsave(&dbf->san_dbf_lock, flags); + spin_lock_irqsave(&dbf->san_lock, flags); memset(r, 0, sizeof(*r)); strncpy(r->tag, "rctc", ZFCP_DBF_TAG_SIZE); r->fsf_reqid = fsf_req->req_id; @@ -768,23 +755,22 @@ void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req) rct->max_res_size = hdr->max_res_size; rct->len = min((int)ct->resp->length - (int)sizeof(struct ct_hdr), ZFCP_DBF_SAN_MAX_PAYLOAD); - debug_event(dbf->san_dbf, level, r, sizeof(*r)); - zfcp_dbf_hexdump(dbf->san_dbf, r, sizeof(*r), level, + debug_event(dbf->san, level, r, sizeof(*r)); + zfcp_dbf_hexdump(dbf->san, r, sizeof(*r), level, (void *)hdr + sizeof(struct ct_hdr), rct->len); - spin_unlock_irqrestore(&dbf->san_dbf_lock, flags); + spin_unlock_irqrestore(&dbf->san_lock, flags); } -static void zfcp_san_dbf_event_els(const char *tag, int level, - struct zfcp_fsf_req *fsf_req, u32 s_id, - u32 d_id, u8 ls_code, void *buffer, - int buflen) +static void zfcp_dbf_san_els(const char *tag, int level, + struct zfcp_fsf_req *fsf_req, u32 s_id, u32 d_id, + u8 ls_code, void *buffer, int buflen) { struct zfcp_adapter *adapter = fsf_req->adapter; struct zfcp_dbf *dbf = adapter->dbf; - struct zfcp_san_dbf_record *rec = &dbf->san_dbf_buf; + struct zfcp_dbf_san_record *rec = &dbf->san_buf; unsigned long flags; - spin_lock_irqsave(&dbf->san_dbf_lock, flags); + spin_lock_irqsave(&dbf->san_lock, flags); memset(rec, 0, sizeof(*rec)); strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE); rec->fsf_reqid = fsf_req->req_id; @@ -792,45 +778,45 @@ static void zfcp_san_dbf_event_els(const char *tag, int level, rec->s_id = s_id; rec->d_id = d_id; rec->u.els.ls_code = ls_code; - debug_event(dbf->san_dbf, level, rec, sizeof(*rec)); - zfcp_dbf_hexdump(dbf->san_dbf, rec, sizeof(*rec), level, + debug_event(dbf->san, level, rec, sizeof(*rec)); + zfcp_dbf_hexdump(dbf->san, rec, sizeof(*rec), level, buffer, min(buflen, ZFCP_DBF_SAN_MAX_PAYLOAD)); - spin_unlock_irqrestore(&dbf->san_dbf_lock, flags); + spin_unlock_irqrestore(&dbf->san_lock, flags); } /** - * zfcp_san_dbf_event_els_request - trace event for issued ELS + * zfcp_dbf_san_els_request - trace event for issued ELS * @fsf_req: request containing issued ELS */ -void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *fsf_req) +void zfcp_dbf_san_els_request(struct zfcp_fsf_req *fsf_req) { struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data; - zfcp_san_dbf_event_els("oels", 2, fsf_req, + zfcp_dbf_san_els("oels", 2, fsf_req, fc_host_port_id(els->adapter->scsi_host), els->d_id, *(u8 *) sg_virt(els->req), sg_virt(els->req), els->req->length); } /** - * zfcp_san_dbf_event_els_response - trace event for completed ELS + * zfcp_dbf_san_els_response - trace event for completed ELS * @fsf_req: request containing ELS response */ -void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *fsf_req) +void zfcp_dbf_san_els_response(struct zfcp_fsf_req *fsf_req) { struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data; - zfcp_san_dbf_event_els("rels", 2, fsf_req, els->d_id, + zfcp_dbf_san_els("rels", 2, fsf_req, els->d_id, fc_host_port_id(els->adapter->scsi_host), *(u8 *)sg_virt(els->req), sg_virt(els->resp), els->resp->length); } /** - * zfcp_san_dbf_event_incoming_els - trace event for incomig ELS + * zfcp_dbf_san_incoming_els - trace event for incomig ELS * @fsf_req: request containing 
unsolicited status buffer with incoming ELS */ -void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *fsf_req) +void zfcp_dbf_san_incoming_els(struct zfcp_fsf_req *fsf_req) { struct zfcp_adapter *adapter = fsf_req->adapter; struct fsf_status_read_buffer *buf = @@ -838,16 +824,16 @@ void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *fsf_req) int length = (int)buf->length - (int)((void *)&buf->payload - (void *)buf); - zfcp_san_dbf_event_els("iels", 1, fsf_req, buf->d_id, + zfcp_dbf_san_els("iels", 1, fsf_req, buf->d_id, fc_host_port_id(adapter->scsi_host), buf->payload.data[0], (void *)buf->payload.data, length); } -static int zfcp_san_dbf_view_format(debug_info_t *id, struct debug_view *view, +static int zfcp_dbf_san_view_format(debug_info_t *id, struct debug_view *view, char *out_buf, const char *in_buf) { - struct zfcp_san_dbf_record *r = (struct zfcp_san_dbf_record *)in_buf; + struct zfcp_dbf_san_record *r = (struct zfcp_dbf_san_record *)in_buf; char *p = out_buf; if (strncmp(r->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0) @@ -860,7 +846,7 @@ static int zfcp_san_dbf_view_format(debug_info_t *id, struct debug_view *view, zfcp_dbf_out(&p, "d_id", "0x%06x", r->d_id); if (strncmp(r->tag, "octc", ZFCP_DBF_TAG_SIZE) == 0) { - struct zfcp_san_dbf_record_ct_request *ct = &r->u.ct_req; + struct zfcp_dbf_san_record_ct_request *ct = &r->u.ct_req; zfcp_dbf_out(&p, "cmd_req_code", "0x%04x", ct->cmd_req_code); zfcp_dbf_out(&p, "revision", "0x%02x", ct->revision); zfcp_dbf_out(&p, "gs_type", "0x%02x", ct->gs_type); @@ -868,7 +854,7 @@ static int zfcp_san_dbf_view_format(debug_info_t *id, struct debug_view *view, zfcp_dbf_out(&p, "options", "0x%02x", ct->options); zfcp_dbf_out(&p, "max_res_size", "0x%04x", ct->max_res_size); } else if (strncmp(r->tag, "rctc", ZFCP_DBF_TAG_SIZE) == 0) { - struct zfcp_san_dbf_record_ct_response *ct = &r->u.ct_resp; + struct zfcp_dbf_san_record_ct_response *ct = &r->u.ct_resp; zfcp_dbf_out(&p, "cmd_rsp_code", "0x%04x", ct->cmd_rsp_code); zfcp_dbf_out(&p, "revision", "0x%02x", ct->revision); zfcp_dbf_out(&p, "reason_code", "0x%02x", ct->reason_code); @@ -878,34 +864,30 @@ static int zfcp_san_dbf_view_format(debug_info_t *id, struct debug_view *view, } else if (strncmp(r->tag, "oels", ZFCP_DBF_TAG_SIZE) == 0 || strncmp(r->tag, "rels", ZFCP_DBF_TAG_SIZE) == 0 || strncmp(r->tag, "iels", ZFCP_DBF_TAG_SIZE) == 0) { - struct zfcp_san_dbf_record_els *els = &r->u.els; + struct zfcp_dbf_san_record_els *els = &r->u.els; zfcp_dbf_out(&p, "ls_code", "0x%02x", els->ls_code); } return p - out_buf; } -static struct debug_view zfcp_san_dbf_view = { - "structured", - NULL, - &zfcp_dbf_view_header, - &zfcp_san_dbf_view_format, - NULL, - NULL +static struct debug_view zfcp_dbf_san_view = { + .name = "structured", + .header_proc = zfcp_dbf_view_header, + .format_proc = zfcp_dbf_san_view_format, }; -void _zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level, - struct zfcp_dbf *dbf, struct scsi_cmnd *scsi_cmnd, - struct zfcp_fsf_req *fsf_req, - unsigned long old_req_id) +void _zfcp_dbf_scsi(const char *tag, const char *tag2, int level, + struct zfcp_dbf *dbf, struct scsi_cmnd *scsi_cmnd, + struct zfcp_fsf_req *fsf_req, unsigned long old_req_id) { - struct zfcp_scsi_dbf_record *rec = &dbf->scsi_dbf_buf; + struct zfcp_dbf_scsi_record *rec = &dbf->scsi_buf; struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec; unsigned long flags; struct fcp_rsp_iu *fcp_rsp; char *fcp_rsp_info = NULL, *fcp_sns_info = NULL; int offset = 0, buflen = 0; - spin_lock_irqsave(&dbf->scsi_dbf_lock, 
flags); + spin_lock_irqsave(&dbf->scsi_lock, flags); do { memset(rec, 0, sizeof(*rec)); if (offset == 0) { @@ -959,20 +941,20 @@ void _zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level, dump->offset = offset; dump->size = min(buflen - offset, (int)sizeof(struct - zfcp_scsi_dbf_record) - + zfcp_dbf_scsi_record) - (int)sizeof(struct zfcp_dbf_dump)); memcpy(dump->data, fcp_sns_info + offset, dump->size); offset += dump->size; } - debug_event(dbf->scsi_dbf, level, rec, sizeof(*rec)); + debug_event(dbf->scsi, level, rec, sizeof(*rec)); } while (offset < buflen); - spin_unlock_irqrestore(&dbf->scsi_dbf_lock, flags); + spin_unlock_irqrestore(&dbf->scsi_lock, flags); } -static int zfcp_scsi_dbf_view_format(debug_info_t *id, struct debug_view *view, +static int zfcp_dbf_scsi_view_format(debug_info_t *id, struct debug_view *view, char *out_buf, const char *in_buf) { - struct zfcp_scsi_dbf_record *r = (struct zfcp_scsi_dbf_record *)in_buf; + struct zfcp_dbf_scsi_record *r = (struct zfcp_dbf_scsi_record *)in_buf; struct timespec t; char *p = out_buf; @@ -1013,13 +995,10 @@ static int zfcp_scsi_dbf_view_format(debug_info_t *id, struct debug_view *view, return p - out_buf; } -static struct debug_view zfcp_scsi_dbf_view = { - "structured", - NULL, - &zfcp_dbf_view_header, - &zfcp_scsi_dbf_view_format, - NULL, - NULL +static struct debug_view zfcp_dbf_scsi_view = { + .name = "structured", + .header_proc = zfcp_dbf_view_header, + .format_proc = zfcp_dbf_scsi_view_format, }; static debug_info_t *zfcp_dbf_reg(const char *name, int level, @@ -1043,7 +1022,7 @@ static debug_info_t *zfcp_dbf_reg(const char *name, int level, * @adapter: pointer to adapter for which debug features should be registered * return: -ENOMEM on error, 0 otherwise */ -int zfcp_adapter_debug_register(struct zfcp_adapter *adapter) +int zfcp_dbf_adapter_register(struct zfcp_adapter *adapter) { char dbf_name[DEBUG_MAX_NAME_LEN]; struct zfcp_dbf *dbf; @@ -1052,63 +1031,60 @@ int zfcp_adapter_debug_register(struct zfcp_adapter *adapter) if (!dbf) return -ENOMEM; - spin_lock_init(&dbf->hba_dbf_lock); - spin_lock_init(&dbf->san_dbf_lock); - spin_lock_init(&dbf->scsi_dbf_lock); - spin_lock_init(&dbf->rec_dbf_lock); + dbf->adapter = adapter; + + spin_lock_init(&dbf->hba_lock); + spin_lock_init(&dbf->san_lock); + spin_lock_init(&dbf->scsi_lock); + spin_lock_init(&dbf->rec_lock); /* debug feature area which records recovery activity */ sprintf(dbf_name, "zfcp_%s_rec", dev_name(&adapter->ccw_device->dev)); - dbf->rec_dbf = zfcp_dbf_reg(dbf_name, 3, &zfcp_rec_dbf_view, - sizeof(struct zfcp_rec_dbf_record)); - if (!dbf->rec_dbf) - goto fail_rec; + dbf->rec = zfcp_dbf_reg(dbf_name, 3, &zfcp_dbf_rec_view, + sizeof(struct zfcp_dbf_rec_record)); + if (!dbf->rec) + goto err_out; /* debug feature area which records HBA (FSF and QDIO) conditions */ sprintf(dbf_name, "zfcp_%s_hba", dev_name(&adapter->ccw_device->dev)); - dbf->hba_dbf = zfcp_dbf_reg(dbf_name, 3, &zfcp_hba_dbf_view, - sizeof(struct zfcp_hba_dbf_record)); - if (!dbf->hba_dbf) - goto fail_hba; + dbf->hba = zfcp_dbf_reg(dbf_name, 3, &zfcp_dbf_hba_view, + sizeof(struct zfcp_dbf_hba_record)); + if (!dbf->hba) + goto err_out; /* debug feature area which records SAN command failures and recovery */ sprintf(dbf_name, "zfcp_%s_san", dev_name(&adapter->ccw_device->dev)); - dbf->san_dbf = zfcp_dbf_reg(dbf_name, 6, &zfcp_san_dbf_view, - sizeof(struct zfcp_san_dbf_record)); - if (!dbf->san_dbf) - goto fail_san; + dbf->san = zfcp_dbf_reg(dbf_name, 6, &zfcp_dbf_san_view, + sizeof(struct 
zfcp_dbf_san_record)); + if (!dbf->san) + goto err_out; /* debug feature area which records SCSI command failures and recovery */ sprintf(dbf_name, "zfcp_%s_scsi", dev_name(&adapter->ccw_device->dev)); - dbf->scsi_dbf = zfcp_dbf_reg(dbf_name, 3, &zfcp_scsi_dbf_view, - sizeof(struct zfcp_scsi_dbf_record)); - if (!dbf->scsi_dbf) - goto fail_scsi; + dbf->scsi = zfcp_dbf_reg(dbf_name, 3, &zfcp_dbf_scsi_view, + sizeof(struct zfcp_dbf_scsi_record)); + if (!dbf->scsi) + goto err_out; adapter->dbf = dbf; return 0; -fail_scsi: - debug_unregister(dbf->san_dbf); -fail_san: - debug_unregister(dbf->hba_dbf); -fail_hba: - debug_unregister(dbf->rec_dbf); -fail_rec: - kfree(dbf); +err_out: + zfcp_dbf_adapter_unregister(dbf); return -ENOMEM; } /** * zfcp_adapter_debug_unregister - unregisters debug feature for an adapter - * @adapter: pointer to adapter for which debug features should be unregistered + * @dbf: pointer to dbf for which debug features should be unregistered */ -void zfcp_adapter_debug_unregister(struct zfcp_adapter *adapter) +void zfcp_dbf_adapter_unregister(struct zfcp_dbf *dbf) { - debug_unregister(adapter->dbf->scsi_dbf); - debug_unregister(adapter->dbf->san_dbf); - debug_unregister(adapter->dbf->hba_dbf); - debug_unregister(adapter->dbf->rec_dbf); - kfree(adapter->dbf); - adapter->dbf = NULL; + debug_unregister(dbf->scsi); + debug_unregister(dbf->san); + debug_unregister(dbf->hba); + debug_unregister(dbf->rec); + dbf->adapter->dbf = NULL; + kfree(dbf); } + diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h index bceaff449033..6b1461e8f847 100644 --- a/drivers/s390/scsi/zfcp_dbf.h +++ b/drivers/s390/scsi/zfcp_dbf.h @@ -37,13 +37,13 @@ struct zfcp_dbf_dump { u8 data[]; /* dump data */ } __attribute__ ((packed)); -struct zfcp_rec_dbf_record_thread { +struct zfcp_dbf_rec_record_thread { u32 total; u32 ready; u32 running; }; -struct zfcp_rec_dbf_record_target { +struct zfcp_dbf_rec_record_target { u64 ref; u32 status; u32 d_id; @@ -52,7 +52,7 @@ struct zfcp_rec_dbf_record_target { u32 erp_count; }; -struct zfcp_rec_dbf_record_trigger { +struct zfcp_dbf_rec_record_trigger { u8 want; u8 need; u32 as; @@ -64,21 +64,21 @@ struct zfcp_rec_dbf_record_trigger { u64 fcp_lun; }; -struct zfcp_rec_dbf_record_action { +struct zfcp_dbf_rec_record_action { u32 status; u32 step; u64 action; u64 fsf_req; }; -struct zfcp_rec_dbf_record { +struct zfcp_dbf_rec_record { u8 id; char id2[7]; union { - struct zfcp_rec_dbf_record_action action; - struct zfcp_rec_dbf_record_thread thread; - struct zfcp_rec_dbf_record_target target; - struct zfcp_rec_dbf_record_trigger trigger; + struct zfcp_dbf_rec_record_action action; + struct zfcp_dbf_rec_record_thread thread; + struct zfcp_dbf_rec_record_target target; + struct zfcp_dbf_rec_record_trigger trigger; } u; }; @@ -89,7 +89,7 @@ enum { ZFCP_REC_DBF_ID_TRIGGER, }; -struct zfcp_hba_dbf_record_response { +struct zfcp_dbf_hba_record_response { u32 fsf_command; u64 fsf_reqid; u32 fsf_seqno; @@ -127,7 +127,7 @@ struct zfcp_hba_dbf_record_response { } u; } __attribute__ ((packed)); -struct zfcp_hba_dbf_record_status { +struct zfcp_dbf_hba_record_status { u8 failed; u32 status_type; u32 status_subtype; @@ -141,24 +141,24 @@ struct zfcp_hba_dbf_record_status { u8 payload[ZFCP_DBF_UNSOL_PAYLOAD]; } __attribute__ ((packed)); -struct zfcp_hba_dbf_record_qdio { +struct zfcp_dbf_hba_record_qdio { u32 qdio_error; u8 sbal_index; u8 sbal_count; } __attribute__ ((packed)); -struct zfcp_hba_dbf_record { +struct zfcp_dbf_hba_record { u8 tag[ZFCP_DBF_TAG_SIZE]; u8 
tag2[ZFCP_DBF_TAG_SIZE]; union { - struct zfcp_hba_dbf_record_response response; - struct zfcp_hba_dbf_record_status status; - struct zfcp_hba_dbf_record_qdio qdio; + struct zfcp_dbf_hba_record_response response; + struct zfcp_dbf_hba_record_status status; + struct zfcp_dbf_hba_record_qdio qdio; struct fsf_bit_error_payload berr; } u; } __attribute__ ((packed)); -struct zfcp_san_dbf_record_ct_request { +struct zfcp_dbf_san_record_ct_request { u16 cmd_req_code; u8 revision; u8 gs_type; @@ -168,7 +168,7 @@ struct zfcp_san_dbf_record_ct_request { u32 len; } __attribute__ ((packed)); -struct zfcp_san_dbf_record_ct_response { +struct zfcp_dbf_san_record_ct_response { u16 cmd_rsp_code; u8 revision; u8 reason_code; @@ -178,27 +178,27 @@ struct zfcp_san_dbf_record_ct_response { u32 len; } __attribute__ ((packed)); -struct zfcp_san_dbf_record_els { +struct zfcp_dbf_san_record_els { u8 ls_code; u32 len; } __attribute__ ((packed)); -struct zfcp_san_dbf_record { +struct zfcp_dbf_san_record { u8 tag[ZFCP_DBF_TAG_SIZE]; u64 fsf_reqid; u32 fsf_seqno; u32 s_id; u32 d_id; union { - struct zfcp_san_dbf_record_ct_request ct_req; - struct zfcp_san_dbf_record_ct_response ct_resp; - struct zfcp_san_dbf_record_els els; + struct zfcp_dbf_san_record_ct_request ct_req; + struct zfcp_dbf_san_record_ct_response ct_resp; + struct zfcp_dbf_san_record_els els; } u; #define ZFCP_DBF_SAN_MAX_PAYLOAD 1024 u8 payload[32]; } __attribute__ ((packed)); -struct zfcp_scsi_dbf_record { +struct zfcp_dbf_scsi_record { u8 tag[ZFCP_DBF_TAG_SIZE]; u8 tag2[ZFCP_DBF_TAG_SIZE]; u32 scsi_id; @@ -225,86 +225,84 @@ struct zfcp_scsi_dbf_record { } __attribute__ ((packed)); struct zfcp_dbf { - debug_info_t *rec_dbf; - debug_info_t *hba_dbf; - debug_info_t *san_dbf; - debug_info_t *scsi_dbf; - spinlock_t rec_dbf_lock; - spinlock_t hba_dbf_lock; - spinlock_t san_dbf_lock; - spinlock_t scsi_dbf_lock; - struct zfcp_rec_dbf_record rec_dbf_buf; - struct zfcp_hba_dbf_record hba_dbf_buf; - struct zfcp_san_dbf_record san_dbf_buf; - struct zfcp_scsi_dbf_record scsi_dbf_buf; + debug_info_t *rec; + debug_info_t *hba; + debug_info_t *san; + debug_info_t *scsi; + spinlock_t rec_lock; + spinlock_t hba_lock; + spinlock_t san_lock; + spinlock_t scsi_lock; + struct zfcp_dbf_rec_record rec_buf; + struct zfcp_dbf_hba_record hba_buf; + struct zfcp_dbf_san_record san_buf; + struct zfcp_dbf_scsi_record scsi_buf; + struct zfcp_adapter *adapter; }; static inline -void zfcp_hba_dbf_event_fsf_resp(const char *tag2, int level, - struct zfcp_fsf_req *req, struct zfcp_dbf *dbf) +void zfcp_dbf_hba_fsf_resp(const char *tag2, int level, + struct zfcp_fsf_req *req, struct zfcp_dbf *dbf) { - if (level <= dbf->hba_dbf->level) - _zfcp_hba_dbf_event_fsf_response(tag2, level, req, dbf); + if (level <= dbf->hba->level) + _zfcp_dbf_hba_fsf_response(tag2, level, req, dbf); } /** - * zfcp_hba_dbf_event_fsf_response - trace event for request completion + * zfcp_dbf_hba_fsf_response - trace event for request completion * @fsf_req: request that has been completed */ -static inline void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *req) +static inline void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req) { struct zfcp_dbf *dbf = req->adapter->dbf; struct fsf_qtcb *qtcb = req->qtcb; if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) && (qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) { - zfcp_hba_dbf_event_fsf_resp("perr", 1, req, dbf); + zfcp_dbf_hba_fsf_resp("perr", 1, req, dbf); } else if (qtcb->header.fsf_status != FSF_GOOD) { - zfcp_hba_dbf_event_fsf_resp("ferr", 1, 
req, dbf); + zfcp_dbf_hba_fsf_resp("ferr", 1, req, dbf); } else if ((req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) || (req->fsf_command == FSF_QTCB_OPEN_LUN)) { - zfcp_hba_dbf_event_fsf_resp("open", 4, req, dbf); + zfcp_dbf_hba_fsf_resp("open", 4, req, dbf); } else if (qtcb->header.log_length) { - zfcp_hba_dbf_event_fsf_resp("qtcb", 5, req, dbf); + zfcp_dbf_hba_fsf_resp("qtcb", 5, req, dbf); } else { - zfcp_hba_dbf_event_fsf_resp("norm", 6, req, dbf); + zfcp_dbf_hba_fsf_resp("norm", 6, req, dbf); } } /** - * zfcp_hba_dbf_event_fsf_unsol - trace event for an unsolicited status buffer + * zfcp_dbf_hba_fsf_unsol - trace event for an unsolicited status buffer * @tag: tag indicating which kind of unsolicited status has been received - * @adapter: adapter that has issued the unsolicited status buffer + * @dbf: reference to dbf structure * @status_buffer: buffer containing payload of unsolicited status */ static inline -void zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter, - struct fsf_status_read_buffer *buf) +void zfcp_dbf_hba_fsf_unsol(const char *tag, struct zfcp_dbf *dbf, + struct fsf_status_read_buffer *buf) { - struct zfcp_dbf *dbf = adapter->dbf; int level = 2; - if (level <= dbf->hba_dbf->level) - _zfcp_hba_dbf_event_fsf_unsol(tag, level, adapter, buf); + if (level <= dbf->hba->level) + _zfcp_dbf_hba_fsf_unsol(tag, level, dbf, buf); } static inline -void zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level, - struct zfcp_adapter *adapter, struct scsi_cmnd *scmd, - struct zfcp_fsf_req *req, unsigned long old_id) +void zfcp_dbf_scsi(const char *tag, const char *tag2, int level, + struct zfcp_dbf *dbf, struct scsi_cmnd *scmd, + struct zfcp_fsf_req *req, unsigned long old_id) { - struct zfcp_dbf *dbf = adapter->dbf; - - if (level <= dbf->scsi_dbf->level) - _zfcp_scsi_dbf_event(tag, tag2, level, dbf, scmd, req, old_id); + if (level <= dbf->scsi->level) + _zfcp_dbf_scsi(tag, tag2, level, dbf, scmd, req, old_id); } /** - * zfcp_scsi_dbf_event_result - trace event for SCSI command completion + * zfcp_dbf_scsi_result - trace event for SCSI command completion * @tag: tag indicating success or failure of SCSI command * @level: trace level applicable for this event * @adapter: adapter that has been used to issue the SCSI command @@ -312,16 +310,14 @@ void zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level, * @fsf_req: request used to issue SCSI command (might be NULL) */ static inline -void zfcp_scsi_dbf_event_result(const char *tag, int level, - struct zfcp_adapter *adapter, - struct scsi_cmnd *scmd, - struct zfcp_fsf_req *fsf_req) +void zfcp_dbf_scsi_result(const char *tag, int level, struct zfcp_dbf *dbf, + struct scsi_cmnd *scmd, struct zfcp_fsf_req *fsf_req) { - zfcp_scsi_dbf_event("rslt", tag, level, adapter, scmd, fsf_req, 0); + zfcp_dbf_scsi("rslt", tag, level, dbf, scmd, fsf_req, 0); } /** - * zfcp_scsi_dbf_event_abort - trace event for SCSI command abort + * zfcp_dbf_scsi_abort - trace event for SCSI command abort * @tag: tag indicating success or failure of abort operation * @adapter: adapter that has been used to issue SCSI command to be aborted * @scmd: SCSI command to be aborted * @new_req: request containing abort (might be NULL) * @old_id: identifier of request containing SCSI command to be aborted */ static inline -void
zfcp_dbf_scsi_abort(const char *tag, struct zfcp_dbf *dbf, + struct scsi_cmnd *scmd, struct zfcp_fsf_req *new_req, + unsigned long old_id) { - zfcp_scsi_dbf_event("abrt", tag, 1, adapter, scmd, new_req, old_id); + zfcp_dbf_scsi("abrt", tag, 1, dbf, scmd, new_req, old_id); } /** - * zfcp_scsi_dbf_event_devreset - trace event for Logical Unit or Target Reset + * zfcp_dbf_scsi_devreset - trace event for Logical Unit or Target Reset * @tag: tag indicating success or failure of reset operation * @flag: indicates type of reset (Target Reset, Logical Unit Reset) * @unit: unit that needs reset * @scsi_cmnd: SCSI command which caused this error recovery */ static inline -void zfcp_scsi_dbf_event_devreset(const char *tag, u8 flag, - struct zfcp_unit *unit, - struct scsi_cmnd *scsi_cmnd) +void zfcp_dbf_scsi_devreset(const char *tag, u8 flag, struct zfcp_unit *unit, + struct scsi_cmnd *scsi_cmnd) { - zfcp_scsi_dbf_event(flag == FCP_TARGET_RESET ? "trst" : "lrst", tag, 1, - unit->port->adapter, scsi_cmnd, NULL, 0); + zfcp_dbf_scsi(flag == FCP_TARGET_RESET ? "trst" : "lrst", tag, 1, + unit->port->adapter->dbf, scsi_cmnd, NULL, 0); } #endif /* ZFCP_DBF_H */ diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 67297d2744fb..373567eda8f6 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -74,9 +74,9 @@ static void zfcp_erp_action_ready(struct zfcp_erp_action *act) struct zfcp_adapter *adapter = act->adapter; list_move(&act->list, &act->adapter->erp_ready_head); - zfcp_rec_dbf_event_action("erardy1", act); + zfcp_dbf_rec_action("erardy1", act); up(&adapter->erp_ready_sem); - zfcp_rec_dbf_event_thread("erardy2", adapter); + zfcp_dbf_rec_thread("erardy2", adapter->dbf); } static void zfcp_erp_action_dismiss(struct zfcp_erp_action *act) @@ -227,11 +227,10 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter, ++adapter->erp_total_count; list_add_tail(&act->list, &adapter->erp_ready_head); up(&adapter->erp_ready_sem); - zfcp_rec_dbf_event_thread("eracte1", adapter); + zfcp_dbf_rec_thread("eracte1", adapter->dbf); retval = 0; out: - zfcp_rec_dbf_event_trigger(id, ref, want, need, act, - adapter, port, unit); + zfcp_dbf_rec_trigger(id, ref, want, need, act, adapter, port, unit); return retval; } @@ -442,28 +441,28 @@ static int status_change_clear(unsigned long mask, atomic_t *status) static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter) { if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status)) - zfcp_rec_dbf_event_adapter("eraubl1", NULL, adapter); + zfcp_dbf_rec_adapter("eraubl1", NULL, adapter->dbf); atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status); } static void zfcp_erp_port_unblock(struct zfcp_port *port) { if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status)) - zfcp_rec_dbf_event_port("erpubl1", NULL, port); + zfcp_dbf_rec_port("erpubl1", NULL, port); atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status); } static void zfcp_erp_unit_unblock(struct zfcp_unit *unit) { if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status)) - zfcp_rec_dbf_event_unit("eruubl1", NULL, unit); + zfcp_dbf_rec_unit("eruubl1", NULL, unit); atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status); } static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action) { list_move(&erp_action->list, &erp_action->adapter->erp_running_head); - zfcp_rec_dbf_event_action("erator1", erp_action); + zfcp_dbf_rec_action("erator1", erp_action); } static void 
zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act) @@ -479,11 +478,11 @@ static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act) if (act->status & (ZFCP_STATUS_ERP_DISMISSED | ZFCP_STATUS_ERP_TIMEDOUT)) { act->fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; - zfcp_rec_dbf_event_action("erscf_1", act); + zfcp_dbf_rec_action("erscf_1", act); act->fsf_req->erp_action = NULL; } if (act->status & ZFCP_STATUS_ERP_TIMEDOUT) - zfcp_rec_dbf_event_action("erscf_2", act); + zfcp_dbf_rec_action("erscf_2", act); if (act->fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED) act->fsf_req = NULL; } else @@ -641,9 +640,9 @@ static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action) return ZFCP_ERP_FAILED; } - zfcp_rec_dbf_event_thread_lock("erasfx1", adapter); + zfcp_dbf_rec_thread_lock("erasfx1", adapter->dbf); down(&adapter->erp_ready_sem); - zfcp_rec_dbf_event_thread_lock("erasfx2", adapter); + zfcp_dbf_rec_thread_lock("erasfx2", adapter->dbf); if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) break; @@ -682,9 +681,9 @@ static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *act) if (ret) return ZFCP_ERP_FAILED; - zfcp_rec_dbf_event_thread_lock("erasox1", adapter); + zfcp_dbf_rec_thread_lock("erasox1", adapter->dbf); down(&adapter->erp_ready_sem); - zfcp_rec_dbf_event_thread_lock("erasox2", adapter); + zfcp_dbf_rec_thread_lock("erasox2", adapter->dbf); if (act->status & ZFCP_STATUS_ERP_TIMEDOUT) return ZFCP_ERP_FAILED; @@ -1138,7 +1137,7 @@ static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action) } list_del(&erp_action->list); - zfcp_rec_dbf_event_action("eractd1", erp_action); + zfcp_dbf_rec_action("eractd1", erp_action); switch (erp_action->action) { case ZFCP_ERP_ACTION_REOPEN_UNIT: @@ -1297,9 +1296,9 @@ static int zfcp_erp_thread(void *data) while (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL)) { - zfcp_rec_dbf_event_thread_lock("erthrd1", adapter); + zfcp_dbf_rec_thread_lock("erthrd1", adapter->dbf); ignore = down_interruptible(&adapter->erp_ready_sem); - zfcp_rec_dbf_event_thread_lock("erthrd2", adapter); + zfcp_dbf_rec_thread_lock("erthrd2", adapter->dbf); write_lock_irqsave(&adapter->erp_lock, flags); next = adapter->erp_ready_head.next; @@ -1356,7 +1355,7 @@ void zfcp_erp_thread_kill(struct zfcp_adapter *adapter) { atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL, &adapter->status); up(&adapter->erp_ready_sem); - zfcp_rec_dbf_event_thread_lock("erthrk1", adapter); + zfcp_dbf_rec_thread_lock("erthrk1", adapter->dbf); wait_event(adapter->erp_thread_wqh, !(atomic_read(&adapter->status) & @@ -1431,11 +1430,11 @@ void zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter, char *id, if (set_or_clear == ZFCP_SET) { if (status_change_set(mask, &adapter->status)) - zfcp_rec_dbf_event_adapter(id, ref, adapter); + zfcp_dbf_rec_adapter(id, ref, adapter->dbf); atomic_set_mask(mask, &adapter->status); } else { if (status_change_clear(mask, &adapter->status)) - zfcp_rec_dbf_event_adapter(id, ref, adapter); + zfcp_dbf_rec_adapter(id, ref, adapter->dbf); atomic_clear_mask(mask, &adapter->status); if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) atomic_set(&adapter->erp_counter, 0); @@ -1465,11 +1464,11 @@ void zfcp_erp_modify_port_status(struct zfcp_port *port, char *id, void *ref, if (set_or_clear == ZFCP_SET) { if (status_change_set(mask, &port->status)) - zfcp_rec_dbf_event_port(id, ref, port); + zfcp_dbf_rec_port(id, ref, port); atomic_set_mask(mask, &port->status); } else { if (status_change_clear(mask, 
&port->status)) - zfcp_rec_dbf_event_port(id, ref, port); + zfcp_dbf_rec_port(id, ref, port); atomic_clear_mask(mask, &port->status); if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) atomic_set(&port->erp_counter, 0); @@ -1494,11 +1493,11 @@ void zfcp_erp_modify_unit_status(struct zfcp_unit *unit, char *id, void *ref, { if (set_or_clear == ZFCP_SET) { if (status_change_set(mask, &unit->status)) - zfcp_rec_dbf_event_unit(id, ref, unit); + zfcp_dbf_rec_unit(id, ref, unit); atomic_set_mask(mask, &unit->status); } else { if (status_change_clear(mask, &unit->status)) - zfcp_rec_dbf_event_unit(id, ref, unit); + zfcp_dbf_rec_unit(id, ref, unit); atomic_clear_mask(mask, &unit->status); if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) { atomic_set(&unit->erp_counter, 0); diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 7650cec7f71f..a49d00921b92 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -34,35 +34,31 @@ extern struct zfcp_adapter *zfcp_get_adapter_by_busid(char *); extern struct miscdevice zfcp_cfdc_misc; /* zfcp_dbf.c */ -extern int zfcp_adapter_debug_register(struct zfcp_adapter *); -extern void zfcp_adapter_debug_unregister(struct zfcp_adapter *); -extern void zfcp_rec_dbf_event_thread(char *, struct zfcp_adapter *); -extern void zfcp_rec_dbf_event_thread_lock(char *, struct zfcp_adapter *); -extern void zfcp_rec_dbf_event_adapter(char *, void *, struct zfcp_adapter *); -extern void zfcp_rec_dbf_event_port(char *, void *, struct zfcp_port *); -extern void zfcp_rec_dbf_event_unit(char *, void *, struct zfcp_unit *); -extern void zfcp_rec_dbf_event_trigger(char *, void *, u8, u8, void *, - struct zfcp_adapter *, - struct zfcp_port *, struct zfcp_unit *); -extern void zfcp_rec_dbf_event_action(char *, struct zfcp_erp_action *); -extern void _zfcp_hba_dbf_event_fsf_response(const char *, int level, - struct zfcp_fsf_req *, - struct zfcp_dbf *dbf); -extern void _zfcp_hba_dbf_event_fsf_unsol(const char *, int level, - struct zfcp_adapter *, +extern int zfcp_dbf_adapter_register(struct zfcp_adapter *); +extern void zfcp_dbf_adapter_unregister(struct zfcp_dbf *); +extern void zfcp_dbf_rec_thread(char *, struct zfcp_dbf *); +extern void zfcp_dbf_rec_thread_lock(char *, struct zfcp_dbf *); +extern void zfcp_dbf_rec_adapter(char *, void *, struct zfcp_dbf *); +extern void zfcp_dbf_rec_port(char *, void *, struct zfcp_port *); +extern void zfcp_dbf_rec_unit(char *, void *, struct zfcp_unit *); +extern void zfcp_dbf_rec_trigger(char *, void *, u8, u8, void *, + struct zfcp_adapter *, struct zfcp_port *, + struct zfcp_unit *); +extern void zfcp_dbf_rec_action(char *, struct zfcp_erp_action *); +extern void _zfcp_dbf_hba_fsf_response(const char *, int, struct zfcp_fsf_req *, + struct zfcp_dbf *); +extern void _zfcp_dbf_hba_fsf_unsol(const char *, int level, struct zfcp_dbf *, struct fsf_status_read_buffer *); -extern void zfcp_hba_dbf_event_qdio(struct zfcp_qdio *, unsigned int, int, - int); -extern void zfcp_hba_dbf_event_berr(struct zfcp_adapter *, - struct zfcp_fsf_req *); -extern void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *); -extern void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *); -extern void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *); -extern void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *); -extern void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *); -extern void _zfcp_scsi_dbf_event(const char *, const char *, int, - struct zfcp_dbf *, struct scsi_cmnd *, - struct zfcp_fsf_req *, unsigned long); 
+extern void zfcp_dbf_hba_qdio(struct zfcp_dbf *, unsigned int, int, int); +extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *); +extern void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *); +extern void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *); +extern void zfcp_dbf_san_els_request(struct zfcp_fsf_req *); +extern void zfcp_dbf_san_els_response(struct zfcp_fsf_req *); +extern void zfcp_dbf_san_incoming_els(struct zfcp_fsf_req *); +extern void _zfcp_dbf_scsi(const char *, const char *, int, struct zfcp_dbf *, + struct scsi_cmnd *, struct zfcp_fsf_req *, + unsigned long); /* zfcp_erp.c */ extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, char *, diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index bc0c9f54d0d8..309f1dfad3f9 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -242,7 +242,7 @@ void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req) (struct fsf_status_read_buffer *) fsf_req->data; unsigned int els_type = status_buffer->payload.data[0]; - zfcp_san_dbf_event_incoming_els(fsf_req); + zfcp_dbf_san_incoming_els(fsf_req); if (els_type == LS_PLOGI) zfcp_fc_incoming_plogi(fsf_req); else if (els_type == LS_LOGO) diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 048f1a848f34..665967f049a8 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -248,13 +248,13 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req) struct fsf_status_read_buffer *sr_buf = req->data; if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) { - zfcp_hba_dbf_event_fsf_unsol("dism", adapter, sr_buf); + zfcp_dbf_hba_fsf_unsol("dism", adapter->dbf, sr_buf); mempool_free(sr_buf, adapter->pool.status_read_data); zfcp_fsf_req_free(req); return; } - zfcp_hba_dbf_event_fsf_unsol("read", adapter, sr_buf); + zfcp_dbf_hba_fsf_unsol("read", adapter->dbf, sr_buf); switch (sr_buf->status_type) { case FSF_STATUS_READ_PORT_CLOSED: @@ -269,7 +269,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req) dev_warn(&adapter->ccw_device->dev, "The error threshold for checksum statistics " "has been exceeded\n"); - zfcp_hba_dbf_event_berr(adapter, req); + zfcp_dbf_hba_berr(adapter->dbf, req); break; case FSF_STATUS_READ_LINK_DOWN: zfcp_fsf_status_read_link_down(req); @@ -355,7 +355,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req) struct fsf_qtcb *qtcb = req->qtcb; union fsf_prot_status_qual *psq = &qtcb->prefix.prot_status_qual; - zfcp_hba_dbf_event_fsf_response(req); + zfcp_dbf_hba_fsf_response(req); if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) { req->status |= ZFCP_STATUS_FSFREQ_ERROR | @@ -848,7 +848,7 @@ failed_req_send: mempool_free(sr_buf, adapter->pool.status_read_data); failed_buf: zfcp_fsf_req_free(req); - zfcp_hba_dbf_event_fsf_unsol("fail", adapter, NULL); + zfcp_dbf_hba_fsf_unsol("fail", adapter->dbf, NULL); out: spin_unlock_bh(&qdio->req_q_lock); return retval; @@ -968,7 +968,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req) switch (header->fsf_status) { case FSF_GOOD: - zfcp_san_dbf_event_ct_response(req); + zfcp_dbf_san_ct_response(req); send_ct->status = 0; break; case FSF_SERVICE_CLASS_NOT_SUPPORTED: @@ -1100,7 +1100,7 @@ int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool) req->qtcb->bottom.support.timeout = ct->timeout; req->data = ct; - zfcp_san_dbf_event_ct_request(req); + zfcp_dbf_san_ct_request(req); zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); ret = zfcp_fsf_req_send(req); @@ -1129,7 
+1129,7 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req) switch (header->fsf_status) { case FSF_GOOD: - zfcp_san_dbf_event_els_response(req); + zfcp_dbf_san_els_response(req); send_els->status = 0; break; case FSF_SERVICE_CLASS_NOT_SUPPORTED: @@ -1203,7 +1203,7 @@ int zfcp_fsf_send_els(struct zfcp_send_els *els) bottom->timeout = 2 * R_A_TOV; req->data = els; - zfcp_san_dbf_event_els_request(req); + zfcp_dbf_san_els_request(req); zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); ret = zfcp_fsf_req_send(req); @@ -2213,11 +2213,11 @@ static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req) } skip_fsfstatus: if (scpnt->result != 0) - zfcp_scsi_dbf_event_result("erro", 3, req->adapter, scpnt, req); + zfcp_dbf_scsi_result("erro", 3, req->adapter->dbf, scpnt, req); else if (scpnt->retries > 0) - zfcp_scsi_dbf_event_result("retr", 4, req->adapter, scpnt, req); + zfcp_dbf_scsi_result("retr", 4, req->adapter->dbf, scpnt, req); else - zfcp_scsi_dbf_event_result("norm", 6, req->adapter, scpnt, req); + zfcp_dbf_scsi_result("norm", 6, req->adapter->dbf, scpnt, req); scpnt->host_scribble = NULL; (scpnt->scsi_done) (scpnt); diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index 0b3f634509bf..84527ebbbe58 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c @@ -101,7 +101,8 @@ static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err, struct zfcp_qdio_queue *queue = &qdio->req_q; if (unlikely(qdio_err)) { - zfcp_hba_dbf_event_qdio(qdio, qdio_err, first, count); + zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, first, + count); zfcp_qdio_handler_error(qdio, "qdireq1"); return; } @@ -143,7 +144,8 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err, int sbal_idx, sbal_no; if (unlikely(qdio_err)) { - zfcp_hba_dbf_event_qdio(qdio, qdio_err, first, count); + zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, first, + count); zfcp_qdio_handler_error(qdio, "qdires1"); return; } diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 4414720c87f6..b6177ad2d5bf 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -53,11 +53,11 @@ static int zfcp_scsi_slave_configure(struct scsi_device *sdp) static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result) { + struct zfcp_adapter *adapter = + (struct zfcp_adapter *) scpnt->device->host->hostdata[0]; set_host_byte(scpnt, result); if ((scpnt->device != NULL) && (scpnt->device->host != NULL)) - zfcp_scsi_dbf_event_result("fail", 4, - (struct zfcp_adapter*) scpnt->device->host->hostdata[0], - scpnt, NULL); + zfcp_dbf_scsi_result("fail", 4, adapter->dbf, scpnt, NULL); /* return directly */ scpnt->scsi_done(scpnt); } @@ -93,7 +93,7 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt, scsi_result = fc_remote_port_chkready(rport); if (unlikely(scsi_result)) { scpnt->result = scsi_result; - zfcp_scsi_dbf_event_result("fail", 4, adapter, scpnt, NULL); + zfcp_dbf_scsi_result("fail", 4, adapter->dbf, scpnt, NULL); scpnt->scsi_done(scpnt); return 0; } @@ -181,8 +181,8 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) spin_unlock(&adapter->req_list_lock); if (!old_req) { write_unlock_irqrestore(&adapter->abort_lock, flags); - zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, NULL, - old_reqid); + zfcp_dbf_scsi_abort("lte1", adapter->dbf, scpnt, NULL, + old_reqid); return FAILED; /* completion could be in progress */ } old_req->data = NULL; @@ -198,8 +198,8 @@ 
static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) zfcp_erp_wait(adapter); if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_RUNNING)) { - zfcp_scsi_dbf_event_abort("nres", adapter, scpnt, NULL, - old_reqid); + zfcp_dbf_scsi_abort("nres", adapter->dbf, scpnt, NULL, + old_reqid); return SUCCESS; } } @@ -216,7 +216,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) dbf_tag = "fail"; retval = FAILED; } - zfcp_scsi_dbf_event_abort(dbf_tag, adapter, scpnt, abrt_req, old_reqid); + zfcp_dbf_scsi_abort(dbf_tag, adapter->dbf, scpnt, abrt_req, old_reqid); zfcp_fsf_req_free(abrt_req); return retval; } @@ -237,8 +237,7 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags) zfcp_erp_wait(adapter); if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_RUNNING)) { - zfcp_scsi_dbf_event_devreset("nres", tm_flags, unit, - scpnt); + zfcp_dbf_scsi_devreset("nres", tm_flags, unit, scpnt); return SUCCESS; } } @@ -248,13 +247,13 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags) wait_for_completion(&fsf_req->completion); if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) { - zfcp_scsi_dbf_event_devreset("fail", tm_flags, unit, scpnt); + zfcp_dbf_scsi_devreset("fail", tm_flags, unit, scpnt); retval = FAILED; } else if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP) { - zfcp_scsi_dbf_event_devreset("nsup", tm_flags, unit, scpnt); + zfcp_dbf_scsi_devreset("nsup", tm_flags, unit, scpnt); retval = FAILED; } else - zfcp_scsi_dbf_event_devreset("okay", tm_flags, unit, scpnt); + zfcp_dbf_scsi_devreset("okay", tm_flags, unit, scpnt); zfcp_fsf_req_free(fsf_req); return retval; -- cgit v1.2.3 From d5a282a1c5084ec7ebd9e6ab9723317f6b3fcd7b Mon Sep 17 00:00:00 2001 From: Swen Schillig Date: Tue, 18 Aug 2009 15:43:22 +0200 Subject: [SCSI] zfcp: introduce _setup, _destroy for qdio and FC Extract independent data structures and introduce common _setup and _destroy routines for QDIO and Fibre Channel related data structures Signed-off-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 42 +++++++++----------------- drivers/s390/scsi/zfcp_ext.h | 7 +++-- drivers/s390/scsi/zfcp_fc.c | 36 ++++++++++++++++------- drivers/s390/scsi/zfcp_qdio.c | 68 +++++++++++++++++++++++++++++-------------- 4 files changed, 89 insertions(+), 64 deletions(-) (limited to 'drivers/s390') diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 7a50f64c36bd..c77686ed938c 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -506,30 +506,18 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) if (!adapter) return -ENOMEM; - adapter->gs = kzalloc(sizeof(struct zfcp_wka_ports), GFP_KERNEL); - if (!adapter->gs) { - kfree(adapter); - return -ENOMEM; - } - - adapter->qdio = kzalloc(sizeof(struct zfcp_qdio), GFP_KERNEL); - if (!adapter->qdio) - goto qdio_mem_failed; - - adapter->qdio->adapter = adapter; - ccw_device->handler = NULL; adapter->ccw_device = ccw_device; atomic_set(&adapter->refcount, 0); - if (zfcp_qdio_allocate(adapter->qdio, ccw_device)) - goto qdio_allocate_failed; + if (zfcp_qdio_setup(adapter)) + goto qdio_failed; if (zfcp_allocate_low_mem_buffers(adapter)) - goto failed_low_mem_buffers; + goto low_mem_buffers_failed; if (zfcp_reqlist_alloc(adapter)) - goto failed_low_mem_buffers; + goto low_mem_buffers_failed; if (zfcp_dbf_adapter_register(adapter)) goto debug_register_failed; @@ -537,6 +525,9 @@ int zfcp_adapter_enqueue(struct 
ccw_device *ccw_device) if (zfcp_setup_adapter_work_queue(adapter)) goto work_queue_failed; + if (zfcp_fc_gs_setup(adapter)) + goto generic_services_failed; + init_waitqueue_head(&adapter->remove_wq); init_waitqueue_head(&adapter->erp_thread_wqh); init_waitqueue_head(&adapter->erp_done_wqh); @@ -547,9 +538,6 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) spin_lock_init(&adapter->req_list_lock); - spin_lock_init(&adapter->qdio->req_q_lock); - spin_lock_init(&adapter->qdio->stat_lock); - rwlock_init(&adapter->erp_lock); rwlock_init(&adapter->abort_lock); @@ -570,24 +558,23 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) goto sysfs_failed; atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status); - zfcp_fc_wka_ports_init(adapter); if (!zfcp_adapter_scsi_register(adapter)) return 0; sysfs_failed: + zfcp_fc_gs_destroy(adapter); +generic_services_failed: zfcp_destroy_adapter_work_queue(adapter); work_queue_failed: zfcp_dbf_adapter_unregister(adapter->dbf); debug_register_failed: dev_set_drvdata(&ccw_device->dev, NULL); kfree(adapter->req_list); -failed_low_mem_buffers: +low_mem_buffers_failed: zfcp_free_low_mem_buffers(adapter); -qdio_allocate_failed: - zfcp_qdio_free(adapter->qdio); - kfree(adapter->qdio); -qdio_mem_failed: +qdio_failed: + zfcp_qdio_destroy(adapter->qdio); kfree(adapter); return -ENOMEM; } @@ -616,15 +603,14 @@ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter) if (!retval) return; + zfcp_fc_gs_destroy(adapter); zfcp_destroy_adapter_work_queue(adapter); zfcp_dbf_adapter_unregister(adapter->dbf); - zfcp_qdio_free(adapter->qdio); zfcp_free_low_mem_buffers(adapter); + zfcp_qdio_destroy(adapter->qdio); kfree(adapter->req_list); kfree(adapter->fc_stats); kfree(adapter->stats_reset_data); - kfree(adapter->gs); - kfree(adapter->qdio); kfree(adapter); } diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index a49d00921b92..cf09b2838c50 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -100,7 +100,8 @@ extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *); extern void zfcp_test_link(struct zfcp_port *); extern void zfcp_fc_link_test_work(struct work_struct *); extern void zfcp_fc_wka_ports_force_offline(struct zfcp_wka_ports *); -extern void zfcp_fc_wka_ports_init(struct zfcp_adapter *); +extern int zfcp_fc_gs_setup(struct zfcp_adapter *); +extern void zfcp_fc_gs_destroy(struct zfcp_adapter *); extern int zfcp_fc_execute_els_fc_job(struct fc_bsg_job *); extern int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *); @@ -134,8 +135,8 @@ extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long, extern void zfcp_fsf_reqid_check(struct zfcp_qdio *, int); /* zfcp_qdio.c */ -extern int zfcp_qdio_allocate(struct zfcp_qdio *, struct ccw_device *); -extern void zfcp_qdio_free(struct zfcp_qdio *); +extern int zfcp_qdio_setup(struct zfcp_adapter *); +extern void zfcp_qdio_destroy(struct zfcp_qdio *); extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_queue_req *); extern struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_qdio *, struct zfcp_queue_req *); diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 309f1dfad3f9..db8bb41a7794 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -141,17 +141,6 @@ void zfcp_fc_wka_ports_force_offline(struct zfcp_wka_ports *gs) zfcp_fc_wka_port_force_offline(&gs->ks); } -void zfcp_fc_wka_ports_init(struct zfcp_adapter *adapter) -{ - struct zfcp_wka_ports *gs = adapter->gs; - - 
zfcp_fc_wka_port_init(&gs->ms, FC_FID_MGMT_SERV, adapter); - zfcp_fc_wka_port_init(&gs->ts, FC_FID_TIME_SERV, adapter); - zfcp_fc_wka_port_init(&gs->ds, FC_FID_DIR_SERV, adapter); - zfcp_fc_wka_port_init(&gs->as, FC_FID_ALIASES, adapter); - zfcp_fc_wka_port_init(&gs->ks, FC_FID_SEC_KEY, adapter); -} - static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range, struct fcp_rscn_element *elem) { @@ -870,3 +859,28 @@ int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *job) } return ret; } + +int zfcp_fc_gs_setup(struct zfcp_adapter *adapter) +{ + struct zfcp_wka_ports *wka_ports; + + wka_ports = kzalloc(sizeof(struct zfcp_wka_ports), GFP_KERNEL); + if (!wka_ports) + return -ENOMEM; + + adapter->gs = wka_ports; + zfcp_fc_wka_port_init(&wka_ports->ms, FC_FID_MGMT_SERV, adapter); + zfcp_fc_wka_port_init(&wka_ports->ts, FC_FID_TIME_SERV, adapter); + zfcp_fc_wka_port_init(&wka_ports->ds, FC_FID_DIR_SERV, adapter); + zfcp_fc_wka_port_init(&wka_ports->as, FC_FID_ALIASES, adapter); + zfcp_fc_wka_port_init(&wka_ports->ks, FC_FID_SEC_KEY, adapter); + + return 0; +} + +void zfcp_fc_gs_destroy(struct zfcp_adapter *adapter) +{ + kfree(adapter->gs); + adapter->gs = NULL; +} + diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index 84527ebbbe58..2b499e28ff1f 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c @@ -34,27 +34,6 @@ zfcp_qdio_sbale(struct zfcp_qdio_queue *q, int sbal_idx, int sbale_idx) return &q->sbal[sbal_idx]->element[sbale_idx]; } -/** - * zfcp_qdio_free - free memory used by request- and resposne queue - * @qdio: pointer to the zfcp_qdio structure - */ -void zfcp_qdio_free(struct zfcp_qdio *qdio) -{ - struct qdio_buffer **sbal_req, **sbal_resp; - int p; - - if (qdio->adapter->ccw_device) - qdio_free(qdio->adapter->ccw_device); - - sbal_req = qdio->req_q.sbal; - sbal_resp = qdio->resp_q.sbal; - - for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) { - free_page((unsigned long) sbal_req[p]); - free_page((unsigned long) sbal_resp[p]); - } -} - static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id) { struct zfcp_adapter *adapter = qdio->adapter; @@ -383,7 +362,7 @@ static void zfcp_qdio_setup_init_data(struct qdio_initialize *id, * Returns: -ENOMEM on memory allocation error or return value from * qdio_allocate */ -int zfcp_qdio_allocate(struct zfcp_qdio *qdio, struct ccw_device *ccw_dev) +static int zfcp_qdio_allocate(struct zfcp_qdio *qdio) { struct qdio_initialize init_data; @@ -477,3 +456,48 @@ failed_establish: "Setting up the QDIO connection to the FCP adapter failed\n"); return -EIO; } + +void zfcp_qdio_destroy(struct zfcp_qdio *qdio) +{ + struct qdio_buffer **sbal_req, **sbal_resp; + int p; + + if (!qdio) + return; + + if (qdio->adapter->ccw_device) + qdio_free(qdio->adapter->ccw_device); + + sbal_req = qdio->req_q.sbal; + sbal_resp = qdio->resp_q.sbal; + + for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) { + free_page((unsigned long) sbal_req[p]); + free_page((unsigned long) sbal_resp[p]); + } + + kfree(qdio); +} + +int zfcp_qdio_setup(struct zfcp_adapter *adapter) +{ + struct zfcp_qdio *qdio; + + qdio = kzalloc(sizeof(struct zfcp_qdio), GFP_KERNEL); + if (!qdio) + return -ENOMEM; + + qdio->adapter = adapter; + + if (zfcp_qdio_allocate(qdio)) { + zfcp_qdio_destroy(qdio); + return -ENOMEM; + } + + spin_lock_init(&qdio->req_q_lock); + spin_lock_init(&qdio->stat_lock); + + adapter->qdio = qdio; + return 0; +} + -- cgit v1.2.3 From 6f53a2d2ecaefa3ffff8864f51a3ae38737e1152 Mon Sep 17 00:00:00 
2001 From: Swen Schillig Date: Tue, 18 Aug 2009 15:43:23 +0200 Subject: [SCSI] zfcp: Apply common naming conventions to zfcp_fc Update the Fibre Channel related code to use the zfcp_fc prefix. Signed-off-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 2 +- drivers/s390/scsi/zfcp_ext.h | 6 ++--- drivers/s390/scsi/zfcp_fc.c | 51 +++++++++++++++++++++--------------------- drivers/s390/scsi/zfcp_fsf.c | 10 ++++----- drivers/s390/scsi/zfcp_sysfs.c | 2 +- 5 files changed, 35 insertions(+), 36 deletions(-) (limited to 'drivers/s390') diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index c77686ed938c..23b85a03e26a 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -544,7 +544,7 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) sema_init(&adapter->erp_ready_sem, 0); INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler); - INIT_WORK(&adapter->scan_work, _zfcp_scan_ports_later); + INIT_WORK(&adapter->scan_work, _zfcp_fc_scan_ports_later); adapter->service_level.seq_print = zfcp_print_sl; diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index cf09b2838c50..36935bc0818f 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -92,12 +92,12 @@ extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, char *, extern void zfcp_erp_timeout_handler(unsigned long); /* zfcp_fc.c */ -extern int zfcp_scan_ports(struct zfcp_adapter *); -extern void _zfcp_scan_ports_later(struct work_struct *); +extern int zfcp_fc_scan_ports(struct zfcp_adapter *); +extern void _zfcp_fc_scan_ports_later(struct work_struct *); extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *); extern void zfcp_fc_port_did_lookup(struct work_struct *); extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *); -extern void zfcp_test_link(struct zfcp_port *); +extern void zfcp_fc_test_link(struct zfcp_port *); extern void zfcp_fc_link_test_work(struct work_struct *); extern void zfcp_fc_wka_ports_force_offline(struct zfcp_wka_ports *); extern int zfcp_fc_gs_setup(struct zfcp_adapter *); diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index db8bb41a7794..7433da900fab 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -57,7 +57,7 @@ struct zfcp_fc_ns_handler_data { unsigned long handler_data; }; -static int zfcp_wka_port_get(struct zfcp_wka_port *wka_port) +static int zfcp_fc_wka_port_get(struct zfcp_wka_port *wka_port) { if (mutex_lock_interruptible(&wka_port->mutex)) return -ERESTARTSYS; @@ -82,7 +82,7 @@ static int zfcp_wka_port_get(struct zfcp_wka_port *wka_port) return -EIO; } -static void zfcp_wka_port_offline(struct work_struct *work) +static void zfcp_fc_wka_port_offline(struct work_struct *work) { struct delayed_work *dw = to_delayed_work(work); struct zfcp_wka_port *wka_port = @@ -102,7 +102,7 @@ out: mutex_unlock(&wka_port->mutex); } -static void zfcp_wka_port_put(struct zfcp_wka_port *wka_port) +static void zfcp_fc_wka_port_put(struct zfcp_wka_port *wka_port) { if (atomic_dec_return(&wka_port->refcount) != 0) return; @@ -121,7 +121,7 @@ static void zfcp_fc_wka_port_init(struct zfcp_wka_port *wka_port, u32 d_id, wka_port->status = ZFCP_WKA_PORT_OFFLINE; atomic_set(&wka_port->refcount, 0); mutex_init(&wka_port->mutex); - INIT_DELAYED_WORK(&wka_port->work, zfcp_wka_port_offline); + INIT_DELAYED_WORK(&wka_port->work, zfcp_fc_wka_port_offline); } static void 
zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *wka) @@ -150,7 +150,7 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range, read_lock_irqsave(&zfcp_data.config_lock, flags); list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) { if ((port->d_id & range) == (elem->nport_did & range)) - zfcp_test_link(port); + zfcp_fc_test_link(port); if (!port->d_id) zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, @@ -326,13 +326,13 @@ static int zfcp_fc_ns_gid_pn(struct zfcp_port *port) memset(gid_pn, 0, sizeof(*gid_pn)); - ret = zfcp_wka_port_get(&adapter->gs->ds); + ret = zfcp_fc_wka_port_get(&adapter->gs->ds); if (ret) goto out; ret = zfcp_fc_ns_gid_pn_request(port, gid_pn); - zfcp_wka_port_put(&adapter->gs->ds); + zfcp_fc_wka_port_put(&adapter->gs->ds); out: mempool_free(gid_pn, adapter->pool.gid_pn_data); return ret; @@ -482,14 +482,14 @@ out: } /** - * zfcp_test_link - lightweight link test procedure + * zfcp_fc_test_link - lightweight link test procedure * @port: port to be tested * * Test status of a link to a remote port using the ELS command ADISC. * If there is a problem with the remote port, error recovery steps * will be triggered. */ -void zfcp_test_link(struct zfcp_port *port) +void zfcp_fc_test_link(struct zfcp_port *port) { zfcp_port_get(port); if (!queue_work(port->adapter->work_queue, &port->test_link_work)) @@ -532,9 +532,8 @@ out: } -static int zfcp_scan_issue_gpn_ft(struct zfcp_gpn_ft *gpn_ft, - struct zfcp_adapter *adapter, - int max_bytes) +static int zfcp_fc_send_gpn_ft(struct zfcp_gpn_ft *gpn_ft, + struct zfcp_adapter *adapter, int max_bytes) { struct zfcp_send_ct *ct = &gpn_ft->ct; struct ct_iu_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req); @@ -569,7 +568,7 @@ static int zfcp_scan_issue_gpn_ft(struct zfcp_gpn_ft *gpn_ft, return ret; } -static void zfcp_validate_port(struct zfcp_port *port) +static void zfcp_fc_validate_port(struct zfcp_port *port) { struct zfcp_adapter *adapter = port->adapter; @@ -589,7 +588,7 @@ static void zfcp_validate_port(struct zfcp_port *port) zfcp_port_dequeue(port); } -static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries) +static int zfcp_fc_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries) { struct zfcp_send_ct *ct = &gpn_ft->ct; struct scatterlist *sg = gpn_ft->sg_resp; @@ -649,16 +648,16 @@ static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries) zfcp_erp_wait(adapter); list_for_each_entry_safe(port, tmp, &adapter->port_list_head, list) - zfcp_validate_port(port); + zfcp_fc_validate_port(port); up(&zfcp_data.config_sema); return ret; } /** - * zfcp_scan_ports - scan remote ports and attach new ports + * zfcp_fc_scan_ports - scan remote ports and attach new ports * @adapter: pointer to struct zfcp_adapter */ -int zfcp_scan_ports(struct zfcp_adapter *adapter) +int zfcp_fc_scan_ports(struct zfcp_adapter *adapter) { int ret, i; struct zfcp_gpn_ft *gpn_ft; @@ -673,7 +672,7 @@ int zfcp_scan_ports(struct zfcp_adapter *adapter) fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV) return 0; - ret = zfcp_wka_port_get(&adapter->gs->ds); + ret = zfcp_fc_wka_port_get(&adapter->gs->ds); if (ret) return ret; @@ -684,9 +683,9 @@ int zfcp_scan_ports(struct zfcp_adapter *adapter) } for (i = 0; i < 3; i++) { - ret = zfcp_scan_issue_gpn_ft(gpn_ft, adapter, max_bytes); + ret = zfcp_fc_send_gpn_ft(gpn_ft, adapter, max_bytes); if (!ret) { - ret = zfcp_scan_eval_gpn_ft(gpn_ft, max_entries); + ret = zfcp_fc_eval_gpn_ft(gpn_ft, max_entries); if (ret == -EAGAIN) 
ssleep(1); else @@ -695,14 +694,14 @@ int zfcp_scan_ports(struct zfcp_adapter *adapter) } zfcp_free_sg_env(gpn_ft, buf_num); out: - zfcp_wka_port_put(&adapter->gs->ds); + zfcp_fc_wka_port_put(&adapter->gs->ds); return ret; } -void _zfcp_scan_ports_later(struct work_struct *work) +void _zfcp_fc_scan_ports_later(struct work_struct *work) { - zfcp_scan_ports(container_of(work, struct zfcp_adapter, scan_work)); + zfcp_fc_scan_ports(container_of(work, struct zfcp_adapter, scan_work)); } struct zfcp_els_fc_job { @@ -792,7 +791,7 @@ static void zfcp_fc_generic_ct_handler(unsigned long data) job->state_flags = FC_RQST_STATE_DONE; job->job_done(job); - zfcp_wka_port_put(ct_fc_job->ct.wka_port); + zfcp_fc_wka_port_put(ct_fc_job->ct.wka_port); kfree(ct_fc_job); } @@ -838,7 +837,7 @@ int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *job) return -EINVAL; /* no such service */ } - ret = zfcp_wka_port_get(ct_fc_job->ct.wka_port); + ret = zfcp_fc_wka_port_get(ct_fc_job->ct.wka_port); if (ret) { kfree(ct_fc_job); return ret; @@ -855,7 +854,7 @@ int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *job) ret = zfcp_fsf_send_ct(&ct_fc_job->ct, NULL); if (ret) { kfree(ct_fc_job); - zfcp_wka_port_put(ct_fc_job->ct.wka_port); + zfcp_fc_wka_port_put(ct_fc_job->ct.wka_port); } return ret; } diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 665967f049a8..c241f032fd49 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -892,7 +892,7 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req) case FSF_ADAPTER_STATUS_AVAILABLE: switch (fsq->word[0]) { case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: - zfcp_test_link(unit->port); + zfcp_fc_test_link(unit->port); /* fall through */ case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: req->status |= ZFCP_STATUS_FSFREQ_ERROR; @@ -1139,7 +1139,7 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req) switch (header->fsf_status_qual.word[0]){ case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: if (port && (send_els->ls_code != ZFCP_LS_ADISC)) - zfcp_test_link(port); + zfcp_fc_test_link(port); /*fall through */ case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: case FSF_SQ_RETRY_IF_POSSIBLE: @@ -1889,7 +1889,7 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req) case FSF_ADAPTER_STATUS_AVAILABLE: switch (header->fsf_status_qual.word[0]) { case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: - zfcp_test_link(unit->port); + zfcp_fc_test_link(unit->port); /* fall through */ case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: req->status |= ZFCP_STATUS_FSFREQ_ERROR; @@ -2024,7 +2024,7 @@ static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req) case FSF_ADAPTER_STATUS_AVAILABLE: switch (req->qtcb->header.fsf_status_qual.word[0]) { case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: - zfcp_test_link(unit->port); + zfcp_fc_test_link(unit->port); /* fall through */ case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: req->status |= ZFCP_STATUS_FSFREQ_ERROR; @@ -2307,7 +2307,7 @@ static void zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *req) case FSF_ADAPTER_STATUS_AVAILABLE: if (header->fsf_status_qual.word[0] == FSF_SQ_INVOKE_LINK_TEST_PROCEDURE) - zfcp_test_link(unit->port); + zfcp_fc_test_link(unit->port); req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; } diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index a6cf62636834..c86496bb608b 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c @@ -126,7 +126,7 @@ static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev, if (atomic_read(&adapter->status) & 
ZFCP_STATUS_COMMON_REMOVE) return -EBUSY; - ret = zfcp_scan_ports(adapter); + ret = zfcp_fc_scan_ports(adapter); return ret ? ret : (ssize_t) count; } static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL, -- cgit v1.2.3 From ea945ff84c2ce1089edb7914ffdd998c24c25903 Mon Sep 17 00:00:00 2001 From: Swen Schillig Date: Tue, 18 Aug 2009 15:43:24 +0200 Subject: [SCSI] zfcp: resolve false usage of dd_data in fc_rport The fc_rport structure reserves a reference where a LLD can put information required in a situation where the fc transport class is triggering LLD callbacks. The zfcp driver was using this variable directly which is discouraged. This patch solves this issue by making this reference unnecessary. In addition the dev_loss_tmo callback is removed, it is not required: zfcp does not access the fc_rport after calling fc_remote_port_delete. Signed-off-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 4 ---- drivers/s390/scsi/zfcp_fc.c | 2 +- drivers/s390/scsi/zfcp_scsi.c | 22 ++++------------------ 3 files changed, 5 insertions(+), 23 deletions(-) (limited to 'drivers/s390') diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 23b85a03e26a..ed9a8a1517c6 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -709,10 +709,6 @@ void zfcp_port_dequeue(struct zfcp_port *port) write_lock_irq(&zfcp_data.config_lock); list_del(&port->list); write_unlock_irq(&zfcp_data.config_lock); - if (port->rport) { - port->rport->dd_data = NULL; - port->rport = NULL; - } wait_event(port->remove_wq, atomic_read(&port->refcount) == 0); cancel_work_sync(&port->rport_work); /* usually not necessary */ zfcp_adapter_put(port->adapter); diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 7433da900fab..5c1f12247e42 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -752,7 +752,7 @@ int zfcp_fc_execute_els_fc_job(struct fc_bsg_job *job) els_fc_job->els.adapter = adapter; if (rport) { read_lock_irq(&zfcp_data.config_lock); - port = rport->dd_data; + port = zfcp_get_port_by_wwpn(adapter, rport->port_name); if (port) els_fc_job->els.d_id = port->d_id; read_unlock_irq(&zfcp_data.config_lock); diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index b6177ad2d5bf..3ff726afafc6 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -490,21 +490,6 @@ static void zfcp_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout) rport->dev_loss_tmo = timeout; } -/** - * zfcp_scsi_dev_loss_tmo_callbk - Free any reference to rport - * @rport: The rport that is about to be deleted. 
- */ -static void zfcp_scsi_dev_loss_tmo_callbk(struct fc_rport *rport) -{ - struct zfcp_port *port; - - write_lock_irq(&zfcp_data.config_lock); - port = rport->dd_data; - if (port) - port->rport = NULL; - write_unlock_irq(&zfcp_data.config_lock); -} - /** * zfcp_scsi_terminate_rport_io - Terminate all I/O on a rport * @rport: The FC rport where to teminate I/O @@ -516,9 +501,12 @@ static void zfcp_scsi_dev_loss_tmo_callbk(struct fc_rport *rport) static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport) { struct zfcp_port *port; + struct Scsi_Host *shost = rport_to_shost(rport); + struct zfcp_adapter *adapter = + (struct zfcp_adapter *)shost->hostdata[0]; write_lock_irq(&zfcp_data.config_lock); - port = rport->dd_data; + port = zfcp_get_port_by_wwpn(adapter, rport->port_name); if (port) zfcp_port_get(port); write_unlock_irq(&zfcp_data.config_lock); @@ -550,7 +538,6 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port) return; } - rport->dd_data = port; rport->maxframe_size = port->maxframe_size; rport->supported_classes = port->supported_classes; port->rport = rport; @@ -663,7 +650,6 @@ struct fc_function_template zfcp_transport_functions = { .reset_fc_host_stats = zfcp_reset_fc_host_stats, .set_rport_dev_loss_tmo = zfcp_set_rport_dev_loss_tmo, .get_host_port_state = zfcp_get_host_port_state, - .dev_loss_tmo_callbk = zfcp_scsi_dev_loss_tmo_callbk, .terminate_rport_io = zfcp_scsi_terminate_rport_io, .show_host_port_state = 1, .bsg_request = zfcp_execute_fc_job, -- cgit v1.2.3 From 347c6a965dc110c91a77f65181fc011ee257a4a6 Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 18 Aug 2009 15:43:25 +0200 Subject: [SCSI] zfcp: Use kthread API for zfcp erp thread Switch the creation of the zfcp erp thread from the deprecated kernel_thread API to the kthread API. This allows also the removal of some flags in zfcp since the kthread API handles thread creation and shutdown internally. To allow the usage of the kthread_stop function, replace the erp ready semaphore with a waitqueue for waiting until erp actions arrive on the ready queue. 
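For reference, the pattern this patch converts to looks roughly like the sketch below. It is a simplified, hypothetical example (the demo_* names are not zfcp symbols): kthread_run() replaces kernel_thread() plus the hand-maintained ERP_THREAD_UP/ERP_THREAD_KILL status flags, a wait queue replaces the erp ready semaphore, and kthread_should_stop()/kthread_stop() handle shutdown.

#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/err.h>

/* Hypothetical worker, only to illustrate the kthread + waitqueue pattern;
 * none of the demo_* names exist in zfcp. */
struct demo_worker {
	struct task_struct *thread;
	wait_queue_head_t ready_wq;
	struct list_head ready_head;
};

static int demo_thread_fn(void *data)
{
	struct demo_worker *w = data;

	for (;;) {
		/* sleep until work is queued or kthread_stop() is called */
		wait_event_interruptible(w->ready_wq,
					 !list_empty(&w->ready_head) ||
					 kthread_should_stop());
		if (kthread_should_stop())
			break;
		/* ... take one entry off w->ready_head and process it ... */
	}
	return 0;
}

static int demo_worker_start(struct demo_worker *w)
{
	struct task_struct *t;

	INIT_LIST_HEAD(&w->ready_head);
	init_waitqueue_head(&w->ready_wq);

	t = kthread_run(demo_thread_fn, w, "demo_worker");
	if (IS_ERR(t))
		return PTR_ERR(t);	/* no "thread up" handshake needed */
	w->thread = t;
	return 0;
}

static void demo_worker_stop(struct demo_worker *w)
{
	kthread_stop(w->thread);	/* wakes the thread, waits for exit */
	w->thread = NULL;
}

kthread_stop() wakes the thread and blocks until the thread function returns, which is why the extra status flags and the erp_thread_wqh wait queue can be dropped in the diff below.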
Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 4 +-- drivers/s390/scsi/zfcp_def.h | 6 ++--- drivers/s390/scsi/zfcp_erp.c | 62 +++++++++++++++++--------------------------- 3 files changed, 27 insertions(+), 45 deletions(-) (limited to 'drivers/s390') diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index ed9a8a1517c6..e8f39f02bc3b 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -529,7 +529,7 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) goto generic_services_failed; init_waitqueue_head(&adapter->remove_wq); - init_waitqueue_head(&adapter->erp_thread_wqh); + init_waitqueue_head(&adapter->erp_ready_wq); init_waitqueue_head(&adapter->erp_done_wqh); INIT_LIST_HEAD(&adapter->port_list_head); @@ -541,8 +541,6 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) rwlock_init(&adapter->erp_lock); rwlock_init(&adapter->abort_lock); - sema_init(&adapter->erp_ready_sem, 0); - INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler); INIT_WORK(&adapter->scan_work, _zfcp_fc_scan_ports_later); diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 9a8ff0553421..ce65d88e280c 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -222,8 +222,6 @@ struct zfcp_ls_adisc { #define ZFCP_STATUS_ADAPTER_QDIOUP 0x00000002 #define ZFCP_STATUS_ADAPTER_XCONFIG_OK 0x00000008 #define ZFCP_STATUS_ADAPTER_HOST_CON_INIT 0x00000010 -#define ZFCP_STATUS_ADAPTER_ERP_THREAD_UP 0x00000020 -#define ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL 0x00000080 #define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100 #define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200 @@ -481,10 +479,9 @@ struct zfcp_adapter { atomic_t status; /* status of this adapter */ struct list_head erp_ready_head; /* error recovery for this adapter/devices */ + wait_queue_head_t erp_ready_wq; struct list_head erp_running_head; rwlock_t erp_lock; - struct semaphore erp_ready_sem; - wait_queue_head_t erp_thread_wqh; wait_queue_head_t erp_done_wqh; struct zfcp_erp_action erp_action; /* pending error recovery */ atomic_t erp_counter; @@ -492,6 +489,7 @@ struct zfcp_adapter { actions */ u32 erp_low_mem_count; /* nr of erp actions waiting for memory */ + struct task_struct *erp_thread; struct zfcp_wka_ports *gs; /* generic services */ struct zfcp_dbf *dbf; /* debug traces */ struct zfcp_adapter_mempool pool; /* Adapter memory pools */ diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 373567eda8f6..577e15708598 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -9,6 +9,7 @@ #define KMSG_COMPONENT "zfcp" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#include #include "zfcp_ext.h" #define ZFCP_MAX_ERPS 3 @@ -75,7 +76,7 @@ static void zfcp_erp_action_ready(struct zfcp_erp_action *act) list_move(&act->list, &act->adapter->erp_ready_head); zfcp_dbf_rec_action("erardy1", act); - up(&adapter->erp_ready_sem); + wake_up(&adapter->erp_ready_wq); zfcp_dbf_rec_thread("erardy2", adapter->dbf); } @@ -212,8 +213,7 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter, int retval = 1, need; struct zfcp_erp_action *act = NULL; - if (!(atomic_read(&adapter->status) & - ZFCP_STATUS_ADAPTER_ERP_THREAD_UP)) + if (!adapter->erp_thread) return -EIO; need = zfcp_erp_required_act(want, adapter, port, unit); @@ -226,7 +226,7 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter, goto out; 
++adapter->erp_total_count; list_add_tail(&act->list, &adapter->erp_ready_head); - up(&adapter->erp_ready_sem); + wake_up(&adapter->erp_ready_wq); zfcp_dbf_rec_thread("eracte1", adapter->dbf); retval = 0; out: @@ -641,7 +641,8 @@ static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action) } zfcp_dbf_rec_thread_lock("erasfx1", adapter->dbf); - down(&adapter->erp_ready_sem); + wait_event(adapter->erp_ready_wq, + !list_empty(&adapter->erp_ready_head)); zfcp_dbf_rec_thread_lock("erasfx2", adapter->dbf); if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) break; @@ -682,7 +683,8 @@ static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *act) return ZFCP_ERP_FAILED; zfcp_dbf_rec_thread_lock("erasox1", adapter->dbf); - down(&adapter->erp_ready_sem); + wait_event(adapter->erp_ready_wq, + !list_empty(&adapter->erp_ready_head)); zfcp_dbf_rec_thread_lock("erasox2", adapter->dbf); if (act->status & ZFCP_STATUS_ERP_TIMEDOUT) return ZFCP_ERP_FAILED; @@ -1285,21 +1287,17 @@ static int zfcp_erp_thread(void *data) struct list_head *next; struct zfcp_erp_action *act; unsigned long flags; - int ignore; - - daemonize("zfcperp%s", dev_name(&adapter->ccw_device->dev)); - /* Block all signals */ - siginitsetinv(¤t->blocked, 0); - atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status); - wake_up(&adapter->erp_thread_wqh); - - while (!(atomic_read(&adapter->status) & - ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL)) { + for (;;) { zfcp_dbf_rec_thread_lock("erthrd1", adapter->dbf); - ignore = down_interruptible(&adapter->erp_ready_sem); + wait_event_interruptible(adapter->erp_ready_wq, + !list_empty(&adapter->erp_ready_head) || + kthread_should_stop()); zfcp_dbf_rec_thread_lock("erthrd2", adapter->dbf); + if (kthread_should_stop()) + break; + write_lock_irqsave(&adapter->erp_lock, flags); next = adapter->erp_ready_head.next; write_unlock_irqrestore(&adapter->erp_lock, flags); @@ -1313,9 +1311,6 @@ static int zfcp_erp_thread(void *data) } } - atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status); - wake_up(&adapter->erp_thread_wqh); - return 0; } @@ -1327,18 +1322,17 @@ static int zfcp_erp_thread(void *data) */ int zfcp_erp_thread_setup(struct zfcp_adapter *adapter) { - int retval; + struct task_struct *thread; - atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status); - retval = kernel_thread(zfcp_erp_thread, adapter, SIGCHLD); - if (retval < 0) { + thread = kthread_run(zfcp_erp_thread, adapter, "zfcperp%s", + dev_name(&adapter->ccw_device->dev)); + if (IS_ERR(thread)) { dev_err(&adapter->ccw_device->dev, "Creating an ERP thread for the FCP device failed.\n"); - return retval; + return PTR_ERR(thread); } - wait_event(adapter->erp_thread_wqh, - atomic_read(&adapter->status) & - ZFCP_STATUS_ADAPTER_ERP_THREAD_UP); + + adapter->erp_thread = thread; return 0; } @@ -1353,16 +1347,8 @@ int zfcp_erp_thread_setup(struct zfcp_adapter *adapter) */ void zfcp_erp_thread_kill(struct zfcp_adapter *adapter) { - atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL, &adapter->status); - up(&adapter->erp_ready_sem); - zfcp_dbf_rec_thread_lock("erthrk1", adapter->dbf); - - wait_event(adapter->erp_thread_wqh, - !(atomic_read(&adapter->status) & - ZFCP_STATUS_ADAPTER_ERP_THREAD_UP)); - - atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL, - &adapter->status); + kthread_stop(adapter->erp_thread); + adapter->erp_thread = NULL; } /** -- cgit v1.2.3 From 98fc4d5c8cd9bd1a412cca922feecb54c1c22d8e Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 18 Aug 2009 
15:43:26 +0200 Subject: [SCSI] zfcp: Simplify and update ct/gs and els timeout handling The recommendation for a timeout of 2 * R_A_TOV is the same for ct/gs and els requests, so set it in the common function used for initializing both request types. Besides, the timer inside zfcp should only run longer than the timeout set for the channel, so 10 seconds more should be enough (instead of 60 seconds). Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_def.h | 5 ----- drivers/s390/scsi/zfcp_fc.c | 3 --- drivers/s390/scsi/zfcp_fsf.c | 15 ++++++--------- 3 files changed, 6 insertions(+), 17 deletions(-) (limited to 'drivers/s390') diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index ce65d88e280c..99830758e873 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -73,9 +73,6 @@ /*************** FIBRE CHANNEL PROTOCOL SPECIFIC DEFINES ********************/ -/* timeout for name-server lookup (in seconds) */ -#define ZFCP_NS_GID_PN_TIMEOUT 10 - /* task attribute values in FCP-2 FCP_CMND IU */ #define SIMPLE_Q 0 #define HEAD_OF_Q 1 @@ -319,7 +316,6 @@ struct ct_iu_gpn_ft_req { * @resp: scatter-gather list for response * @handler: handler function (called for response to the request) * @handler_data: data passed to handler function - * @timeout: FSF timeout for this request * @completion: completion for synchronization purposes * @status: used to pass error status to calling function */ @@ -329,7 +325,6 @@ struct zfcp_send_ct { struct scatterlist *resp; void (*handler)(unsigned long); unsigned long handler_data; - int timeout; struct completion *completion; int status; }; diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 5c1f12247e42..82f148d09968 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -283,7 +283,6 @@ static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port, gid_pn->ct.wka_port = &adapter->gs->ds; gid_pn->ct.handler = zfcp_fc_ns_handler; gid_pn->ct.handler_data = (unsigned long) &compl_rec; - gid_pn->ct.timeout = ZFCP_NS_GID_PN_TIMEOUT; gid_pn->ct.req = &gid_pn->req; gid_pn->ct.resp = &gid_pn->resp; sg_init_one(&gid_pn->req, &gid_pn->ct_iu_req, @@ -556,7 +555,6 @@ static int zfcp_fc_send_gpn_ft(struct zfcp_gpn_ft *gpn_ft, ct->wka_port = &adapter->gs->ds; ct->handler = zfcp_fc_ns_handler; ct->handler_data = (unsigned long)&compl_rec; - ct->timeout = 10; ct->req = &gpn_ft->sg_req; ct->resp = gpn_ft->sg_resp; @@ -845,7 +843,6 @@ int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *job) ct_fc_job->ct.req = job->request_payload.sg_list; ct_fc_job->ct.resp = job->reply_payload.sg_list; - ct_fc_job->ct.timeout = ZFCP_FSF_REQUEST_TIMEOUT; ct_fc_job->ct.handler = zfcp_fc_generic_ct_handler; ct_fc_job->ct.handler_data = (unsigned long) ct_fc_job; ct_fc_job->ct.completion = NULL; diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index c241f032fd49..f09c863dc6bd 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -1060,7 +1060,12 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, sg_resp, max_sbals); if (bytes <= 0) return -EIO; + + /* common settings for ct/gs and els requests */ req->qtcb->bottom.support.resp_buf_length = bytes; + req->qtcb->bottom.support.service_class = FSF_CLASS_3; + req->qtcb->bottom.support.timeout = 2 * R_A_TOV; + zfcp_fsf_start_timer(req, 2 * R_A_TOV + 10); return 0; } @@ -1096,12 +1101,9 @@ int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, 
mempool_t *pool) req->handler = zfcp_fsf_send_ct_handler; req->qtcb->header.port_handle = wka_port->handle; - req->qtcb->bottom.support.service_class = FSF_CLASS_3; - req->qtcb->bottom.support.timeout = ct->timeout; req->data = ct; zfcp_dbf_san_ct_request(req); - zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); ret = zfcp_fsf_req_send(req); if (ret) @@ -1176,7 +1178,6 @@ int zfcp_fsf_send_els(struct zfcp_send_els *els) { struct zfcp_fsf_req *req; struct zfcp_qdio *qdio = els->adapter->qdio; - struct fsf_qtcb_bottom_support *bottom; int ret = -EIO; spin_lock_bh(&qdio->req_q_lock); @@ -1196,16 +1197,12 @@ int zfcp_fsf_send_els(struct zfcp_send_els *els) if (ret) goto failed_send; - bottom = &req->qtcb->bottom.support; + req->qtcb->bottom.support.d_id = els->d_id; req->handler = zfcp_fsf_send_els_handler; - bottom->d_id = els->d_id; - bottom->service_class = FSF_CLASS_3; - bottom->timeout = 2 * R_A_TOV; req->data = els; zfcp_dbf_san_els_request(req); - zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); ret = zfcp_fsf_req_send(req); if (ret) goto failed_send; -- cgit v1.2.3 From 143bb6bfe36d20618d8bf667915fe14d14b8ae2f Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 18 Aug 2009 15:43:27 +0200 Subject: [SCSI] zfcp: Defer resource allocation to first ccw_set_online call So far, zfcp allocated all resources required for FCP adapters/subchannels when the device was discovered in the ccw_probe callback. If there are lots of unused FCP subchannels attached to a system, this is a waste of resources. To alleviate this, defer the resource allocation to the first call to ccw_set_online. To avoid disruptions during possible following calls to ccw_set_offline and then ccw_set_online, keep the adapter resources until the device is finally being removed via ccw_remove. While doing this, also manage the zfcp erp thread together with all other adapter resources in zfcp_adapter_enqueue/dequeue. 
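The control flow described above can be summarized by the following hypothetical sketch (demo_* names, simplified locking; the real driver serializes these paths with its config semaphore): the probe callback becomes a stub, the first set_online call allocates the adapter and stores it in the device's drvdata, later calls find it there, and remove releases it.

#include <linux/device.h>
#include <linux/mutex.h>
#include <asm/ccwdev.h>

struct demo_adapter;			/* hypothetical per-device state */
static DEFINE_MUTEX(demo_config_mutex);	/* stands in for zfcp's config lock */

/* Assumed helpers: allocate/free all per-device resources and set/clear
 * dev_get_drvdata(&cdev->dev); they are not real zfcp symbols. */
extern int demo_adapter_enqueue(struct ccw_device *cdev);
extern void demo_adapter_dequeue(struct demo_adapter *adapter);

static int demo_ccw_probe(struct ccw_device *cdev)
{
	return 0;			/* allocation deferred to set_online */
}

static int demo_ccw_set_online(struct ccw_device *cdev)
{
	struct demo_adapter *adapter;
	int ret = 0;

	mutex_lock(&demo_config_mutex);
	adapter = dev_get_drvdata(&cdev->dev);
	if (!adapter) {			/* first set_online: allocate now */
		ret = demo_adapter_enqueue(cdev);
		if (ret)
			goto out;
		adapter = dev_get_drvdata(&cdev->dev);
	}
	/* ... open the adapter, kick off recovery, register with SCSI ... */
out:
	mutex_unlock(&demo_config_mutex);
	return ret;
}

static void demo_ccw_remove(struct ccw_device *cdev)
{
	struct demo_adapter *adapter;

	ccw_device_set_offline(cdev);
	mutex_lock(&demo_config_mutex);
	adapter = dev_get_drvdata(&cdev->dev);
	if (adapter)			/* NULL if the device was never set online */
		demo_adapter_dequeue(adapter);
	mutex_unlock(&demo_config_mutex);
}

Because set_offline no longer frees anything in this scheme, an offline/online cycle finds the adapter still in drvdata and skips reallocation, which is the "avoid disruptions" point made in the commit message.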
Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 6 ++++ drivers/s390/scsi/zfcp_ccw.c | 74 +++++++++++++++++++++++++------------------- drivers/s390/scsi/zfcp_erp.c | 5 +++ 3 files changed, 54 insertions(+), 31 deletions(-) (limited to 'drivers/s390') diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index e8f39f02bc3b..d1e75d36ed1a 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -541,6 +541,9 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) rwlock_init(&adapter->erp_lock); rwlock_init(&adapter->abort_lock); + if (zfcp_erp_thread_setup(adapter)) + goto erp_thread_failed; + INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler); INIT_WORK(&adapter->scan_work, _zfcp_fc_scan_ports_later); @@ -561,6 +564,8 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) return 0; sysfs_failed: + zfcp_erp_thread_kill(adapter); +erp_thread_failed: zfcp_fc_gs_destroy(adapter); generic_services_failed: zfcp_destroy_adapter_work_queue(adapter); @@ -602,6 +607,7 @@ void zfcp_adapter_dequeue(struct zfcp_adapter *adapter) return; zfcp_fc_gs_destroy(adapter); + zfcp_erp_thread_kill(adapter); zfcp_destroy_adapter_work_queue(adapter); zfcp_dbf_adapter_unregister(adapter->dbf); zfcp_free_low_mem_buffers(adapter); diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c index d9da5c42ccbe..82ae6ed7ef83 100644 --- a/drivers/s390/scsi/zfcp_ccw.c +++ b/drivers/s390/scsi/zfcp_ccw.c @@ -18,6 +18,9 @@ static int zfcp_ccw_suspend(struct ccw_device *cdev) { struct zfcp_adapter *adapter = dev_get_drvdata(&cdev->dev); + if (!adapter) + return 0; + down(&zfcp_data.config_sema); zfcp_erp_adapter_shutdown(adapter, 0, "ccsusp1", NULL); @@ -33,6 +36,9 @@ static int zfcp_ccw_activate(struct ccw_device *cdev) { struct zfcp_adapter *adapter = dev_get_drvdata(&cdev->dev); + if (!adapter) + return 0; + zfcp_erp_modify_adapter_status(adapter, "ccresu1", NULL, ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET); zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, @@ -63,25 +69,14 @@ int zfcp_ccw_priv_sch(struct zfcp_adapter *adapter) * zfcp_ccw_probe - probe function of zfcp driver * @ccw_device: pointer to belonging ccw device * - * This function gets called by the common i/o layer and sets up the initial - * data structures for each fcp adapter, which was detected by the system. - * Also the sysfs files for this adapter will be created by this function. - * In addition the nameserver port will be added to the ports of the adapter - * and its sysfs representation will be created too. + * This function gets called by the common i/o layer for each FCP + * device found on the current system. This is only a stub to make cio + * work: To only allocate adapter resources for devices actually used, + * the allocation is deferred to the first call to ccw_set_online. 
*/ static int zfcp_ccw_probe(struct ccw_device *ccw_device) { - int retval = 0; - - down(&zfcp_data.config_sema); - if (zfcp_adapter_enqueue(ccw_device)) { - dev_err(&ccw_device->dev, - "Setting up data structures for the " - "FCP adapter failed\n"); - retval = -EINVAL; - } - up(&zfcp_data.config_sema); - return retval; + return 0; } /** @@ -102,8 +97,11 @@ static void zfcp_ccw_remove(struct ccw_device *ccw_device) LIST_HEAD(port_remove_lh); ccw_device_set_offline(ccw_device); + down(&zfcp_data.config_sema); adapter = dev_get_drvdata(&ccw_device->dev); + if (!adapter) + goto out; write_lock_irq(&zfcp_data.config_lock); list_for_each_entry_safe(port, p, &adapter->port_list_head, list) { @@ -129,6 +127,7 @@ static void zfcp_ccw_remove(struct ccw_device *ccw_device) wait_event(adapter->remove_wq, atomic_read(&adapter->refcount) == 0); zfcp_adapter_dequeue(adapter); +out: up(&zfcp_data.config_sema); } @@ -136,22 +135,33 @@ static void zfcp_ccw_remove(struct ccw_device *ccw_device) * zfcp_ccw_set_online - set_online function of zfcp driver * @ccw_device: pointer to belonging ccw device * - * This function gets called by the common i/o layer and sets an adapter - * into state online. Setting an fcp device online means that it will be - * registered with the SCSI stack, that the QDIO queues will be set up - * and that the adapter will be opened (asynchronously). + * This function gets called by the common i/o layer and sets an + * adapter into state online. The first call will allocate all + * adapter resources that will be retained until the device is removed + * via zfcp_ccw_remove. + * + * Setting an fcp device online means that it will be registered with + * the SCSI stack, that the QDIO queues will be set up and that the + * adapter will be opened. */ static int zfcp_ccw_set_online(struct ccw_device *ccw_device) { struct zfcp_adapter *adapter; - int retval; + int ret = 0; down(&zfcp_data.config_sema); adapter = dev_get_drvdata(&ccw_device->dev); - retval = zfcp_erp_thread_setup(adapter); - if (retval) - goto out; + if (!adapter) { + ret = zfcp_adapter_enqueue(ccw_device); + if (ret) { + dev_err(&ccw_device->dev, + "Setting up data structures for the " + "FCP adapter failed\n"); + goto out; + } + adapter = dev_get_drvdata(&ccw_device->dev); + } /* initialize request counter */ BUG_ON(!zfcp_reqlist_isempty(adapter)); @@ -162,13 +172,11 @@ static int zfcp_ccw_set_online(struct ccw_device *ccw_device) zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, "ccsonl2", NULL); zfcp_erp_wait(adapter); +out: up(&zfcp_data.config_sema); - flush_work(&adapter->scan_work); - return 0; - - out: - up(&zfcp_data.config_sema); - return retval; + if (!ret) + flush_work(&adapter->scan_work); + return ret; } /** @@ -184,10 +192,13 @@ static int zfcp_ccw_set_offline(struct ccw_device *ccw_device) down(&zfcp_data.config_sema); adapter = dev_get_drvdata(&ccw_device->dev); + if (!adapter) + goto out; + zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1", NULL); zfcp_erp_wait(adapter); - zfcp_erp_thread_kill(adapter); up(&zfcp_data.config_sema); +out: return 0; } @@ -244,6 +255,7 @@ static void zfcp_ccw_shutdown(struct ccw_device *cdev) adapter = dev_get_drvdata(&cdev->dev); zfcp_erp_adapter_shutdown(adapter, 0, "ccshut1", NULL); zfcp_erp_wait(adapter); + zfcp_erp_thread_kill(adapter); up(&zfcp_data.config_sema); } diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 577e15708598..73d366ba31e5 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -150,6 +150,9 
@@ static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter, a_status = atomic_read(&adapter->status); if (a_status & ZFCP_STATUS_COMMON_ERP_INUSE) return 0; + if (!(a_status & ZFCP_STATUS_COMMON_RUNNING) && + !(a_status & ZFCP_STATUS_COMMON_OPEN)) + return 0; /* shutdown requested for closed adapter */ } return need; @@ -1349,6 +1352,8 @@ void zfcp_erp_thread_kill(struct zfcp_adapter *adapter) { kthread_stop(adapter->erp_thread); adapter->erp_thread = NULL; + WARN_ON(!list_empty(&adapter->erp_ready_head)); + WARN_ON(!list_empty(&adapter->erp_running_head)); } /** -- cgit v1.2.3 From 24680defdb55e073c5e43d14318a164b842d8ce7 Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 18 Aug 2009 15:43:28 +0200 Subject: [SCSI] zfcp: Replace config semaphore with mutex The config semaphore is only used as a mutex, so replace it with a simple mutex. Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 22 +++++++++++----------- drivers/s390/scsi/zfcp_ccw.c | 20 ++++++++++---------- drivers/s390/scsi/zfcp_def.h | 3 +-- drivers/s390/scsi/zfcp_fc.c | 4 ++-- drivers/s390/scsi/zfcp_sysfs.c | 16 ++++++++-------- 5 files changed, 32 insertions(+), 33 deletions(-) (limited to 'drivers/s390') diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index d1e75d36ed1a..8e989159e4ed 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -84,7 +84,7 @@ static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun) struct zfcp_port *port; struct zfcp_unit *unit; - down(&zfcp_data.config_sema); + mutex_lock(&zfcp_data.config_mutex); read_lock_irq(&zfcp_data.config_lock); adapter = zfcp_get_adapter_by_busid(busid); if (adapter) @@ -99,20 +99,20 @@ static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun) unit = zfcp_unit_enqueue(port, lun); if (IS_ERR(unit)) goto out_unit; - up(&zfcp_data.config_sema); + mutex_unlock(&zfcp_data.config_mutex); ccw_device_set_online(adapter->ccw_device); zfcp_erp_wait(adapter); flush_work(&unit->scsi_work); - down(&zfcp_data.config_sema); + mutex_lock(&zfcp_data.config_mutex); zfcp_unit_put(unit); out_unit: zfcp_port_put(port); out_port: zfcp_adapter_put(adapter); out_adapter: - up(&zfcp_data.config_sema); + mutex_unlock(&zfcp_data.config_mutex); return; } @@ -176,7 +176,7 @@ static int __init zfcp_module_init(void) if (!zfcp_data.gid_pn_cache) goto out_gid_cache; - sema_init(&zfcp_data.config_sema, 1); + mutex_init(&zfcp_data.config_mutex); rwlock_init(&zfcp_data.config_lock); zfcp_data.scsi_transport_template = @@ -266,7 +266,7 @@ static void zfcp_sysfs_unit_release(struct device *dev) * @port: pointer to port where unit is added * @fcp_lun: FCP LUN of unit to be enqueued * Returns: pointer to enqueued unit on success, ERR_PTR on error - * Locks: config_sema must be held to serialize changes to the unit list + * Locks: config_mutex must be held to serialize changes to the unit list * * Sets up some unit internal structures and creates sysfs entry. 
*/ @@ -356,7 +356,7 @@ void zfcp_unit_dequeue(struct zfcp_unit *unit) static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) { - /* must only be called with zfcp_data.config_sema taken */ + /* must only be called with zfcp_data.config_mutex taken */ adapter->pool.erp_req = mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req)); if (!adapter->pool.erp_req) @@ -404,7 +404,7 @@ static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter) { - /* zfcp_data.config_sema must be held */ + /* zfcp_data.config_mutex must be held */ if (adapter->pool.erp_req) mempool_destroy(adapter->pool.erp_req); if (adapter->pool.scsi_req) @@ -491,7 +491,7 @@ static void zfcp_destroy_adapter_work_queue(struct zfcp_adapter *adapter) * Enqueues an adapter at the end of the adapter list in the driver data. * All adapter internal structures are set up. * Proc-fs entries are also created. - * locks: config_sema must be held to serialise changes to the adapter list + * locks: config_mutex must be held to serialize changes to the adapter list */ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) { @@ -499,7 +499,7 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) /* * Note: It is safe to release the list_lock, as any list changes - * are protected by the config_sema, which must be held to get here + * are protected by the config_mutex, which must be held to get here */ adapter = kzalloc(sizeof(struct zfcp_adapter), GFP_KERNEL); @@ -630,7 +630,7 @@ static void zfcp_sysfs_port_release(struct device *dev) * @status: initial status for the port * @d_id: destination id of the remote port to be enqueued * Returns: pointer to enqueued port on success, ERR_PTR on error - * Locks: config_sema must be held to serialize changes to the port list + * Locks: config_mutex must be held to serialize changes to the port list * * All port internal structures are set up and the sysfs entry is generated. 
* d_id is used to enqueue ports with a well known address like the Directory diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c index 82ae6ed7ef83..0c90f8e71605 100644 --- a/drivers/s390/scsi/zfcp_ccw.c +++ b/drivers/s390/scsi/zfcp_ccw.c @@ -21,12 +21,12 @@ static int zfcp_ccw_suspend(struct ccw_device *cdev) if (!adapter) return 0; - down(&zfcp_data.config_sema); + mutex_lock(&zfcp_data.config_mutex); zfcp_erp_adapter_shutdown(adapter, 0, "ccsusp1", NULL); zfcp_erp_wait(adapter); - up(&zfcp_data.config_sema); + mutex_unlock(&zfcp_data.config_mutex); return 0; } @@ -98,7 +98,7 @@ static void zfcp_ccw_remove(struct ccw_device *ccw_device) ccw_device_set_offline(ccw_device); - down(&zfcp_data.config_sema); + mutex_lock(&zfcp_data.config_mutex); adapter = dev_get_drvdata(&ccw_device->dev); if (!adapter) goto out; @@ -128,7 +128,7 @@ static void zfcp_ccw_remove(struct ccw_device *ccw_device) zfcp_adapter_dequeue(adapter); out: - up(&zfcp_data.config_sema); + mutex_unlock(&zfcp_data.config_mutex); } /** @@ -149,7 +149,7 @@ static int zfcp_ccw_set_online(struct ccw_device *ccw_device) struct zfcp_adapter *adapter; int ret = 0; - down(&zfcp_data.config_sema); + mutex_lock(&zfcp_data.config_mutex); adapter = dev_get_drvdata(&ccw_device->dev); if (!adapter) { @@ -173,7 +173,7 @@ static int zfcp_ccw_set_online(struct ccw_device *ccw_device) "ccsonl2", NULL); zfcp_erp_wait(adapter); out: - up(&zfcp_data.config_sema); + mutex_unlock(&zfcp_data.config_mutex); if (!ret) flush_work(&adapter->scan_work); return ret; @@ -190,14 +190,14 @@ static int zfcp_ccw_set_offline(struct ccw_device *ccw_device) { struct zfcp_adapter *adapter; - down(&zfcp_data.config_sema); + mutex_lock(&zfcp_data.config_mutex); adapter = dev_get_drvdata(&ccw_device->dev); if (!adapter) goto out; zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1", NULL); zfcp_erp_wait(adapter); - up(&zfcp_data.config_sema); + mutex_unlock(&zfcp_data.config_mutex); out: return 0; } @@ -251,12 +251,12 @@ static void zfcp_ccw_shutdown(struct ccw_device *cdev) { struct zfcp_adapter *adapter; - down(&zfcp_data.config_sema); + mutex_lock(&zfcp_data.config_mutex); adapter = dev_get_drvdata(&cdev->dev); zfcp_erp_adapter_shutdown(adapter, 0, "ccshut1", NULL); zfcp_erp_wait(adapter); zfcp_erp_thread_kill(adapter); - up(&zfcp_data.config_sema); + mutex_unlock(&zfcp_data.config_mutex); } static struct ccw_driver zfcp_ccw_driver = { diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 99830758e873..cc98eead2c3c 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -604,8 +604,7 @@ struct zfcp_data { rwlock_t config_lock; /* serialises changes to adapter/port/unit lists */ - struct semaphore config_sema; /* serialises configuration - changes */ + struct mutex config_mutex; struct kmem_cache *gpn_ft_cache; struct kmem_cache *qtcb_cache; struct kmem_cache *sr_buffer_cache; diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 82f148d09968..722f22de8753 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -613,7 +613,7 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries) return -E2BIG; } - down(&zfcp_data.config_sema); + mutex_lock(&zfcp_data.config_mutex); /* first entry is the header */ for (x = 1; x < max_entries && !last; x++) { @@ -647,7 +647,7 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries) zfcp_erp_wait(adapter); list_for_each_entry_safe(port, tmp, &adapter->port_list_head, list) 
zfcp_fc_validate_port(port); - up(&zfcp_data.config_sema); + mutex_unlock(&zfcp_data.config_mutex); return ret; } diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index c86496bb608b..079a8cf518a3 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c @@ -88,7 +88,7 @@ static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev, \ unsigned long val; \ int retval = 0; \ \ - down(&zfcp_data.config_sema); \ + mutex_lock(&zfcp_data.config_mutex); \ if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_REMOVE) { \ retval = -EBUSY; \ goto out; \ @@ -105,7 +105,7 @@ static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev, \ _reopen_id, NULL); \ zfcp_erp_wait(_adapter); \ out: \ - up(&zfcp_data.config_sema); \ + mutex_unlock(&zfcp_data.config_mutex); \ return retval ? retval : (ssize_t) count; \ } \ static ZFCP_DEV_ATTR(_feat, failed, S_IWUSR | S_IRUGO, \ @@ -142,7 +142,7 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev, int retval = 0; LIST_HEAD(port_remove_lh); - down(&zfcp_data.config_sema); + mutex_lock(&zfcp_data.config_mutex); if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE) { retval = -EBUSY; goto out; @@ -173,7 +173,7 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev, zfcp_port_put(port); zfcp_port_dequeue(port); out: - up(&zfcp_data.config_sema); + mutex_unlock(&zfcp_data.config_mutex); return retval ? retval : (ssize_t) count; } static ZFCP_DEV_ATTR(adapter, port_remove, S_IWUSR, NULL, @@ -207,7 +207,7 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev, u64 fcp_lun; int retval = -EINVAL; - down(&zfcp_data.config_sema); + mutex_lock(&zfcp_data.config_mutex); if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) { retval = -EBUSY; goto out; @@ -226,7 +226,7 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev, zfcp_erp_wait(unit->port->adapter); zfcp_unit_put(unit); out: - up(&zfcp_data.config_sema); + mutex_unlock(&zfcp_data.config_mutex); return retval ? retval : (ssize_t) count; } static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store); @@ -241,7 +241,7 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev, int retval = 0; LIST_HEAD(unit_remove_lh); - down(&zfcp_data.config_sema); + mutex_lock(&zfcp_data.config_mutex); if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) { retval = -EBUSY; goto out; @@ -282,7 +282,7 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev, zfcp_unit_put(unit); zfcp_unit_dequeue(unit); out: - up(&zfcp_data.config_sema); + mutex_unlock(&zfcp_data.config_mutex); return retval ? retval : (ssize_t) count; } static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store); -- cgit v1.2.3 From f4395b652636398eb4712e6f3caf79c9a6c02e21 Mon Sep 17 00:00:00 2001 From: Sebastian Ott Date: Tue, 18 Aug 2009 15:43:29 +0200 Subject: [SCSI] zfcp: proper use of device register Don't use kfree directly after device registration started. 
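The driver core rule behind this patch, and the follow-up patch after it, is that once device_register() has been called, the reference count of the embedded struct device owns the object: an error path must drop that reference with put_device() and let the release callback free the memory, because a direct kfree() could free an object the driver core still references. A minimal sketch of the pattern, with hypothetical names (my_object, my_object_release, my_object_create) standing in for the zfcp structures:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>

struct my_object {
        struct device dev;
};

/* release callback: the only place the memory is freed */
static void my_object_release(struct device *dev)
{
        kfree(container_of(dev, struct my_object, dev));
}

static struct my_object *my_object_create(struct device *parent)
{
        struct my_object *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

        if (!obj)
                return ERR_PTR(-ENOMEM);

        obj->dev.parent = parent;
        obj->dev.release = my_object_release;

        if (dev_set_name(&obj->dev, "my_object")) {
                kfree(obj);     /* nothing registered yet, plain kfree is fine */
                return ERR_PTR(-ENOMEM);
        }

        if (device_register(&obj->dev)) {
                /* registration has started: drop the reference, do not kfree */
                put_device(&obj->dev);
                return ERR_PTR(-EINVAL);
        }

        return obj;
}

In the zfcp patches the same split shows up as kfree() on the paths before device_register() and put_device() on the paths after it.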
Signed-off-by: Sebastian Ott Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) (limited to 'drivers/s390') diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 8e989159e4ed..a7954443ec19 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -309,8 +309,10 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun) } read_unlock_irq(&zfcp_data.config_lock); - if (device_register(&unit->sysfs_device)) - goto err_out_free; + if (device_register(&unit->sysfs_device)) { + put_device(&unit->sysfs_device); + return ERR_PTR(-EINVAL); + } if (sysfs_create_group(&unit->sysfs_device.kobj, &zfcp_sysfs_unit_attrs)) { @@ -675,8 +677,10 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, } read_unlock_irq(&zfcp_data.config_lock); - if (device_register(&port->sysfs_device)) - goto err_out_free; + if (device_register(&port->sysfs_device)) { + put_device(&port->sysfs_device); + goto err_out; + } retval = sysfs_create_group(&port->sysfs_device.kobj, &zfcp_sysfs_port_attrs); -- cgit v1.2.3 From 0fac3f477b6b520ae7d972ceb6e958e6807c8e1a Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 18 Aug 2009 15:43:30 +0200 Subject: [SCSI] zfcp: Handle failures during device allocation correctly dev_set_name tries to allocate memory, so check the return value for allocation failures. After dev_set_name succeeds, call device_register as next step to be able to use put_device during error handling. Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_aux.c | 65 ++++++++++++++++++++------------------------ 1 file changed, 29 insertions(+), 36 deletions(-) (limited to 'drivers/s390') diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index a7954443ec19..1be6bf7e8ce6 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -274,6 +274,13 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun) { struct zfcp_unit *unit; + read_lock_irq(&zfcp_data.config_lock); + if (zfcp_get_unit_by_lun(port, fcp_lun)) { + read_unlock_irq(&zfcp_data.config_lock); + return ERR_PTR(-EINVAL); + } + read_unlock_irq(&zfcp_data.config_lock); + unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL); if (!unit) return ERR_PTR(-ENOMEM); @@ -285,8 +292,11 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun) unit->port = port; unit->fcp_lun = fcp_lun; - dev_set_name(&unit->sysfs_device, "0x%016llx", - (unsigned long long) fcp_lun); + if (dev_set_name(&unit->sysfs_device, "0x%016llx", + (unsigned long long) fcp_lun)) { + kfree(unit); + return ERR_PTR(-ENOMEM); + } unit->sysfs_device.parent = &port->sysfs_device; unit->sysfs_device.release = zfcp_sysfs_unit_release; dev_set_drvdata(&unit->sysfs_device, unit); @@ -302,13 +312,6 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun) unit->latencies.cmd.channel.min = 0xFFFFFFFF; unit->latencies.cmd.fabric.min = 0xFFFFFFFF; - read_lock_irq(&zfcp_data.config_lock); - if (zfcp_get_unit_by_lun(port, fcp_lun)) { - read_unlock_irq(&zfcp_data.config_lock); - goto err_out_free; - } - read_unlock_irq(&zfcp_data.config_lock); - if (device_register(&unit->sysfs_device)) { put_device(&unit->sysfs_device); return ERR_PTR(-EINVAL); @@ -317,7 +320,7 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun) if 
(sysfs_create_group(&unit->sysfs_device.kobj, &zfcp_sysfs_unit_attrs)) { device_unregister(&unit->sysfs_device); - return ERR_PTR(-EIO); + return ERR_PTR(-EINVAL); } zfcp_unit_get(unit); @@ -332,10 +335,6 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun) zfcp_port_get(port); return unit; - -err_out_free: - kfree(unit); - return ERR_PTR(-EINVAL); } /** @@ -642,7 +641,13 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, u32 status, u32 d_id) { struct zfcp_port *port; - int retval; + + read_lock_irq(&zfcp_data.config_lock); + if (zfcp_get_port_by_wwpn(adapter, wwpn)) { + read_unlock_irq(&zfcp_data.config_lock); + return ERR_PTR(-EINVAL); + } + read_unlock_irq(&zfcp_data.config_lock); port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL); if (!port) @@ -663,31 +668,24 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, atomic_set_mask(status | ZFCP_STATUS_COMMON_REMOVE, &port->status); atomic_set(&port->refcount, 0); - dev_set_name(&port->sysfs_device, "0x%016llx", - (unsigned long long)wwpn); + if (dev_set_name(&port->sysfs_device, "0x%016llx", + (unsigned long long)wwpn)) { + kfree(port); + return ERR_PTR(-ENOMEM); + } port->sysfs_device.parent = &adapter->ccw_device->dev; - port->sysfs_device.release = zfcp_sysfs_port_release; dev_set_drvdata(&port->sysfs_device, port); - read_lock_irq(&zfcp_data.config_lock); - if (zfcp_get_port_by_wwpn(adapter, wwpn)) { - read_unlock_irq(&zfcp_data.config_lock); - goto err_out_free; - } - read_unlock_irq(&zfcp_data.config_lock); - if (device_register(&port->sysfs_device)) { put_device(&port->sysfs_device); - goto err_out; + return ERR_PTR(-EINVAL); } - retval = sysfs_create_group(&port->sysfs_device.kobj, - &zfcp_sysfs_port_attrs); - - if (retval) { + if (sysfs_create_group(&port->sysfs_device.kobj, + &zfcp_sysfs_port_attrs)) { device_unregister(&port->sysfs_device); - goto err_out; + return ERR_PTR(-EINVAL); } zfcp_port_get(port); @@ -701,11 +699,6 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, zfcp_adapter_get(adapter); return port; - -err_out_free: - kfree(port); -err_out: - return ERR_PTR(-EINVAL); } /** -- cgit v1.2.3 From b592e89ac9af521be164490e45c53c93e89c776f Mon Sep 17 00:00:00 2001 From: Christof Schmitt Date: Tue, 18 Aug 2009 15:43:31 +0200 Subject: [SCSI] zfcp: Remove duplicated code for debug timestamps The timestamp calculation used for s390dbf output is the same in a private zfcp function and in debug.c. Replace both with a common inline function. 
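Both copies of the timestamp conversion removed in the diff below do the same arithmetic: an s390 TOD-format clock value carries 4096 ticks per microsecond, so after subtracting the TOD value of the Unix epoch (TOD_UNIX_EPOCH) the code shifts right by 12 bits to get microseconds, splits off whole seconds, and scales the remainder to nanoseconds. A sketch of what the shared inline helper boils down to, assuming the kernel's TOD_UNIX_EPOCH constant and the do_div() macro:

#include <linux/time.h>         /* struct timespec */
#include <asm/div64.h>          /* do_div() */
#include <asm/timex.h>          /* TOD_UNIX_EPOCH */

/* sketch only; mirrors the removed zfcp_dbf_timestamp() shown below */
static inline void stck_to_timespec_sketch(unsigned long long stck,
                                           struct timespec *ts)
{
        unsigned long long todval = stck - TOD_UNIX_EPOCH; /* ticks since 1970 */
        unsigned long long sec = todval >> 12;  /* 4096 ticks = 1 microsecond */

        do_div(sec, 1000000);                   /* microseconds -> seconds */
        ts->tv_sec = sec;
        todval -= (sec * 1000000) << 12;        /* subtract the whole seconds */
        ts->tv_nsec = (todval * 1000) >> 12;    /* leftover ticks -> nanoseconds */
}

The helper actually added by the patch simply forwards to tod_to_timeval(stck - TOD_UNIX_EPOCH, ts), which performs the same steps.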
Reviewed-by: Swen Schillig Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- arch/s390/include/asm/timex.h | 8 ++++++++ arch/s390/kernel/debug.c | 8 +------- arch/s390/kernel/time.c | 1 + drivers/s390/scsi/zfcp_dbf.c | 19 +++---------------- 4 files changed, 13 insertions(+), 23 deletions(-) (limited to 'drivers/s390') diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h index cc21e3e20fd7..a07e699bb65c 100644 --- a/arch/s390/include/asm/timex.h +++ b/arch/s390/include/asm/timex.h @@ -88,6 +88,14 @@ int get_sync_clock(unsigned long long *clock); void init_cpu_timer(void); unsigned long long monotonic_clock(void); +void tod_to_timeval(__u64, struct timespec *); + +static inline +void stck_to_timespec(unsigned long long stck, struct timespec *ts) +{ + tod_to_timeval(stck - TOD_UNIX_EPOCH, ts); +} + extern u64 sched_clock_base_cc; #endif diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c index be8bceaf37d9..4c512561687d 100644 --- a/arch/s390/kernel/debug.c +++ b/arch/s390/kernel/debug.c @@ -63,8 +63,6 @@ typedef struct } debug_sprintf_entry_t; -extern void tod_to_timeval(uint64_t todval, struct timespec *xtime); - /* internal function prototyes */ static int debug_init(void); @@ -1450,17 +1448,13 @@ debug_dflt_header_fn(debug_info_t * id, struct debug_view *view, int area, debug_entry_t * entry, char *out_buf) { struct timespec time_spec; - unsigned long long time; char *except_str; unsigned long caller; int rc = 0; unsigned int level; level = entry->id.fields.level; - time = entry->id.stck; - /* adjust todclock to 1970 */ - time -= 0x8126d60e46000000LL - (0x3c26700LL * 1000000 * 4096); - tod_to_timeval(time, &time_spec); + stck_to_timespec(entry->id.stck, &time_spec); if (entry->id.fields.exception) except_str = "*"; diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index d4c8e9c47c81..9693d98d4d4a 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -90,6 +90,7 @@ void tod_to_timeval(__u64 todval, struct timespec *xtime) todval -= (sec * 1000000) << 12; xtime->tv_nsec = ((todval * 1000) >> 12); } +EXPORT_SYMBOL(tod_to_timeval); void clock_comparator_work(void) { diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index c066428b2878..215b70749e95 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -38,19 +38,6 @@ static void zfcp_dbf_hexdump(debug_info_t *dbf, void *to, int to_len, } } -/* FIXME: this duplicate this code in s390 debug feature */ -static void zfcp_dbf_timestamp(unsigned long long stck, struct timespec *time) -{ - unsigned long long sec; - - stck -= 0x8126d60e46000000LL - (0x3c26700LL * 1000000 * 4096); - sec = stck >> 12; - do_div(sec, 1000000); - time->tv_sec = sec; - stck -= (sec * 1000000) << 12; - time->tv_nsec = ((stck * 1000) >> 12); -} - static void zfcp_dbf_tag(char **p, const char *label, const char *tag) { int i; @@ -107,7 +94,7 @@ static int zfcp_dbf_view_header(debug_info_t *id, struct debug_view *view, char *p = out_buf; if (strncmp(dump->tag, "dump", ZFCP_DBF_TAG_SIZE) != 0) { - zfcp_dbf_timestamp(entry->id.stck, &t); + stck_to_timespec(entry->id.stck, &t); zfcp_dbf_out(&p, "timestamp", "%011lu:%06lu", t.tv_sec, t.tv_nsec); zfcp_dbf_out(&p, "cpu", "%02i", entry->id.fields.cpuid); @@ -320,7 +307,7 @@ static void zfcp_dbf_hba_view_response(char **p, zfcp_dbf_out(p, "fsf_command", "0x%08x", r->fsf_command); zfcp_dbf_out(p, "fsf_reqid", "0x%0Lx", r->fsf_reqid); zfcp_dbf_out(p, "fsf_seqno", "0x%08x", r->fsf_seqno); - 
zfcp_dbf_timestamp(r->fsf_issued, &t); + stck_to_timespec(r->fsf_issued, &t); zfcp_dbf_out(p, "fsf_issued", "%011lu:%06lu", t.tv_sec, t.tv_nsec); zfcp_dbf_out(p, "fsf_prot_status", "0x%08x", r->fsf_prot_status); zfcp_dbf_out(p, "fsf_status", "0x%08x", r->fsf_status); @@ -976,7 +963,7 @@ static int zfcp_dbf_scsi_view_format(debug_info_t *id, struct debug_view *view, zfcp_dbf_out(&p, "old_fsf_reqid", "0x%0Lx", r->old_fsf_reqid); zfcp_dbf_out(&p, "fsf_reqid", "0x%0Lx", r->fsf_reqid); zfcp_dbf_out(&p, "fsf_seqno", "0x%08x", r->fsf_seqno); - zfcp_dbf_timestamp(r->fsf_issued, &t); + stck_to_timespec(r->fsf_issued, &t); zfcp_dbf_out(&p, "fsf_issued", "%011lu:%06lu", t.tv_sec, t.tv_nsec); if (strncmp(r->tag, "rslt", ZFCP_DBF_TAG_SIZE) == 0) { -- cgit v1.2.3 From 41e05a12c7aae16f0381103af3e5ca791e87ce59 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Tue, 18 Aug 2009 15:43:32 +0200 Subject: [SCSI] zfcp: optimize zfcp_qdio_account Remove expensive ktime_get()/ktime_us_delta() functions from the hot path and use get_clock_monotonic() instead. This eliminates seven function calls and avoids a lot of unnecessary calculations. Signed-off-by: Heiko Carstens Signed-off-by: Christof Schmitt Signed-off-by: James Bottomley --- drivers/s390/scsi/zfcp_def.h | 2 +- drivers/s390/scsi/zfcp_qdio.c | 11 +++++------ 2 files changed, 6 insertions(+), 7 deletions(-) (limited to 'drivers/s390') diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index cc98eead2c3c..7da2fad8f515 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -438,7 +438,7 @@ struct zfcp_qdio { struct zfcp_qdio_queue req_q; spinlock_t stat_lock; spinlock_t req_q_lock; - ktime_t req_q_time; + unsigned long long req_q_time; u64 req_q_util; atomic_t req_q_full; wait_queue_head_t req_q_wq; diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index 2b499e28ff1f..6c5228b627fc 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c @@ -56,16 +56,15 @@ static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt) } /* this needs to be called prior to updating the queue fill level */ -static void zfcp_qdio_account(struct zfcp_qdio *qdio) +static inline void zfcp_qdio_account(struct zfcp_qdio *qdio) { - ktime_t now; - s64 span; + unsigned long long now, span; int free, used; spin_lock(&qdio->stat_lock); - now = ktime_get(); - span = ktime_us_delta(now, qdio->req_q_time); - free = max(0, atomic_read(&qdio->req_q.count)); + now = get_clock_monotonic(); + span = (now - qdio->req_q_time) >> 12; + free = atomic_read(&qdio->req_q.count); used = QDIO_MAX_BUFFERS_PER_Q - free; qdio->req_q_util += used * span; qdio->req_q_time = now; -- cgit v1.2.3
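One note on the last patch: get_clock_monotonic() returns a value in s390 TOD format, where 4096 ticks make up one microsecond, so shifting the delta right by 12 bits keeps the accumulated utilization in the same microsecond-based units that ktime_us_delta() produced, while avoiding the generic timekeeping code on the I/O hot path. A condensed sketch of the accounting pattern, with hypothetical names and a caller-supplied TOD-format timestamp:

#include <linux/spinlock.h>
#include <linux/types.h>

/* hypothetical, condensed form of the accounting done in zfcp_qdio_account() */
struct queue_stats {
        spinlock_t lock;
        unsigned long long last_tod;    /* TOD-format time of previous sample */
        u64 util;                       /* accumulated buffers-used * microseconds */
};

static inline void queue_account(struct queue_stats *stats, int buffers_used,
                                 unsigned long long now_tod)
{
        unsigned long long span_us;

        spin_lock(&stats->lock);
        span_us = (now_tod - stats->last_tod) >> 12; /* TOD ticks -> microseconds */
        stats->util += (u64)buffers_used * span_us;
        stats->last_tod = now_tod;
        spin_unlock(&stats->lock);
}

A caller would sample the clock once, e.g. queue_account(&stats, used, get_clock_monotonic()), before updating the queue fill level, just as the patched function does.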