Diffstat (limited to 'drivers/s390')
-rw-r--r-- | drivers/s390/block/dasd.c            |  31
-rw-r--r-- | drivers/s390/block/dasd_genhd.c      |  26
-rw-r--r-- | drivers/s390/block/scm_blk.c         | 222
-rw-r--r-- | drivers/s390/block/scm_blk.h         |   6
-rw-r--r-- | drivers/s390/block/scm_blk_cluster.c |  69
-rw-r--r-- | drivers/s390/char/Kconfig            |  10
-rw-r--r-- | drivers/s390/char/sclp_async.c       |   3
-rw-r--r-- | drivers/s390/cio/eadm_sch.c          |   2
8 files changed, 278 insertions, 91 deletions
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 329db997ee66..4abf11965484 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1377,6 +1377,20 @@ int dasd_term_IO(struct dasd_ccw_req *cqr)
 			      "I/O error, retry");
 		break;
 	case -EINVAL:
+		/*
+		 * device not valid so no I/O could be running
+		 * handle CQR as termination successful
+		 */
+		cqr->status = DASD_CQR_CLEARED;
+		cqr->stopclk = get_tod_clock();
+		cqr->starttime = 0;
+		/* no retries for invalid devices */
+		cqr->retries = -1;
+		DBF_DEV_EVENT(DBF_ERR, device, "%s",
+			      "EINVAL, handle as terminated");
+		/* fake rc to success */
+		rc = 0;
+		break;
 	case -EBUSY:
 		DBF_DEV_EVENT(DBF_ERR, device, "%s",
 			      "device busy, retry later");
@@ -1683,11 +1697,8 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 	if (cqr->status == DASD_CQR_CLEAR_PENDING &&
 	    scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
 		cqr->status = DASD_CQR_CLEARED;
-		if (cqr->callback_data == DASD_SLEEPON_START_TAG)
-			cqr->callback_data = DASD_SLEEPON_END_TAG;
 		dasd_device_clear_timer(device);
 		wake_up(&dasd_flush_wq);
-		wake_up(&generic_waitq);
 		dasd_schedule_device_bh(device);
 		return;
 	}
@@ -2326,21 +2337,11 @@ retry:
 			return -EAGAIN;

 		/* normal recovery for basedev IO */
-		if (__dasd_sleep_on_erp(cqr)) {
+		if (__dasd_sleep_on_erp(cqr))
+			/* handle erp first */
 			goto retry;
-			/* remember that ERP was needed */
-			rc = 1;
-			/* skip processing for active cqr */
-			if (cqr->status != DASD_CQR_TERMINATED &&
-			    cqr->status != DASD_CQR_NEED_ERP)
-				break;
-		}
 	}
-	/* start ERP requests in upper loop */
-	if (rc)
-		goto retry;
-
 	return 0;
 }
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index f224d59c4b6b..90f39f79f5d7 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -99,15 +99,37 @@ void dasd_gendisk_free(struct dasd_block *block)
 int dasd_scan_partitions(struct dasd_block *block)
 {
 	struct block_device *bdev;
+	int retry, rc;

+	retry = 5;
 	bdev = bdget_disk(block->gdp, 0);
-	if (!bdev || blkdev_get(bdev, FMODE_READ, NULL) < 0)
+	if (!bdev) {
+		DBF_DEV_EVENT(DBF_ERR, block->base, "%s",
+			      "scan partitions error, bdget returned NULL");
 		return -ENODEV;
+	}
+
+	rc = blkdev_get(bdev, FMODE_READ, NULL);
+	if (rc < 0) {
+		DBF_DEV_EVENT(DBF_ERR, block->base,
+			      "scan partitions error, blkdev_get returned %d",
+			      rc);
+		return -ENODEV;
+	}
 	/*
 	 * See fs/partition/check.c:register_disk,rescan_partitions
 	 * Can't call rescan_partitions directly. Use ioctl.
 	 */
-	ioctl_by_bdev(bdev, BLKRRPART, 0);
+	rc = ioctl_by_bdev(bdev, BLKRRPART, 0);
+	while (rc == -EBUSY && retry > 0) {
+		schedule();
+		rc = ioctl_by_bdev(bdev, BLKRRPART, 0);
+		retry--;
+		DBF_DEV_EVENT(DBF_ERR, block->base,
+			      "scan partitions error, retry %d rc %d",
+			      retry, rc);
+	}
+
 	/*
 	 * Since the matching blkdev_put call to the blkdev_get in
 	 * this function is not called before dasd_destroy_partitions
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 56046ab39629..75d9896deccb 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -10,6 +10,7 @@

 #include <linux/interrupt.h>
 #include <linux/spinlock.h>
+#include <linux/mempool.h>
 #include <linux/module.h>
 #include <linux/blkdev.h>
 #include <linux/genhd.h>
@@ -20,13 +21,18 @@
 debug_info_t *scm_debug;

 static int scm_major;
+static mempool_t *aidaw_pool;
 static DEFINE_SPINLOCK(list_lock);
 static LIST_HEAD(inactive_requests);
 static unsigned int nr_requests = 64;
+static unsigned int nr_requests_per_io = 8;
 static atomic_t nr_devices = ATOMIC_INIT(0);
 module_param(nr_requests, uint, S_IRUGO);
 MODULE_PARM_DESC(nr_requests, "Number of parallel requests.");
+module_param(nr_requests_per_io, uint, S_IRUGO);
+MODULE_PARM_DESC(nr_requests_per_io, "Number of requests per IO.");
+
 MODULE_DESCRIPTION("Block driver for s390 storage class memory.");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("scm:scmdev*");
@@ -36,8 +42,8 @@ static void __scm_free_rq(struct scm_request *scmrq)
 	struct aob_rq_header *aobrq = to_aobrq(scmrq);

 	free_page((unsigned long) scmrq->aob);
-	free_page((unsigned long) scmrq->aidaw);
 	__scm_free_rq_cluster(scmrq);
+	kfree(scmrq->request);
 	kfree(aobrq);
 }
@@ -53,6 +59,8 @@ static void scm_free_rqs(void)
 		__scm_free_rq(scmrq);
 	}
 	spin_unlock_irq(&list_lock);
+
+	mempool_destroy(aidaw_pool);
 }

 static int __scm_alloc_rq(void)
@@ -65,17 +73,17 @@ static int __scm_alloc_rq(void)
 		return -ENOMEM;

 	scmrq = (void *) aobrq->data;
-	scmrq->aidaw = (void *) get_zeroed_page(GFP_DMA);
 	scmrq->aob = (void *) get_zeroed_page(GFP_DMA);
-	if (!scmrq->aob || !scmrq->aidaw) {
-		__scm_free_rq(scmrq);
-		return -ENOMEM;
-	}
+	if (!scmrq->aob)
+		goto free;

-	if (__scm_alloc_rq_cluster(scmrq)) {
-		__scm_free_rq(scmrq);
-		return -ENOMEM;
-	}
+	scmrq->request = kcalloc(nr_requests_per_io, sizeof(scmrq->request[0]),
+				 GFP_KERNEL);
+	if (!scmrq->request)
+		goto free;
+
+	if (__scm_alloc_rq_cluster(scmrq))
+		goto free;

 	INIT_LIST_HEAD(&scmrq->list);
 	spin_lock_irq(&list_lock);
@@ -83,12 +91,19 @@ static int __scm_alloc_rq(void)
 	spin_unlock_irq(&list_lock);

 	return 0;
+free:
+	__scm_free_rq(scmrq);
+	return -ENOMEM;
 }

 static int scm_alloc_rqs(unsigned int nrqs)
 {
 	int ret = 0;

+	aidaw_pool = mempool_create_page_pool(max(nrqs/8, 1U), 0);
+	if (!aidaw_pool)
+		return -ENOMEM;
+
 	while (nrqs-- && !ret)
 		ret = __scm_alloc_rq();
@@ -112,6 +127,18 @@ out:
 static void scm_request_done(struct scm_request *scmrq)
 {
 	unsigned long flags;
+	struct msb *msb;
+	u64 aidaw;
+	int i;
+
+	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
+		msb = &scmrq->aob->msb[i];
+		aidaw = msb->data_addr;
+
+		if ((msb->flags & MSB_FLAG_IDA) && aidaw &&
+		    IS_ALIGNED(aidaw, PAGE_SIZE))
+			mempool_free(virt_to_page(aidaw), aidaw_pool);
+	}

 	spin_lock_irqsave(&list_lock, flags);
 	list_add(&scmrq->list, &inactive_requests);
@@ -123,48 +150,90 @@ static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req)
 	return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT;
 }

-static void scm_request_prepare(struct scm_request *scmrq)
+static inline struct aidaw *scm_aidaw_alloc(void)
+{
+	struct page *page = mempool_alloc(aidaw_pool, GFP_ATOMIC);
+
+	return page ? page_address(page) : NULL;
+}
+
+static inline unsigned long scm_aidaw_bytes(struct aidaw *aidaw)
+{
+	unsigned long _aidaw = (unsigned long) aidaw;
+	unsigned long bytes = ALIGN(_aidaw, PAGE_SIZE) - _aidaw;
+
+	return (bytes / sizeof(*aidaw)) * PAGE_SIZE;
+}
+
+struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes)
+{
+	struct aidaw *aidaw;
+
+	if (scm_aidaw_bytes(scmrq->next_aidaw) >= bytes)
+		return scmrq->next_aidaw;
+
+	aidaw = scm_aidaw_alloc();
+	if (aidaw)
+		memset(aidaw, 0, PAGE_SIZE);
+	return aidaw;
+}
+
+static int scm_request_prepare(struct scm_request *scmrq)
 {
 	struct scm_blk_dev *bdev = scmrq->bdev;
 	struct scm_device *scmdev = bdev->gendisk->private_data;
-	struct aidaw *aidaw = scmrq->aidaw;
-	struct msb *msb = &scmrq->aob->msb[0];
+	int pos = scmrq->aob->request.msb_count;
+	struct msb *msb = &scmrq->aob->msb[pos];
+	struct request *req = scmrq->request[pos];
 	struct req_iterator iter;
+	struct aidaw *aidaw;
 	struct bio_vec bv;

+	aidaw = scm_aidaw_fetch(scmrq, blk_rq_bytes(req));
+	if (!aidaw)
+		return -ENOMEM;
+
 	msb->bs = MSB_BS_4K;
-	scmrq->aob->request.msb_count = 1;
-	msb->scm_addr = scmdev->address +
-		((u64) blk_rq_pos(scmrq->request) << 9);
-	msb->oc = (rq_data_dir(scmrq->request) == READ) ?
-		MSB_OC_READ : MSB_OC_WRITE;
+	scmrq->aob->request.msb_count++;
+	msb->scm_addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
+	msb->oc = (rq_data_dir(req) == READ) ? MSB_OC_READ : MSB_OC_WRITE;
 	msb->flags |= MSB_FLAG_IDA;
 	msb->data_addr = (u64) aidaw;

-	rq_for_each_segment(bv, scmrq->request, iter) {
+	rq_for_each_segment(bv, req, iter) {
 		WARN_ON(bv.bv_offset);
 		msb->blk_count += bv.bv_len >> 12;
 		aidaw->data_addr = (u64) page_address(bv.bv_page);
 		aidaw++;
 	}
+
+	scmrq->next_aidaw = aidaw;
+	return 0;
+}
+
+static inline void scm_request_set(struct scm_request *scmrq,
+				   struct request *req)
+{
+	scmrq->request[scmrq->aob->request.msb_count] = req;
 }

 static inline void scm_request_init(struct scm_blk_dev *bdev,
-				    struct scm_request *scmrq,
-				    struct request *req)
+				    struct scm_request *scmrq)
 {
 	struct aob_rq_header *aobrq = to_aobrq(scmrq);
 	struct aob *aob = scmrq->aob;

+	memset(scmrq->request, 0,
+	       nr_requests_per_io * sizeof(scmrq->request[0]));
 	memset(aob, 0, sizeof(*aob));
-	memset(scmrq->aidaw, 0, PAGE_SIZE);
 	aobrq->scmdev = bdev->scmdev;
 	aob->request.cmd_code = ARQB_CMD_MOVE;
 	aob->request.data = (u64) aobrq;
-	scmrq->request = req;
 	scmrq->bdev = bdev;
 	scmrq->retries = 4;
 	scmrq->error = 0;
+	/* We don't use all msbs - place aidaws at the end of the aob page.
+	 */
+	scmrq->next_aidaw = (void *) &aob->msb[nr_requests_per_io];
 	scm_request_cluster_init(scmrq);
 }

@@ -180,9 +249,12 @@ static void scm_ensure_queue_restart(struct scm_blk_dev *bdev)
 void scm_request_requeue(struct scm_request *scmrq)
 {
 	struct scm_blk_dev *bdev = scmrq->bdev;
+	int i;

 	scm_release_cluster(scmrq);
-	blk_requeue_request(bdev->rq, scmrq->request);
+	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++)
+		blk_requeue_request(bdev->rq, scmrq->request[i]);
+
 	atomic_dec(&bdev->queued_reqs);
 	scm_request_done(scmrq);
 	scm_ensure_queue_restart(bdev);
@@ -191,20 +263,41 @@ void scm_request_requeue(struct scm_request *scmrq)
 void scm_request_finish(struct scm_request *scmrq)
 {
 	struct scm_blk_dev *bdev = scmrq->bdev;
+	int i;

 	scm_release_cluster(scmrq);
-	blk_end_request_all(scmrq->request, scmrq->error);
+	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++)
+		blk_end_request_all(scmrq->request[i], scmrq->error);
+
 	atomic_dec(&bdev->queued_reqs);
 	scm_request_done(scmrq);
 }

+static int scm_request_start(struct scm_request *scmrq)
+{
+	struct scm_blk_dev *bdev = scmrq->bdev;
+	int ret;
+
+	atomic_inc(&bdev->queued_reqs);
+	if (!scmrq->aob->request.msb_count) {
+		scm_request_requeue(scmrq);
+		return -EINVAL;
+	}
+
+	ret = eadm_start_aob(scmrq->aob);
+	if (ret) {
+		SCM_LOG(5, "no subchannel");
+		scm_request_requeue(scmrq);
+	}
+	return ret;
+}
+
 static void scm_blk_request(struct request_queue *rq)
 {
 	struct scm_device *scmdev = rq->queuedata;
 	struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
-	struct scm_request *scmrq;
+	struct scm_request *scmrq = NULL;
 	struct request *req;
-	int ret;

 	while ((req = blk_peek_request(rq))) {
 		if (req->cmd_type != REQ_TYPE_FS) {
@@ -214,39 +307,64 @@ static void scm_blk_request(struct request_queue *rq)
 			continue;
 		}

-		if (!scm_permit_request(bdev, req)) {
-			scm_ensure_queue_restart(bdev);
-			return;
-		}
-		scmrq = scm_request_fetch();
+		if (!scm_permit_request(bdev, req))
+			goto out;
+
 		if (!scmrq) {
-			SCM_LOG(5, "no request");
-			scm_ensure_queue_restart(bdev);
-			return;
+			scmrq = scm_request_fetch();
+			if (!scmrq) {
+				SCM_LOG(5, "no request");
+				goto out;
+			}
+			scm_request_init(bdev, scmrq);
 		}
-		scm_request_init(bdev, scmrq, req);
+		scm_request_set(scmrq, req);
+
 		if (!scm_reserve_cluster(scmrq)) {
 			SCM_LOG(5, "cluster busy");
+			scm_request_set(scmrq, NULL);
+			if (scmrq->aob->request.msb_count)
+				goto out;
+
+			scm_request_done(scmrq);
 			return;
 		}
+
 		if (scm_need_cluster_request(scmrq)) {
-			atomic_inc(&bdev->queued_reqs);
-			blk_start_request(req);
-			scm_initiate_cluster_request(scmrq);
-			return;
+			if (scmrq->aob->request.msb_count) {
+				/* Start cluster requests separately. */
+				scm_request_set(scmrq, NULL);
+				if (scm_request_start(scmrq))
+					return;
+			} else {
+				atomic_inc(&bdev->queued_reqs);
+				blk_start_request(req);
+				scm_initiate_cluster_request(scmrq);
+			}
+			scmrq = NULL;
+			continue;
+		}
+
+		if (scm_request_prepare(scmrq)) {
+			SCM_LOG(5, "aidaw alloc failed");
+			scm_request_set(scmrq, NULL);
+			goto out;
 		}
-		scm_request_prepare(scmrq);
-		atomic_inc(&bdev->queued_reqs);
 		blk_start_request(req);
-		ret = eadm_start_aob(scmrq->aob);
-		if (ret) {
-			SCM_LOG(5, "no subchannel");
-			scm_request_requeue(scmrq);
+		if (scmrq->aob->request.msb_count < nr_requests_per_io)
+			continue;
+
+		if (scm_request_start(scmrq))
 			return;
-		}
+
+		scmrq = NULL;
 	}
+out:
+	if (scmrq)
+		scm_request_start(scmrq);
+	else
+		scm_ensure_queue_restart(bdev);
 }

 static void __scmrq_log_error(struct scm_request *scmrq)
@@ -443,11 +561,19 @@ void scm_blk_set_available(struct scm_blk_dev *bdev)
 	spin_unlock_irqrestore(&bdev->lock, flags);
 }

+static bool __init scm_blk_params_valid(void)
+{
+	if (!nr_requests_per_io || nr_requests_per_io > 64)
+		return false;
+
+	return scm_cluster_size_valid();
+}
+
 static int __init scm_blk_init(void)
 {
 	int ret = -EINVAL;

-	if (!scm_cluster_size_valid())
+	if (!scm_blk_params_valid())
 		goto out;

 	ret = register_blkdev(0, "scm");
diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h
index e59331e6c2e5..09218cdc5129 100644
--- a/drivers/s390/block/scm_blk.h
+++ b/drivers/s390/block/scm_blk.h
@@ -30,8 +30,8 @@ struct scm_blk_dev {

 struct scm_request {
 	struct scm_blk_dev *bdev;
-	struct request *request;
-	struct aidaw *aidaw;
+	struct aidaw *next_aidaw;
+	struct request **request;
 	struct aob *aob;
 	struct list_head list;
 	u8 retries;
@@ -55,6 +55,8 @@ void scm_blk_irq(struct scm_device *, void *, int);
 void scm_request_finish(struct scm_request *);
 void scm_request_requeue(struct scm_request *);

+struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes);
+
 int scm_drv_init(void);
 void scm_drv_cleanup(void);
diff --git a/drivers/s390/block/scm_blk_cluster.c b/drivers/s390/block/scm_blk_cluster.c
index 9aae909d47a5..09db45296eed 100644
--- a/drivers/s390/block/scm_blk_cluster.c
+++ b/drivers/s390/block/scm_blk_cluster.c
@@ -57,39 +57,52 @@ void scm_request_cluster_init(struct scm_request *scmrq)
 	scmrq->cluster.state = CLUSTER_NONE;
 }

-static bool clusters_intersect(struct scm_request *A, struct scm_request *B)
+static bool clusters_intersect(struct request *A, struct request *B)
 {
 	unsigned long firstA, lastA, firstB, lastB;

-	firstA = ((u64) blk_rq_pos(A->request) << 9) / CLUSTER_SIZE;
-	lastA = (((u64) blk_rq_pos(A->request) << 9) +
-		blk_rq_bytes(A->request) - 1) / CLUSTER_SIZE;
+	firstA = ((u64) blk_rq_pos(A) << 9) / CLUSTER_SIZE;
+	lastA = (((u64) blk_rq_pos(A) << 9) +
+		blk_rq_bytes(A) - 1) / CLUSTER_SIZE;

-	firstB = ((u64) blk_rq_pos(B->request) << 9) / CLUSTER_SIZE;
-	lastB = (((u64) blk_rq_pos(B->request) << 9) +
-		blk_rq_bytes(B->request) - 1) / CLUSTER_SIZE;
+	firstB = ((u64) blk_rq_pos(B) << 9) / CLUSTER_SIZE;
+	lastB = (((u64) blk_rq_pos(B) << 9) +
+		blk_rq_bytes(B) - 1) / CLUSTER_SIZE;

 	return (firstB <= lastA && firstA <= lastB);
 }

 bool scm_reserve_cluster(struct scm_request *scmrq)
 {
+	struct request *req = scmrq->request[scmrq->aob->request.msb_count];
 	struct scm_blk_dev *bdev = scmrq->bdev;
 	struct scm_request *iter;
+	int pos, add = 1;

 	if (write_cluster_size == 0)
 		return true;

 	spin_lock(&bdev->lock);
 	list_for_each_entry(iter, &bdev->cluster_list, cluster.list) {
-		if (clusters_intersect(scmrq, iter) &&
-		    (rq_data_dir(scmrq->request) == WRITE ||
-		     rq_data_dir(iter->request) == WRITE)) {
-			spin_unlock(&bdev->lock);
-			return false;
+		if (iter == scmrq) {
+			/*
+			 * We don't have to use clusters_intersect here, since
+			 * cluster requests are always started separately.
+			 */
+			add = 0;
+			continue;
+		}
+		for (pos = 0; pos <= iter->aob->request.msb_count; pos++) {
+			if (clusters_intersect(req, iter->request[pos]) &&
+			    (rq_data_dir(req) == WRITE ||
+			     rq_data_dir(iter->request[pos]) == WRITE)) {
+				spin_unlock(&bdev->lock);
+				return false;
+			}
 		}
 	}
-	list_add(&scmrq->cluster.list, &bdev->cluster_list);
+	if (add)
+		list_add(&scmrq->cluster.list, &bdev->cluster_list);
 	spin_unlock(&bdev->lock);

 	return true;
@@ -114,14 +127,14 @@ void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev)
 	blk_queue_io_opt(bdev->rq, CLUSTER_SIZE);
 }

-static void scm_prepare_cluster_request(struct scm_request *scmrq)
+static int scm_prepare_cluster_request(struct scm_request *scmrq)
 {
 	struct scm_blk_dev *bdev = scmrq->bdev;
 	struct scm_device *scmdev = bdev->gendisk->private_data;
-	struct request *req = scmrq->request;
-	struct aidaw *aidaw = scmrq->aidaw;
+	struct request *req = scmrq->request[0];
 	struct msb *msb = &scmrq->aob->msb[0];
 	struct req_iterator iter;
+	struct aidaw *aidaw;
 	struct bio_vec bv;
 	int i = 0;
 	u64 addr;
@@ -131,11 +144,9 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq)
 		scmrq->cluster.state = CLUSTER_READ;
 		/* fall through */
 	case CLUSTER_READ:
-		scmrq->aob->request.msb_count = 1;
 		msb->bs = MSB_BS_4K;
 		msb->oc = MSB_OC_READ;
 		msb->flags = MSB_FLAG_IDA;
-		msb->data_addr = (u64) aidaw;
 		msb->blk_count = write_cluster_size;

 		addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
@@ -146,6 +157,12 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq)
 				   CLUSTER_SIZE))
 			msb->blk_count = 2 * write_cluster_size;

+		aidaw = scm_aidaw_fetch(scmrq, msb->blk_count * PAGE_SIZE);
+		if (!aidaw)
+			return -ENOMEM;
+
+		scmrq->aob->request.msb_count = 1;
+		msb->data_addr = (u64) aidaw;
 		for (i = 0; i < msb->blk_count; i++) {
 			aidaw->data_addr = (u64) scmrq->cluster.buf[i];
 			aidaw++;
@@ -153,6 +170,7 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq)
 		break;
 	case CLUSTER_WRITE:
+		aidaw = (void *) msb->data_addr;
 		msb->oc = MSB_OC_WRITE;

 		for (addr = msb->scm_addr;
@@ -173,22 +191,29 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq)
 		}
 		break;
 	}
+	return 0;
 }

 bool scm_need_cluster_request(struct scm_request *scmrq)
 {
-	if (rq_data_dir(scmrq->request) == READ)
+	int pos = scmrq->aob->request.msb_count;
+
+	if (rq_data_dir(scmrq->request[pos]) == READ)
 		return false;

-	return blk_rq_bytes(scmrq->request) < CLUSTER_SIZE;
+	return blk_rq_bytes(scmrq->request[pos]) < CLUSTER_SIZE;
 }

 /* Called with queue lock held. */
 void scm_initiate_cluster_request(struct scm_request *scmrq)
 {
-	scm_prepare_cluster_request(scmrq);
+	if (scm_prepare_cluster_request(scmrq))
+		goto requeue;
 	if (eadm_start_aob(scmrq->aob))
-		scm_request_requeue(scmrq);
+		goto requeue;
+	return;
+requeue:
+	scm_request_requeue(scmrq);
 }

 bool scm_test_cluster_request(struct scm_request *scmrq)
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index db2cb1f8a1b5..a5c6f7e157aa 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -102,6 +102,16 @@ config SCLP_ASYNC
 	  want for inform other people about your kernel panics,
 	  need this feature and intend to run your kernel in LPAR.

+config SCLP_ASYNC_ID
+	string "Component ID for Call Home"
+	depends on SCLP_ASYNC
+	default "000000000"
+	help
+	  The Component ID for Call Home is used to identify the correct
+	  problem reporting queue the call home records should be sent to.
+
+	  If your are unsure, please use the default value "000000000".
+
 config HMC_DRV
 	def_tristate m
 	prompt "Support for file transfers from HMC drive CD/DVD-ROM"
diff --git a/drivers/s390/char/sclp_async.c b/drivers/s390/char/sclp_async.c
index 5f9f929e891c..19c25427f27f 100644
--- a/drivers/s390/char/sclp_async.c
+++ b/drivers/s390/char/sclp_async.c
@@ -137,7 +137,8 @@ static int sclp_async_send_wait(char *message)
 	 * Retain Queue
 	 * e.g. 5639CC140 500 Red Hat RHEL5 Linux for zSeries (RHEL AS)
 	 */
-	strncpy(sccb->evbuf.comp_id, "000000000", sizeof(sccb->evbuf.comp_id));
+	strncpy(sccb->evbuf.comp_id, CONFIG_SCLP_ASYNC_ID,
+		sizeof(sccb->evbuf.comp_id));
 	sccb->evbuf.header.length = sizeof(sccb->evbuf);
 	sccb->header.length = sizeof(sccb->evbuf) + sizeof(sccb->header);
 	sccb->header.function_code = SCLP_NORMAL_WRITE;
diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c
index 37f0834300ea..bee8c11cd086 100644
--- a/drivers/s390/cio/eadm_sch.c
+++ b/drivers/s390/cio/eadm_sch.c
@@ -31,7 +31,7 @@
 MODULE_DESCRIPTION("driver for s390 eadm subchannels");
 MODULE_LICENSE("GPL");

-#define EADM_TIMEOUT (5 * HZ)
+#define EADM_TIMEOUT (7 * HZ)

 static DEFINE_SPINLOCK(list_lock);
 static LIST_HEAD(eadm_list);
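
The dasd_scan_partitions() change above retries the BLKRRPART ioctl a bounded number of times when another holder of the block device makes it return -EBUSY, yielding the CPU between attempts. A minimal kernel-style sketch of that retry shape; demo_rescan() is an invented stand-in for ioctl_by_bdev(bdev, BLKRRPART, 0), not a real interface:

#include <linux/errno.h>
#include <linux/sched.h>

/* Hypothetical helper that models a call which may report -EBUSY. */
static int demo_rescan(void)
{
	static int busy = 2;	/* pretend the device is busy twice */

	return busy-- > 0 ? -EBUSY : 0;
}

static int demo_rescan_with_retry(void)
{
	int retry = 5;
	int rc = demo_rescan();

	while (rc == -EBUSY && retry > 0) {
		/* let whoever holds the device finish before trying again */
		schedule();
		rc = demo_rescan();
		retry--;
	}
	return rc;
}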
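
The scm_blk.c changes drop the per-request aidaw page and draw pages from a shared mempool with GFP_ATOMIC on the I/O path, returning them when the request completes. A minimal sketch of that mempool pattern, assuming invented demo_* names; the pool sizing mirrors the max(nrqs/8, 1U) used in the patch:

#include <linux/mempool.h>
#include <linux/gfp.h>
#include <linux/mm.h>

static mempool_t *demo_pool;

static int demo_pool_init(unsigned int nr_requests)
{
	/* keep at least nr_requests/8 pages in reserve */
	demo_pool = mempool_create_page_pool(max(nr_requests / 8, 1U), 0);
	return demo_pool ? 0 : -ENOMEM;
}

static void *demo_get_buffer(void)
{
	/* called on the request path, so no sleeping allocation */
	struct page *page = mempool_alloc(demo_pool, GFP_ATOMIC);

	return page ? page_address(page) : NULL;
}

static void demo_put_buffer(void *buf)
{
	if (buf)
		mempool_free(virt_to_page(buf), demo_pool);
}

static void demo_pool_exit(void)
{
	mempool_destroy(demo_pool);
}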
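
scm_aidaw_fetch() only allocates a fresh page when the aidaw entries left between next_aidaw and the end of its page cannot describe the request; scm_aidaw_bytes() turns the distance to the page boundary into a data capacity. A user-space sketch of that arithmetic, where the 16-byte AIDAW entry size and 4 KiB page size are assumptions not taken from this diff:

#include <stdio.h>

#define DEMO_PAGE_SIZE	4096UL
#define DEMO_AIDAW_SIZE	16UL

static unsigned long aidaw_capacity(unsigned long next_aidaw)
{
	unsigned long to_page_end = -next_aidaw & (DEMO_PAGE_SIZE - 1);

	/* each remaining aidaw slot can address one 4 KiB block of data */
	return to_page_end / DEMO_AIDAW_SIZE * DEMO_PAGE_SIZE;
}

int main(void)
{
	/* 256 bytes into a page: 240 slots left -> 983040 data bytes */
	printf("%lu\n", aidaw_capacity(0x10100));
	/* at a page boundary: 0 slots left -> a new page must be fetched */
	printf("%lu\n", aidaw_capacity(0x11000));
	return 0;
}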
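
The reworked clusters_intersect() is the usual closed-interval overlap test on cluster numbers: [firstA, lastA] and [firstB, lastB] overlap exactly when each range starts no later than the other ends. A stand-alone illustration with made-up cluster numbers:

#include <stdbool.h>
#include <assert.h>

static bool ranges_overlap(unsigned long firstA, unsigned long lastA,
			   unsigned long firstB, unsigned long lastB)
{
	return firstB <= lastA && firstA <= lastB;
}

int main(void)
{
	/* clusters 2..4 and 4..6 share cluster 4 -> writes must not race */
	assert(ranges_overlap(2, 4, 4, 6));
	/* clusters 0..1 and 2..3 are disjoint -> requests may coexist */
	assert(!ranges_overlap(0, 1, 2, 3));
	return 0;
}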
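
CONFIG_SCLP_ASYNC_ID used in sclp_async.c comes from the new Kconfig string option: string symbols are emitted into the generated autoconf header as quoted literals, so the driver can hand the value to strncpy() like any other string constant. A rough sketch with invented DEMO_* names; the macro would normally be generated from a Kconfig entry of type string with a "000000000" default, and the fallback #define below only exists so the fragment stands on its own:

#include <linux/string.h>
#include <linux/types.h>

#ifndef CONFIG_DEMO_COMP_ID
#define CONFIG_DEMO_COMP_ID "000000000"	/* normally generated from Kconfig */
#endif

static void demo_fill_comp_id(char *comp_id, size_t len)
{
	/*
	 * Fill the fixed-width record field; as in sclp_async_send_wait(),
	 * the result is not guaranteed to be NUL-terminated when the
	 * configured ID is at least len bytes long.
	 */
	strncpy(comp_id, CONFIG_DEMO_COMP_ID, len);
}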