Diffstat (limited to 'drivers/s390')
27 files changed, 413 insertions, 182 deletions
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 6cca72782af6..cf87eb27879f 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -178,6 +178,8 @@ struct dasd_block *dasd_alloc_block(void) (unsigned long) block); INIT_LIST_HEAD(&block->ccw_queue); spin_lock_init(&block->queue_lock); + INIT_LIST_HEAD(&block->format_list); + spin_lock_init(&block->format_lock); timer_setup(&block->timer, dasd_block_timeout, 0); spin_lock_init(&block->profile.lock); @@ -1779,20 +1781,26 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, if (dasd_ese_needs_format(cqr->block, irb)) { if (rq_data_dir((struct request *)cqr->callback_data) == READ) { - device->discipline->ese_read(cqr); + device->discipline->ese_read(cqr, irb); cqr->status = DASD_CQR_SUCCESS; cqr->stopclk = now; dasd_device_clear_timer(device); dasd_schedule_device_bh(device); return; } - fcqr = device->discipline->ese_format(device, cqr); + fcqr = device->discipline->ese_format(device, cqr, irb); if (IS_ERR(fcqr)) { + if (PTR_ERR(fcqr) == -EINVAL) { + cqr->status = DASD_CQR_ERROR; + return; + } /* * If we can't format now, let the request go * one extra round. Maybe we can format later. */ cqr->status = DASD_CQR_QUEUED; + dasd_schedule_device_bh(device); + return; } else { fcqr->status = DASD_CQR_QUEUED; cqr->status = DASD_CQR_QUEUED; @@ -2748,11 +2756,13 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr) { struct request *req; blk_status_t error = BLK_STS_OK; + unsigned int proc_bytes; int status; req = (struct request *) cqr->callback_data; dasd_profile_end(cqr->block, cqr, req); + proc_bytes = cqr->proc_bytes; status = cqr->block->base->discipline->free_cp(cqr, req); if (status < 0) error = errno_to_blk_status(status); @@ -2783,7 +2793,18 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr) blk_mq_end_request(req, error); blk_mq_run_hw_queues(req->q, true); } else { - blk_mq_complete_request(req); + /* + * Partial completed requests can happen with ESE devices. + * During read we might have gotten a NRF error and have to + * complete a request partially. 
+ */ + if (proc_bytes) { + blk_update_request(req, BLK_STS_OK, + blk_rq_bytes(req) - proc_bytes); + blk_mq_requeue_request(req, true); + } else { + blk_mq_complete_request(req); + } } } diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index a28b9ff82378..ad44d22e8859 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -207,6 +207,45 @@ static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head) geo->head |= head; } +/* + * calculate failing track from sense data depending if + * it is an EAV device or not + */ +static int dasd_eckd_track_from_irb(struct irb *irb, struct dasd_device *device, + sector_t *track) +{ + struct dasd_eckd_private *private = device->private; + u8 *sense = NULL; + u32 cyl; + u8 head; + + sense = dasd_get_sense(irb); + if (!sense) { + DBF_DEV_EVENT(DBF_WARNING, device, "%s", + "ESE error no sense data\n"); + return -EINVAL; + } + if (!(sense[27] & DASD_SENSE_BIT_2)) { + DBF_DEV_EVENT(DBF_WARNING, device, "%s", + "ESE error no valid track data\n"); + return -EINVAL; + } + + if (sense[27] & DASD_SENSE_BIT_3) { + /* enhanced addressing */ + cyl = sense[30] << 20; + cyl |= (sense[31] & 0xF0) << 12; + cyl |= sense[28] << 8; + cyl |= sense[29]; + } else { + cyl = sense[29] << 8; + cyl |= sense[30]; + } + head = sense[31] & 0x0F; + *track = cyl * private->rdc_data.trk_per_cyl + head; + return 0; +} + static int set_timestamp(struct ccw1 *ccw, struct DE_eckd_data *data, struct dasd_device *device) { @@ -2986,6 +3025,37 @@ static int dasd_eckd_format_device(struct dasd_device *base, 0, NULL); } +static bool test_and_set_format_track(struct dasd_format_entry *to_format, + struct dasd_block *block) +{ + struct dasd_format_entry *format; + unsigned long flags; + bool rc = false; + + spin_lock_irqsave(&block->format_lock, flags); + list_for_each_entry(format, &block->format_list, list) { + if (format->track == to_format->track) { + rc = true; + goto out; + } + } + list_add_tail(&to_format->list, &block->format_list); + +out: + spin_unlock_irqrestore(&block->format_lock, flags); + return rc; +} + +static void clear_format_track(struct dasd_format_entry *format, + struct dasd_block *block) +{ + unsigned long flags; + + spin_lock_irqsave(&block->format_lock, flags); + list_del_init(&format->list); + spin_unlock_irqrestore(&block->format_lock, flags); +} + /* * Callback function to free ESE format requests. 
*/ @@ -2993,15 +3063,19 @@ static void dasd_eckd_ese_format_cb(struct dasd_ccw_req *cqr, void *data) { struct dasd_device *device = cqr->startdev; struct dasd_eckd_private *private = device->private; + struct dasd_format_entry *format = data; + clear_format_track(format, cqr->basedev->block); private->count--; dasd_ffree_request(cqr, device); } static struct dasd_ccw_req * -dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr) +dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr, + struct irb *irb) { struct dasd_eckd_private *private; + struct dasd_format_entry *format; struct format_data_t fdata; unsigned int recs_per_trk; struct dasd_ccw_req *fcqr; @@ -3011,23 +3085,39 @@ dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr) struct request *req; sector_t first_trk; sector_t last_trk; + sector_t curr_trk; int rc; req = cqr->callback_data; - base = cqr->block->base; + block = cqr->block; + base = block->base; private = base->private; - block = base->block; blksize = block->bp_block; recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize); + format = &startdev->format_entry; first_trk = blk_rq_pos(req) >> block->s2b_shift; sector_div(first_trk, recs_per_trk); last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift; sector_div(last_trk, recs_per_trk); + rc = dasd_eckd_track_from_irb(irb, base, &curr_trk); + if (rc) + return ERR_PTR(rc); - fdata.start_unit = first_trk; - fdata.stop_unit = last_trk; + if (curr_trk < first_trk || curr_trk > last_trk) { + DBF_DEV_EVENT(DBF_WARNING, startdev, + "ESE error track %llu not within range %llu - %llu\n", + curr_trk, first_trk, last_trk); + return ERR_PTR(-EINVAL); + } + format->track = curr_trk; + /* test if track is already in formatting by another thread */ + if (test_and_set_format_track(format, block)) + return ERR_PTR(-EEXIST); + + fdata.start_unit = curr_trk; + fdata.stop_unit = curr_trk; fdata.blksize = blksize; fdata.intensity = private->uses_cdl ? DASD_FMT_INT_COMPAT : 0; @@ -3044,6 +3134,7 @@ dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr) return fcqr; fcqr->callback = dasd_eckd_ese_format_cb; + fcqr->callback_data = (void *) format; return fcqr; } @@ -3051,29 +3142,87 @@ dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr) /* * When data is read from an unformatted area of an ESE volume, this function * returns zeroed data and thereby mimics a read of zero data. + * + * The first unformatted track is the one that got the NRF error, the address is + * encoded in the sense data. + * + * All tracks before have returned valid data and should not be touched. + * All tracks after the unformatted track might be formatted or not. This is + * currently not known, remember the processed data and return the remainder of + * the request to the blocklayer in __dasd_cleanup_cqr(). 
*/ -static void dasd_eckd_ese_read(struct dasd_ccw_req *cqr) +static int dasd_eckd_ese_read(struct dasd_ccw_req *cqr, struct irb *irb) { + struct dasd_eckd_private *private; + sector_t first_trk, last_trk; + sector_t first_blk, last_blk; unsigned int blksize, off; + unsigned int recs_per_trk; struct dasd_device *base; struct req_iterator iter; + struct dasd_block *block; + unsigned int skip_block; + unsigned int blk_count; struct request *req; struct bio_vec bv; + sector_t curr_trk; + sector_t end_blk; char *dst; + int rc; req = (struct request *) cqr->callback_data; base = cqr->block->base; blksize = base->block->bp_block; + block = cqr->block; + private = base->private; + skip_block = 0; + blk_count = 0; + + recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize); + first_trk = first_blk = blk_rq_pos(req) >> block->s2b_shift; + sector_div(first_trk, recs_per_trk); + last_trk = last_blk = + (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift; + sector_div(last_trk, recs_per_trk); + rc = dasd_eckd_track_from_irb(irb, base, &curr_trk); + if (rc) + return rc; + + /* sanity check if the current track from sense data is valid */ + if (curr_trk < first_trk || curr_trk > last_trk) { + DBF_DEV_EVENT(DBF_WARNING, base, + "ESE error track %llu not within range %llu - %llu\n", + curr_trk, first_trk, last_trk); + return -EINVAL; + } + + /* + * if not the first track got the NRF error we have to skip over valid + * blocks + */ + if (curr_trk != first_trk) + skip_block = curr_trk * recs_per_trk - first_blk; + + /* we have no information beyond the current track */ + end_blk = (curr_trk + 1) * recs_per_trk; rq_for_each_segment(bv, req, iter) { dst = page_address(bv.bv_page) + bv.bv_offset; for (off = 0; off < bv.bv_len; off += blksize) { - if (dst && rq_data_dir(req) == READ) { + if (first_blk + blk_count >= end_blk) { + cqr->proc_bytes = blk_count * blksize; + return 0; + } + if (dst && !skip_block) { dst += off; memset(dst, 0, blksize); + } else { + skip_block--; } + blk_count++; } } + return 0; } /* diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index 91c9f9586e0f..fa552f9f1666 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h @@ -187,6 +187,7 @@ struct dasd_ccw_req { void (*callback)(struct dasd_ccw_req *, void *data); void *callback_data; + unsigned int proc_bytes; /* bytes for partial completion */ }; /* @@ -387,8 +388,9 @@ struct dasd_discipline { int (*ext_pool_warn_thrshld)(struct dasd_device *); int (*ext_pool_oos)(struct dasd_device *); int (*ext_pool_exhaust)(struct dasd_device *, struct dasd_ccw_req *); - struct dasd_ccw_req *(*ese_format)(struct dasd_device *, struct dasd_ccw_req *); - void (*ese_read)(struct dasd_ccw_req *); + struct dasd_ccw_req *(*ese_format)(struct dasd_device *, + struct dasd_ccw_req *, struct irb *); + int (*ese_read)(struct dasd_ccw_req *, struct irb *); }; extern struct dasd_discipline *dasd_diag_discipline_pointer; @@ -474,6 +476,11 @@ struct dasd_profile { spinlock_t lock; }; +struct dasd_format_entry { + struct list_head list; + sector_t track; +}; + struct dasd_device { /* Block device stuff. 
*/ struct dasd_block *block; @@ -539,6 +546,7 @@ struct dasd_device { struct dentry *debugfs_dentry; struct dentry *hosts_dentry; struct dasd_profile profile; + struct dasd_format_entry format_entry; }; struct dasd_block { @@ -564,6 +572,9 @@ struct dasd_block { struct dentry *debugfs_dentry; struct dasd_profile profile; + + struct list_head format_list; + spinlock_t format_lock; }; struct dasd_attention_data { diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c index da642e811f7f..4dd2eb634856 100644 --- a/drivers/s390/cio/blacklist.c +++ b/drivers/s390/cio/blacklist.c @@ -303,8 +303,10 @@ static void * cio_ignore_proc_seq_next(struct seq_file *s, void *it, loff_t *offset) { struct ccwdev_iter *iter; + loff_t p = *offset; - if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1)) + (*offset)++; + if (p >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1)) return NULL; iter = it; if (iter->devno == __MAX_SUBCHANNEL) { @@ -314,7 +316,6 @@ cio_ignore_proc_seq_next(struct seq_file *s, void *it, loff_t *offset) return NULL; } else iter->devno++; - (*offset)++; return iter; } diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c index 51038ec309c1..dfcbe54591fb 100644 --- a/drivers/s390/cio/chp.c +++ b/drivers/s390/cio/chp.c @@ -135,7 +135,7 @@ static ssize_t chp_measurement_chars_read(struct file *filp, struct channel_path *chp; struct device *device; - device = container_of(kobj, struct device, kobj); + device = kobj_to_dev(kobj); chp = to_channelpath(device); if (chp->cmg == -1) return 0; @@ -184,7 +184,7 @@ static ssize_t chp_measurement_read(struct file *filp, struct kobject *kobj, struct device *device; unsigned int size; - device = container_of(kobj, struct device, kobj); + device = kobj_to_dev(kobj); chp = to_channelpath(device); css = to_css(chp->dev.parent); diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index 4b0798472643..ff74eb5fce50 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h @@ -182,11 +182,9 @@ enum qdio_queue_irq_states { }; struct qdio_input_q { - /* input buffer acknowledgement flag */ - int polling; /* first ACK'ed buffer */ int ack_start; - /* how much sbals are acknowledged with qebsm */ + /* how many SBALs are acknowledged */ int ack_count; /* last time of noticing incoming data */ u64 timestamp; diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c index 35410e6eda2e..9c0370b27426 100644 --- a/drivers/s390/cio/qdio_debug.c +++ b/drivers/s390/cio/qdio_debug.c @@ -124,9 +124,8 @@ static int qstat_show(struct seq_file *m, void *v) seq_printf(m, "nr_used: %d ftc: %d\n", atomic_read(&q->nr_buf_used), q->first_to_check); if (q->is_input_q) { - seq_printf(m, "polling: %d ack start: %d ack count: %d\n", - q->u.in.polling, q->u.in.ack_start, - q->u.in.ack_count); + seq_printf(m, "ack start: %d ack count: %d\n", + q->u.in.ack_start, q->u.in.ack_count); seq_printf(m, "DSCI: %x IRQs disabled: %u\n", *(u8 *)q->irq_ptr->dsci, test_bit(QDIO_QUEUE_IRQS_DISABLED, diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index f8b897b7e78b..3475317c42e5 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -393,19 +393,15 @@ int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr, static inline void qdio_stop_polling(struct qdio_q *q) { - if (!q->u.in.polling) + if (!q->u.in.ack_count) return; - q->u.in.polling = 0; qperf_inc(q, stop_polling); /* show the card that we are not polling anymore */ - if (is_qebsm(q)) { - set_buf_states(q, q->u.in.ack_start, 
SLSB_P_INPUT_NOT_INIT, - q->u.in.ack_count); - q->u.in.ack_count = 0; - } else - set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT); + set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT, + q->u.in.ack_count); + q->u.in.ack_count = 0; } static inline void account_sbals(struct qdio_q *q, unsigned int count) @@ -451,8 +447,7 @@ static inline void inbound_primed(struct qdio_q *q, unsigned int start, /* for QEBSM the ACK was already set by EQBS */ if (is_qebsm(q)) { - if (!q->u.in.polling) { - q->u.in.polling = 1; + if (!q->u.in.ack_count) { q->u.in.ack_count = count; q->u.in.ack_start = start; return; @@ -471,12 +466,12 @@ static inline void inbound_primed(struct qdio_q *q, unsigned int start, * or by the next inbound run. */ new = add_buf(start, count - 1); - if (q->u.in.polling) { + if (q->u.in.ack_count) { /* reset the previous ACK but first set the new one */ set_buf_state(q, new, SLSB_P_INPUT_ACK); set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT); } else { - q->u.in.polling = 1; + q->u.in.ack_count = 1; set_buf_state(q, new, SLSB_P_INPUT_ACK); } @@ -1479,13 +1474,12 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags, qperf_inc(q, inbound_call); - if (!q->u.in.polling) + if (!q->u.in.ack_count) goto set; /* protect against stop polling setting an ACK for an emptied slsb */ if (count == QDIO_MAX_BUFFERS_PER_Q) { /* overwriting everything, just delete polling status */ - q->u.in.polling = 0; q->u.in.ack_count = 0; goto set; } else if (buf_in_between(q->u.in.ack_start, bufnr, count)) { @@ -1495,15 +1489,14 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags, diff = sub_buf(diff, q->u.in.ack_start); q->u.in.ack_count -= diff; if (q->u.in.ack_count <= 0) { - q->u.in.polling = 0; q->u.in.ack_count = 0; goto set; } q->u.in.ack_start = add_buf(q->u.in.ack_start, diff); + } else { + /* the only ACK will be deleted */ + q->u.in.ack_count = 0; } - else - /* the only ACK will be deleted, so stop polling */ - q->u.in.polling = 0; } set: diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c index dc430bd86ade..e115623b86b2 100644 --- a/drivers/s390/cio/qdio_setup.c +++ b/drivers/s390/cio/qdio_setup.c @@ -8,6 +8,7 @@ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/export.h> +#include <linux/io.h> #include <asm/qdio.h> #include "cio.h" @@ -205,7 +206,7 @@ static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr, /* fill in sl */ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) - q->sl->element[j].sbal = (unsigned long)q->sbal[j]; + q->sl->element[j].sbal = virt_to_phys(q->sbal[j]); } static void setup_queues(struct qdio_irq *irq_ptr, @@ -536,7 +537,7 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr, int qdio_enable_async_operation(struct qdio_output_q *outq) { outq->aobs = kcalloc(QDIO_MAX_BUFFERS_PER_Q, sizeof(struct qaob *), - GFP_ATOMIC); + GFP_KERNEL); if (!outq->aobs) { outq->use_cq = 0; return -ENOMEM; diff --git a/drivers/s390/cio/vfio_ccw_trace.h b/drivers/s390/cio/vfio_ccw_trace.h index 30162a318a8a..f5d31887d413 100644 --- a/drivers/s390/cio/vfio_ccw_trace.h +++ b/drivers/s390/cio/vfio_ccw_trace.h @@ -1,5 +1,5 @@ -/* SPDX-License-Identifier: GPL-2.0 - * Tracepoints for vfio_ccw driver +/* SPDX-License-Identifier: GPL-2.0 */ +/* Tracepoints for vfio_ccw driver * * Copyright IBM Corp. 
2018 * diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h index bb35ba4a8d24..4348fdff1c61 100644 --- a/drivers/s390/crypto/ap_bus.h +++ b/drivers/s390/crypto/ap_bus.h @@ -162,7 +162,7 @@ struct ap_card { unsigned int functions; /* AP device function bitfield. */ int queue_depth; /* AP queue depth.*/ int id; /* AP card number. */ - atomic_t total_request_count; /* # requests ever for this AP device.*/ + atomic64_t total_request_count; /* # requests ever for this AP device.*/ }; #define to_ap_card(x) container_of((x), struct ap_card, ap_dev.device) @@ -179,7 +179,7 @@ struct ap_queue { enum ap_state state; /* State of the AP device. */ int pendingq_count; /* # requests on pendingq list. */ int requestq_count; /* # requests on requestq list. */ - int total_request_count; /* # requests ever for this AP device.*/ + u64 total_request_count; /* # requests ever for this AP device.*/ int request_timeout; /* Request timeout in jiffies. */ struct timer_list timeout; /* Timer for request timeouts. */ struct list_head pendingq; /* List of message sent to AP queue. */ diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c index 63b4cc6cd7e5..e85bfca1ed16 100644 --- a/drivers/s390/crypto/ap_card.c +++ b/drivers/s390/crypto/ap_card.c @@ -63,13 +63,13 @@ static ssize_t request_count_show(struct device *dev, char *buf) { struct ap_card *ac = to_ap_card(dev); - unsigned int req_cnt; + u64 req_cnt; req_cnt = 0; spin_lock_bh(&ap_list_lock); - req_cnt = atomic_read(&ac->total_request_count); + req_cnt = atomic64_read(&ac->total_request_count); spin_unlock_bh(&ap_list_lock); - return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt); + return snprintf(buf, PAGE_SIZE, "%llu\n", req_cnt); } static ssize_t request_count_store(struct device *dev, @@ -83,7 +83,7 @@ static ssize_t request_count_store(struct device *dev, for_each_ap_queue(aq, ac) aq->total_request_count = 0; spin_unlock_bh(&ap_list_lock); - atomic_set(&ac->total_request_count, 0); + atomic64_set(&ac->total_request_count, 0); return count; } diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c index 37c3bdc3642d..a317ab484932 100644 --- a/drivers/s390/crypto/ap_queue.c +++ b/drivers/s390/crypto/ap_queue.c @@ -479,12 +479,12 @@ static ssize_t request_count_show(struct device *dev, char *buf) { struct ap_queue *aq = to_ap_queue(dev); - unsigned int req_cnt; + u64 req_cnt; spin_lock_bh(&aq->lock); req_cnt = aq->total_request_count; spin_unlock_bh(&aq->lock); - return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt); + return snprintf(buf, PAGE_SIZE, "%llu\n", req_cnt); } static ssize_t request_count_store(struct device *dev, @@ -676,7 +676,7 @@ void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg) list_add_tail(&ap_msg->list, &aq->requestq); aq->requestq_count++; aq->total_request_count++; - atomic_inc(&aq->card->total_request_count); + atomic64_inc(&aq->card->total_request_count); /* Send/receive as many request from the queue as possible. 
*/ ap_wait(ap_sm_event_loop(aq, AP_EVENT_POLL)); spin_unlock_bh(&aq->lock); diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c index 71dae64ba994..2f33c5fcf676 100644 --- a/drivers/s390/crypto/pkey_api.c +++ b/drivers/s390/crypto/pkey_api.c @@ -994,7 +994,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, return -EFAULT; rc = cca_sec2protkey(ksp.cardnr, ksp.domain, ksp.seckey.seckey, ksp.protkey.protkey, - NULL, &ksp.protkey.type); + &ksp.protkey.len, &ksp.protkey.type); DEBUG_DBG("%s cca_sec2protkey()=%d\n", __func__, rc); if (rc) break; diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index a42257d6c79e..56a405dce8bc 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c @@ -606,8 +606,8 @@ static inline bool zcrypt_card_compare(struct zcrypt_card *zc, weight += atomic_read(&zc->load); pref_weight += atomic_read(&pref_zc->load); if (weight == pref_weight) - return atomic_read(&zc->card->total_request_count) > - atomic_read(&pref_zc->card->total_request_count); + return atomic64_read(&zc->card->total_request_count) > + atomic64_read(&pref_zc->card->total_request_count); return weight > pref_weight; } @@ -1226,11 +1226,12 @@ static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters) spin_unlock(&zcrypt_list_lock); } -static void zcrypt_perdev_reqcnt(int reqcnt[], size_t max_adapters) +static void zcrypt_perdev_reqcnt(u32 reqcnt[], size_t max_adapters) { struct zcrypt_card *zc; struct zcrypt_queue *zq; int card; + u64 cnt; memset(reqcnt, 0, sizeof(int) * max_adapters); spin_lock(&zcrypt_list_lock); @@ -1242,8 +1243,9 @@ static void zcrypt_perdev_reqcnt(int reqcnt[], size_t max_adapters) || card >= max_adapters) continue; spin_lock(&zq->queue->lock); - reqcnt[card] = zq->queue->total_request_count; + cnt = zq->queue->total_request_count; spin_unlock(&zq->queue->lock); + reqcnt[card] = (cnt < UINT_MAX) ? 
(u32) cnt : UINT_MAX; } } local_bh_enable(); @@ -1421,9 +1423,9 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, return 0; } case ZCRYPT_PERDEV_REQCNT: { - int *reqcnt; + u32 *reqcnt; - reqcnt = kcalloc(AP_DEVICES, sizeof(int), GFP_KERNEL); + reqcnt = kcalloc(AP_DEVICES, sizeof(u32), GFP_KERNEL); if (!reqcnt) return -ENOMEM; zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES); @@ -1480,7 +1482,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, } case Z90STAT_PERDEV_REQCNT: { /* the old ioctl supports only 64 adapters */ - int reqcnt[MAX_ZDEV_CARDIDS]; + u32 reqcnt[MAX_ZDEV_CARDIDS]; zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS); if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt))) diff --git a/drivers/s390/crypto/zcrypt_ep11misc.c b/drivers/s390/crypto/zcrypt_ep11misc.c index d4caf46ff9df..2afe2153b34e 100644 --- a/drivers/s390/crypto/zcrypt_ep11misc.c +++ b/drivers/s390/crypto/zcrypt_ep11misc.c @@ -887,7 +887,7 @@ static int ep11_unwrapkey(u16 card, u16 domain, /* empty pin tag */ *p++ = 0x04; *p++ = 0; - /* encrytped key value tag and bytes */ + /* encrypted key value tag and bytes */ p += asn1tag_write(p, 0x04, enckey, enckeysize); /* reply cprb and payload */ @@ -1095,7 +1095,7 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, /* Step 1: generate AES 256 bit random kek key */ rc = ep11_genaeskey(card, domain, 256, - 0x00006c00, /* EN/DECRYTP, WRAP/UNWRAP */ + 0x00006c00, /* EN/DECRYPT, WRAP/UNWRAP */ kek, &keklen); if (rc) { DEBUG_ERR( diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 9575a627a1e1..468cada49e72 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -369,7 +369,7 @@ enum qeth_qdio_info_states { struct qeth_buffer_pool_entry { struct list_head list; struct list_head init_list; - void *elements[QDIO_MAX_ELEMENTS_PER_BUFFER]; + struct page *elements[QDIO_MAX_ELEMENTS_PER_BUFFER]; }; struct qeth_qdio_buffer_pool { @@ -983,7 +983,7 @@ extern const struct attribute_group qeth_device_blkt_group; extern const struct device_type qeth_generic_devtype; const char *qeth_get_cardname_short(struct qeth_card *); -int qeth_realloc_buffer_pool(struct qeth_card *, int); +int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count); int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id); void qeth_core_free_discipline(struct qeth_card *); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 9639938581f5..6d3f2f14b414 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -65,7 +65,6 @@ static struct lock_class_key qdio_out_skb_queue_key; static void qeth_issue_next_read_cb(struct qeth_card *card, struct qeth_cmd_buffer *iob, unsigned int data_length); -static void qeth_free_buffer_pool(struct qeth_card *); static int qeth_qdio_establish(struct qeth_card *); static void qeth_free_qdio_queues(struct qeth_card *card); static void qeth_notify_skbs(struct qeth_qdio_out_q *queue, @@ -212,49 +211,121 @@ void qeth_clear_working_pool_list(struct qeth_card *card) } EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list); +static void qeth_free_pool_entry(struct qeth_buffer_pool_entry *entry) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(entry->elements); i++) { + if (entry->elements[i]) + __free_page(entry->elements[i]); + } + + kfree(entry); +} + +static void qeth_free_buffer_pool(struct qeth_card *card) +{ + struct qeth_buffer_pool_entry *entry, *tmp; + + 
list_for_each_entry_safe(entry, tmp, &card->qdio.init_pool.entry_list, + init_list) { + list_del(&entry->init_list); + qeth_free_pool_entry(entry); + } +} + +static struct qeth_buffer_pool_entry *qeth_alloc_pool_entry(unsigned int pages) +{ + struct qeth_buffer_pool_entry *entry; + unsigned int i; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return NULL; + + for (i = 0; i < pages; i++) { + entry->elements[i] = alloc_page(GFP_KERNEL); + + if (!entry->elements[i]) { + qeth_free_pool_entry(entry); + return NULL; + } + } + + return entry; +} + static int qeth_alloc_buffer_pool(struct qeth_card *card) { - struct qeth_buffer_pool_entry *pool_entry; - void *ptr; - int i, j; + unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card); + unsigned int i; QETH_CARD_TEXT(card, 5, "alocpool"); for (i = 0; i < card->qdio.init_pool.buf_count; ++i) { - pool_entry = kzalloc(sizeof(*pool_entry), GFP_KERNEL); - if (!pool_entry) { + struct qeth_buffer_pool_entry *entry; + + entry = qeth_alloc_pool_entry(buf_elements); + if (!entry) { qeth_free_buffer_pool(card); return -ENOMEM; } - for (j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j) { - ptr = (void *) __get_free_page(GFP_KERNEL); - if (!ptr) { - while (j > 0) - free_page((unsigned long) - pool_entry->elements[--j]); - kfree(pool_entry); - qeth_free_buffer_pool(card); - return -ENOMEM; - } - pool_entry->elements[j] = ptr; - } - list_add(&pool_entry->init_list, - &card->qdio.init_pool.entry_list); + + list_add(&entry->init_list, &card->qdio.init_pool.entry_list); } return 0; } -int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt) +int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count) { + unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card); + struct qeth_qdio_buffer_pool *pool = &card->qdio.init_pool; + struct qeth_buffer_pool_entry *entry, *tmp; + int delta = count - pool->buf_count; + LIST_HEAD(entries); + QETH_CARD_TEXT(card, 2, "realcbp"); - /* TODO: steel/add buffers from/to a running card's buffer pool (?) 
*/ - qeth_clear_working_pool_list(card); - qeth_free_buffer_pool(card); - card->qdio.in_buf_pool.buf_count = bufcnt; - card->qdio.init_pool.buf_count = bufcnt; - return qeth_alloc_buffer_pool(card); + /* Defer until queue is allocated: */ + if (!card->qdio.in_q) + goto out; + + /* Remove entries from the pool: */ + while (delta < 0) { + entry = list_first_entry(&pool->entry_list, + struct qeth_buffer_pool_entry, + init_list); + list_del(&entry->init_list); + qeth_free_pool_entry(entry); + + delta++; + } + + /* Allocate additional entries: */ + while (delta > 0) { + entry = qeth_alloc_pool_entry(buf_elements); + if (!entry) { + list_for_each_entry_safe(entry, tmp, &entries, + init_list) { + list_del(&entry->init_list); + qeth_free_pool_entry(entry); + } + + return -ENOMEM; + } + + list_add(&entry->init_list, &entries); + + delta--; + } + + list_splice(&entries, &pool->entry_list); + +out: + card->qdio.in_buf_pool.buf_count = count; + pool->buf_count = count; + return 0; } -EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool); +EXPORT_SYMBOL_GPL(qeth_resize_buffer_pool); static void qeth_free_qdio_queue(struct qeth_qdio_q *q) { @@ -1128,9 +1199,10 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, qeth_tx_complete_buf(buf, error, budget); for (i = 0; i < queue->max_elements; ++i) { - if (buf->buffer->element[i].addr && buf->is_header[i]) - kmem_cache_free(qeth_core_header_cache, - buf->buffer->element[i].addr); + void *data = phys_to_virt(buf->buffer->element[i].addr); + + if (data && buf->is_header[i]) + kmem_cache_free(qeth_core_header_cache, data); buf->is_header[i] = 0; } @@ -1169,19 +1241,6 @@ void qeth_drain_output_queues(struct qeth_card *card) } EXPORT_SYMBOL_GPL(qeth_drain_output_queues); -static void qeth_free_buffer_pool(struct qeth_card *card) -{ - struct qeth_buffer_pool_entry *pool_entry, *tmp; - int i = 0; - list_for_each_entry_safe(pool_entry, tmp, - &card->qdio.init_pool.entry_list, init_list){ - for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) - free_page((unsigned long)pool_entry->elements[i]); - list_del(&pool_entry->init_list); - kfree(pool_entry); - } -} - static int qeth_osa_set_output_queues(struct qeth_card *card, bool single) { unsigned int count = single ? 1 : card->dev->num_tx_queues; @@ -1203,7 +1262,6 @@ static int qeth_osa_set_output_queues(struct qeth_card *card, bool single) if (count == 1) dev_info(&card->gdev->dev, "Priority Queueing not supported\n"); - card->qdio.default_out_queue = single ? 
0 : QETH_DEFAULT_QUEUE; card->qdio.no_out_queues = count; return 0; } @@ -2392,7 +2450,6 @@ static void qeth_free_qdio_queues(struct qeth_card *card) return; qeth_free_cq(card); - cancel_delayed_work_sync(&card->buffer_reclaim_work); for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { if (card->qdio.in_q->bufs[j].rx_skb) dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb); @@ -2574,7 +2631,6 @@ static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry( struct list_head *plh; struct qeth_buffer_pool_entry *entry; int i, free; - struct page *page; if (list_empty(&card->qdio.in_buf_pool.entry_list)) return NULL; @@ -2583,7 +2639,7 @@ static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry( entry = list_entry(plh, struct qeth_buffer_pool_entry, list); free = 1; for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { - if (page_count(virt_to_page(entry->elements[i])) > 1) { + if (page_count(entry->elements[i]) > 1) { free = 0; break; } @@ -2598,15 +2654,15 @@ static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry( entry = list_entry(card->qdio.in_buf_pool.entry_list.next, struct qeth_buffer_pool_entry, list); for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { - if (page_count(virt_to_page(entry->elements[i])) > 1) { - page = alloc_page(GFP_ATOMIC); - if (!page) { + if (page_count(entry->elements[i]) > 1) { + struct page *page = alloc_page(GFP_ATOMIC); + + if (!page) return NULL; - } else { - free_page((unsigned long)entry->elements[i]); - entry->elements[i] = page_address(page); - QETH_CARD_STAT_INC(card, rx_sg_alloc_page); - } + + __free_page(entry->elements[i]); + entry->elements[i] = page; + QETH_CARD_STAT_INC(card, rx_sg_alloc_page); } } list_del_init(&entry->list); @@ -2624,12 +2680,12 @@ static int qeth_init_input_buffer(struct qeth_card *card, ETH_HLEN + sizeof(struct ipv6hdr)); if (!buf->rx_skb) - return 1; + return -ENOMEM; } pool_entry = qeth_find_free_buffer_pool_entry(card); if (!pool_entry) - return 1; + return -ENOBUFS; /* * since the buffer is accessed only from the input_tasklet @@ -2641,7 +2697,8 @@ static int qeth_init_input_buffer(struct qeth_card *card, buf->pool_entry = pool_entry; for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { buf->buffer->element[i].length = PAGE_SIZE; - buf->buffer->element[i].addr = pool_entry->elements[i]; + buf->buffer->element[i].addr = + page_to_phys(pool_entry->elements[i]); if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1) buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY; else @@ -2673,10 +2730,15 @@ static int qeth_init_qdio_queues(struct qeth_card *card) /* inbound queue */ qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); memset(&card->rx, 0, sizeof(struct qeth_rx)); + qeth_initialize_working_pool_list(card); /*give only as many buffers to hardware as we have buffer pool entries*/ - for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i) - qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]); + for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; i++) { + rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]); + if (rc) + return rc; + } + card->qdio.in_q->next_buf_to_init = card->qdio.in_buf_pool.buf_count - 1; rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, @@ -3459,9 +3521,8 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err, while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) && buffer->element[e].addr) { - unsigned long phys_aob_addr; + unsigned long phys_aob_addr = buffer->element[e].addr; - phys_aob_addr = (unsigned long) 
buffer->element[e].addr; qeth_qdio_handle_aob(card, phys_aob_addr); ++e; } @@ -3750,7 +3811,7 @@ static unsigned int __qeth_fill_buffer(struct sk_buff *skb, elem_length = min_t(unsigned int, length, PAGE_SIZE - offset_in_page(data)); - buffer->element[element].addr = data; + buffer->element[element].addr = virt_to_phys(data); buffer->element[element].length = elem_length; length -= elem_length; if (is_first_elem) { @@ -3780,7 +3841,7 @@ static unsigned int __qeth_fill_buffer(struct sk_buff *skb, elem_length = min_t(unsigned int, length, PAGE_SIZE - offset_in_page(data)); - buffer->element[element].addr = data; + buffer->element[element].addr = virt_to_phys(data); buffer->element[element].length = elem_length; buffer->element[element].eflags = SBAL_EFLAGS_MIDDLE_FRAG; @@ -3820,7 +3881,7 @@ static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf, int element = buf->next_element_to_fill; is_first_elem = false; - buffer->element[element].addr = hdr; + buffer->element[element].addr = virt_to_phys(hdr); buffer->element[element].length = hd_len; buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG; /* remember to free cache-allocated qeth_hdr: */ @@ -4746,10 +4807,10 @@ static void qeth_qdio_establish_cq(struct qeth_card *card, if (card->options.cq == QETH_CQ_ENABLED) { int offset = QDIO_MAX_BUFFERS_PER_Q * (card->qdio.no_in_queues - 1); - for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) { - in_sbal_ptrs[offset + i] = (struct qdio_buffer *) - virt_to_phys(card->qdio.c_q->bufs[i].buffer); - } + + for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) + in_sbal_ptrs[offset + i] = + card->qdio.c_q->bufs[i].buffer; queue_start_poll[card->qdio.no_in_queues - 1] = NULL; } @@ -4783,10 +4844,9 @@ static int qeth_qdio_establish(struct qeth_card *card) rc = -ENOMEM; goto out_free_qib_param; } - for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) { - in_sbal_ptrs[i] = (struct qdio_buffer *) - virt_to_phys(card->qdio.in_q->bufs[i].buffer); - } + + for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) + in_sbal_ptrs[i] = card->qdio.in_q->bufs[i].buffer; queue_start_poll = kcalloc(card->qdio.no_in_queues, sizeof(void *), GFP_KERNEL); @@ -4807,11 +4867,11 @@ static int qeth_qdio_establish(struct qeth_card *card) rc = -ENOMEM; goto out_free_queue_start_poll; } + for (i = 0, k = 0; i < card->qdio.no_out_queues; ++i) - for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k) { - out_sbal_ptrs[k] = (struct qdio_buffer *)virt_to_phys( - card->qdio.out_qs[i]->bufs[j]->buffer); - } + for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++, k++) + out_sbal_ptrs[k] = + card->qdio.out_qs[i]->bufs[j]->buffer; memset(&init_data, 0, sizeof(struct qdio_initialize)); init_data.cdev = CARD_DDEV(card); @@ -5289,7 +5349,7 @@ next_packet: offset = 0; } - hdr = element->addr + offset; + hdr = phys_to_virt(element->addr) + offset; offset += sizeof(*hdr); skb = NULL; @@ -5344,7 +5404,7 @@ next_packet: } use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) || - ((skb_len >= card->options.rx_sg_cb) && + (skb_len > card->options.rx_sg_cb && !atomic_read(&card->force_alloc_skb) && !IS_OSN(card)); @@ -5388,7 +5448,7 @@ use_skb: walk_packet: while (skb_len) { int data_len = min(skb_len, (int)(element->length - offset)); - char *data = element->addr + offset; + char *data = phys_to_virt(element->addr) + offset; skb_len -= data_len; offset += data_len; @@ -5447,7 +5507,6 @@ static int qeth_extract_skbs(struct qeth_card *card, int budget, { int work_done = 0; - WARN_ON_ONCE(!budget); *done = false; while (budget) { diff --git a/drivers/s390/net/qeth_core_sys.c 
b/drivers/s390/net/qeth_core_sys.c index 2bd9993aa60b..78cae61bc924 100644 --- a/drivers/s390/net/qeth_core_sys.c +++ b/drivers/s390/net/qeth_core_sys.c @@ -247,8 +247,8 @@ static ssize_t qeth_dev_bufcnt_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qeth_card *card = dev_get_drvdata(dev); + unsigned int cnt; char *tmp; - int cnt, old_cnt; int rc = 0; mutex_lock(&card->conf_mutex); @@ -257,13 +257,12 @@ static ssize_t qeth_dev_bufcnt_store(struct device *dev, goto out; } - old_cnt = card->qdio.in_buf_pool.buf_count; cnt = simple_strtoul(buf, &tmp, 10); cnt = (cnt < QETH_IN_BUF_COUNT_MIN) ? QETH_IN_BUF_COUNT_MIN : ((cnt > QETH_IN_BUF_COUNT_MAX) ? QETH_IN_BUF_COUNT_MAX : cnt); - if (old_cnt != cnt) { - rc = qeth_realloc_buffer_pool(card, cnt); - } + + rc = qeth_resize_buffer_pool(card, cnt); + out: mutex_unlock(&card->conf_mutex); return rc ? rc : count; diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 692bd2623401..8fb29371788b 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -284,6 +284,7 @@ static void qeth_l2_stop_card(struct qeth_card *card) if (card->state == CARD_STATE_SOFTSETUP) { qeth_clear_ipacmd_list(card); qeth_drain_output_queues(card); + cancel_delayed_work_sync(&card->buffer_reclaim_work); card->state = CARD_STATE_DOWN; } @@ -1707,15 +1708,14 @@ int qeth_l2_vnicc_set_state(struct qeth_card *card, u32 vnicc, bool state) QETH_CARD_TEXT(card, 2, "vniccsch"); - /* do not change anything if BridgePort is enabled */ - if (qeth_bridgeport_is_in_use(card)) - return -EBUSY; - /* check if characteristic and enable/disable are supported */ if (!(card->options.vnicc.sup_chars & vnicc) || !(card->options.vnicc.set_char_sup & vnicc)) return -EOPNOTSUPP; + if (qeth_bridgeport_is_in_use(card)) + return -EBUSY; + /* set enable/disable command and store wanted characteristic */ if (state) { cmd = IPA_VNICC_ENABLE; @@ -1761,14 +1761,13 @@ int qeth_l2_vnicc_get_state(struct qeth_card *card, u32 vnicc, bool *state) QETH_CARD_TEXT(card, 2, "vniccgch"); - /* do not get anything if BridgePort is enabled */ - if (qeth_bridgeport_is_in_use(card)) - return -EBUSY; - /* check if characteristic is supported */ if (!(card->options.vnicc.sup_chars & vnicc)) return -EOPNOTSUPP; + if (qeth_bridgeport_is_in_use(card)) + return -EBUSY; + /* if card is ready, query current VNICC state */ if (qeth_card_hw_is_reachable(card)) rc = qeth_l2_vnicc_query_chars(card); @@ -1786,15 +1785,14 @@ int qeth_l2_vnicc_set_timeout(struct qeth_card *card, u32 timeout) QETH_CARD_TEXT(card, 2, "vniccsto"); - /* do not change anything if BridgePort is enabled */ - if (qeth_bridgeport_is_in_use(card)) - return -EBUSY; - /* check if characteristic and set_timeout are supported */ if (!(card->options.vnicc.sup_chars & QETH_VNICC_LEARNING) || !(card->options.vnicc.getset_timeout_sup & QETH_VNICC_LEARNING)) return -EOPNOTSUPP; + if (qeth_bridgeport_is_in_use(card)) + return -EBUSY; + /* do we need to do anything? 
*/ if (card->options.vnicc.learning_timeout == timeout) return rc; @@ -1823,14 +1821,14 @@ int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout) QETH_CARD_TEXT(card, 2, "vniccgto"); - /* do not get anything if BridgePort is enabled */ - if (qeth_bridgeport_is_in_use(card)) - return -EBUSY; - /* check if characteristic and get_timeout are supported */ if (!(card->options.vnicc.sup_chars & QETH_VNICC_LEARNING) || !(card->options.vnicc.getset_timeout_sup & QETH_VNICC_LEARNING)) return -EOPNOTSUPP; + + if (qeth_bridgeport_is_in_use(card)) + return -EBUSY; + /* if card is ready, get timeout. Otherwise, just return stored value */ *timeout = card->options.vnicc.learning_timeout; if (qeth_card_hw_is_reachable(card)) diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 317d56647a4a..82f800d1d7b3 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -1178,6 +1178,7 @@ static void qeth_l3_stop_card(struct qeth_card *card) qeth_l3_clear_ip_htable(card, 1); qeth_clear_ipacmd_list(card); qeth_drain_output_queues(card); + cancel_delayed_work_sync(&card->buffer_reclaim_work); card->state = CARD_STATE_DOWN; } diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c index 29f2517d2a31..a3d1c3bdfadb 100644 --- a/drivers/s390/net/qeth_l3_sys.c +++ b/drivers/s390/net/qeth_l3_sys.c @@ -206,12 +206,11 @@ static ssize_t qeth_l3_dev_sniffer_store(struct device *dev, qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd); if (card->ssqd.qdioac2 & CHSC_AC2_SNIFFER_AVAILABLE) { card->options.sniffer = i; - if (card->qdio.init_pool.buf_count != - QETH_IN_BUF_COUNT_MAX) - qeth_realloc_buffer_pool(card, - QETH_IN_BUF_COUNT_MAX); - } else + qeth_resize_buffer_pool(card, QETH_IN_BUF_COUNT_MAX); + } else { rc = -EPERM; + } + break; default: rc = -EINVAL; diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 223a805f0b0b..cae9b7ff79b0 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -2510,7 +2510,7 @@ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx) for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) { sbale = &sbal->element[idx]; - req_id = (unsigned long) sbale->addr; + req_id = sbale->addr; fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id); if (!fsf_req) { diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h index 2b1e4da1944f..4bfb79f20588 100644 --- a/drivers/s390/scsi/zfcp_fsf.h +++ b/drivers/s390/scsi/zfcp_fsf.h @@ -410,7 +410,7 @@ struct fsf_qtcb_bottom_port { u8 cb_util; u8 a_util; u8 res2; - u16 temperature; + s16 temperature; u16 vcc; u16 tx_bias; u16 tx_power; diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index 661436a92f8e..f0d6296e673b 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c @@ -98,7 +98,7 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err, memset(pl, 0, ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *)); sbale = qdio->res_q[idx]->element; - req_id = (u64) sbale->addr; + req_id = sbale->addr; scount = min(sbale->scount + 1, ZFCP_QDIO_MAX_SBALS_PER_REQ + 1); /* incl. 
signaling SBAL */ @@ -199,7 +199,7 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, q_req->sbal_number); return -EINVAL; } - sbale->addr = sg_virt(sg); + sbale->addr = sg_phys(sg); sbale->length = sg->length; } return 0; @@ -418,7 +418,7 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio) sbale->length = 0; sbale->eflags = SBAL_EFLAGS_LAST_ENTRY; sbale->sflags = 0; - sbale->addr = NULL; + sbale->addr = 0; } if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q)) diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h index 2a816a37b3c0..6b43d6b254be 100644 --- a/drivers/s390/scsi/zfcp_qdio.h +++ b/drivers/s390/scsi/zfcp_qdio.h @@ -122,14 +122,14 @@ void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, % QDIO_MAX_BUFFERS_PER_Q; sbale = zfcp_qdio_sbale_req(qdio, q_req); - sbale->addr = (void *) req_id; + sbale->addr = req_id; sbale->eflags = 0; sbale->sflags = SBAL_SFLAGS0_COMMAND | sbtype; if (unlikely(!data)) return; sbale++; - sbale->addr = data; + sbale->addr = virt_to_phys(data); sbale->length = len; } @@ -152,7 +152,7 @@ void zfcp_qdio_fill_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, BUG_ON(q_req->sbale_curr == qdio->max_sbale_per_sbal - 1); q_req->sbale_curr++; sbale = zfcp_qdio_sbale_curr(qdio, q_req); - sbale->addr = data; + sbale->addr = virt_to_phys(data); sbale->length = len; } diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index 494b9fe9cc94..a711a0d15100 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c @@ -800,7 +800,7 @@ static ZFCP_DEV_ATTR(adapter_diag, b2b_credit, 0400, static ZFCP_DEV_ATTR(adapter_diag_sfp, _name, 0400, \ zfcp_sysfs_adapter_diag_sfp_##_name##_show, NULL) -ZFCP_DEFINE_DIAG_SFP_ATTR(temperature, temperature, 5, "%hu"); +ZFCP_DEFINE_DIAG_SFP_ATTR(temperature, temperature, 6, "%hd"); ZFCP_DEFINE_DIAG_SFP_ATTR(vcc, vcc, 5, "%hu"); ZFCP_DEFINE_DIAG_SFP_ATTR(tx_bias, tx_bias, 5, "%hu"); ZFCP_DEFINE_DIAG_SFP_ATTR(tx_power, tx_power, 5, "%hu"); |
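The dasd_eckd.c hunks above serialize ESE formatting per track with a plain list guarded by a spinlock (test_and_set_format_track()/clear_format_track()): a caller first checks whether the track is already queued, and only the first caller links its entry in and issues the format request. Below is a minimal user-space sketch of that test-and-set pattern, with hypothetical names and a pthread mutex standing in for the kernel spinlock; it illustrates the idea only and is not code from the patch.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for struct dasd_format_entry / dasd_block. */
struct format_entry {
	struct format_entry *next;
	unsigned long track;
};

struct format_list {
	struct format_entry *head;
	pthread_mutex_t lock;	/* plays the role of block->format_lock */
};

/*
 * Return true if the track is already queued for formatting by another
 * caller; otherwise link the entry in and return false, mirroring
 * test_and_set_format_track() in the patch.
 */
static bool test_and_set_track(struct format_list *fl, struct format_entry *e)
{
	struct format_entry *cur;
	bool busy = false;

	pthread_mutex_lock(&fl->lock);
	for (cur = fl->head; cur; cur = cur->next) {
		if (cur->track == e->track) {
			busy = true;
			goto out;
		}
	}
	e->next = fl->head;
	fl->head = e;
out:
	pthread_mutex_unlock(&fl->lock);
	return busy;
}

/* Counterpart of clear_format_track(): unlink once formatting is done. */
static void clear_track(struct format_list *fl, struct format_entry *e)
{
	struct format_entry **pp;

	pthread_mutex_lock(&fl->lock);
	for (pp = &fl->head; *pp; pp = &(*pp)->next) {
		if (*pp == e) {
			*pp = e->next;
			break;
		}
	}
	pthread_mutex_unlock(&fl->lock);
}

int main(void)
{
	struct format_list fl = { .head = NULL,
				  .lock = PTHREAD_MUTEX_INITIALIZER };
	struct format_entry a = { .track = 42 }, b = { .track = 42 };

	printf("first caller busy?  %d\n", test_and_set_track(&fl, &a)); /* 0 */
	printf("second caller busy? %d\n", test_and_set_track(&fl, &b)); /* 1 */
	clear_track(&fl, &a);
	printf("after clear busy?   %d\n", test_and_set_track(&fl, &b)); /* 0 */
	return 0;
}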