Diffstat (limited to 'drivers/s390')
46 files changed, 1644 insertions, 1007 deletions
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 6e294b4d3635..f89f9d02e788 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -2004,14 +2004,14 @@ static int dasd_eckd_end_analysis(struct dasd_block *block) blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block); raw: - block->blocks = (private->real_cyl * + block->blocks = ((unsigned long) private->real_cyl * private->rdc_data.trk_per_cyl * blk_per_trk); dev_info(&device->cdev->dev, - "DASD with %d KB/block, %d KB total size, %d KB/track, " + "DASD with %u KB/block, %lu KB total size, %u KB/track, " "%s\n", (block->bp_block >> 10), - ((private->real_cyl * + (((unsigned long) private->real_cyl * private->rdc_data.trk_per_cyl * blk_per_trk * (block->bp_block >> 9)) >> 1), ((blk_per_trk * block->bp_block) >> 10), diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c index fd2146bcc0ad..e17364e13d2f 100644 --- a/drivers/s390/char/con3270.c +++ b/drivers/s390/char/con3270.c @@ -629,7 +629,7 @@ con3270_init(void) (void (*)(unsigned long)) con3270_read_tasklet, (unsigned long) condev->read); - raw3270_add_view(&condev->view, &con3270_fn, 1); + raw3270_add_view(&condev->view, &con3270_fn, 1, RAW3270_VIEW_LOCK_IRQ); INIT_LIST_HEAD(&condev->freemem); for (i = 0; i < CON3270_STRING_PAGES; i++) { diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c index 8f3a2eeb28dc..4c4683d8784a 100644 --- a/drivers/s390/char/fs3270.c +++ b/drivers/s390/char/fs3270.c @@ -463,7 +463,8 @@ fs3270_open(struct inode *inode, struct file *filp) init_waitqueue_head(&fp->wait); fp->fs_pid = get_pid(task_pid(current)); - rc = raw3270_add_view(&fp->view, &fs3270_fn, minor); + rc = raw3270_add_view(&fp->view, &fs3270_fn, minor, + RAW3270_VIEW_LOCK_BH); if (rc) { fs3270_free_view(&fp->view); goto out; @@ -485,7 +486,7 @@ fs3270_open(struct inode *inode, struct file *filp) raw3270_del_view(&fp->view); goto out; } - nonseekable_open(inode, filp); + stream_open(inode, filp); filp->private_data = fp; out: mutex_unlock(&fs3270_mutex); diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c index f8cd2935fbfd..63a41b168761 100644 --- a/drivers/s390/char/raw3270.c +++ b/drivers/s390/char/raw3270.c @@ -920,7 +920,7 @@ raw3270_deactivate_view(struct raw3270_view *view) * Add view to device with minor "minor". 
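
A note on the raw3270 changes around this point: raw3270_add_view() grows a subclass parameter that is threaded into lockdep_set_subclass() just below. The console view takes view->lock from IRQ context, while the fs3270 and tty3270 views take it from softirq (BH) context; since all views share one spinlock class, lockdep would otherwise report a false inversion between the two usages. The RAW3270_VIEW_LOCK_IRQ/RAW3270_VIEW_LOCK_BH constants come from the patch itself; the sketch below is a simplified kernel-style illustration of the pattern, not the driver function:

	#include <linux/spinlock.h>
	#include <linux/lockdep.h>

	struct demo_view {
		spinlock_t lock;
	};

	/* Distinct subclasses let lockdep tell "same lock class, different
	 * usage context" apart, so an IRQ-context console view and a
	 * BH-context tty view no longer look like a lock inversion. */
	static void demo_add_view(struct demo_view *view, int subclass)
	{
		spin_lock_init(&view->lock);
		lockdep_set_subclass(&view->lock, subclass);
	}
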
*/ int -raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor) +raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor, int subclass) { unsigned long flags; struct raw3270 *rp; @@ -942,6 +942,7 @@ raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor) view->cols = rp->cols; view->ascebc = rp->ascebc; spin_lock_init(&view->lock); + lockdep_set_subclass(&view->lock, subclass); list_add(&view->list, &rp->view_list); rc = 0; spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h index 114ca7cbf889..3afaa35f7351 100644 --- a/drivers/s390/char/raw3270.h +++ b/drivers/s390/char/raw3270.h @@ -150,6 +150,8 @@ struct raw3270_fn { struct raw3270_view { struct list_head list; spinlock_t lock; +#define RAW3270_VIEW_LOCK_IRQ 0 +#define RAW3270_VIEW_LOCK_BH 1 atomic_t ref_count; struct raw3270 *dev; struct raw3270_fn *fn; @@ -158,7 +160,7 @@ struct raw3270_view { unsigned char *ascebc; /* ascii -> ebcdic table */ }; -int raw3270_add_view(struct raw3270_view *, struct raw3270_fn *, int); +int raw3270_add_view(struct raw3270_view *, struct raw3270_fn *, int, int); int raw3270_activate_view(struct raw3270_view *); void raw3270_del_view(struct raw3270_view *); void raw3270_deactivate_view(struct raw3270_view *); diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c index e9aa71cdfc44..d2ab3f07c008 100644 --- a/drivers/s390/char/sclp.c +++ b/drivers/s390/char/sclp.c @@ -45,8 +45,8 @@ static struct list_head sclp_req_queue; /* Data for read and and init requests. */ static struct sclp_req sclp_read_req; static struct sclp_req sclp_init_req; -static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE))); -static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE))); +static void *sclp_read_sccb; +static struct init_sccb *sclp_init_sccb; /* Suspend request */ static DECLARE_COMPLETION(sclp_request_queue_flushed); @@ -753,9 +753,8 @@ EXPORT_SYMBOL(sclp_remove_processed); static inline void __sclp_make_init_req(sccb_mask_t receive_mask, sccb_mask_t send_mask) { - struct init_sccb *sccb; + struct init_sccb *sccb = sclp_init_sccb; - sccb = (struct init_sccb *) sclp_init_sccb; clear_page(sccb); memset(&sclp_init_req, 0, sizeof(struct sclp_req)); sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK; @@ -782,7 +781,7 @@ static int sclp_init_mask(int calculate) { unsigned long flags; - struct init_sccb *sccb = (struct init_sccb *) sclp_init_sccb; + struct init_sccb *sccb = sclp_init_sccb; sccb_mask_t receive_mask; sccb_mask_t send_mask; int retry; @@ -1175,6 +1174,9 @@ sclp_init(void) if (sclp_init_state != sclp_init_state_uninitialized) goto fail_unlock; sclp_init_state = sclp_init_state_initializing; + sclp_read_sccb = (void *) __get_free_page(GFP_ATOMIC | GFP_DMA); + sclp_init_sccb = (void *) __get_free_page(GFP_ATOMIC | GFP_DMA); + BUG_ON(!sclp_read_sccb || !sclp_init_sccb); /* Set up variables */ INIT_LIST_HEAD(&sclp_req_queue); INIT_LIST_HEAD(&sclp_reg_list); @@ -1207,6 +1209,8 @@ fail_unregister_reboot_notifier: unregister_reboot_notifier(&sclp_reboot_notifier); fail_init_state_uninitialized: sclp_init_state = sclp_init_state_uninitialized; + free_page((unsigned long) sclp_read_sccb); + free_page((unsigned long) sclp_init_sccb); fail_unlock: spin_unlock_irqrestore(&sclp_lock, flags); return rc; diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h index 367e9d384d85..196333013e54 100644 --- a/drivers/s390/char/sclp.h +++ 
b/drivers/s390/char/sclp.h @@ -197,7 +197,9 @@ struct read_info_sccb { u32 hmfai; /* 124-127 */ u8 _pad_128[134 - 128]; /* 128-133 */ u8 byte_134; /* 134 */ - u8 _pad_135[4096 - 135]; /* 135-4095 */ + u8 cpudirq; /* 135 */ + u16 cbl; /* 136-137 */ + u8 _pad_138[4096 - 138]; /* 138-4095 */ } __packed __aligned(PAGE_SIZE); struct read_storage_sccb { @@ -319,7 +321,7 @@ extern int sclp_console_drop; extern unsigned long sclp_console_full; extern bool sclp_mask_compat_mode; -extern char sclp_early_sccb[PAGE_SIZE]; +extern char *sclp_early_sccb; void sclp_early_wait_irq(void); int sclp_early_cmd(sclp_cmdw_t cmd, void *sccb); @@ -365,14 +367,14 @@ sclp_ascebc(unsigned char ch) /* translate string from EBCDIC to ASCII */ static inline void -sclp_ebcasc_str(unsigned char *str, int nr) +sclp_ebcasc_str(char *str, int nr) { (MACHINE_IS_VM) ? EBCASC(str, nr) : EBCASC_500(str, nr); } /* translate string from ASCII to EBCDIC */ static inline void -sclp_ascebc_str(unsigned char *str, int nr) +sclp_ascebc_str(char *str, int nr) { (MACHINE_IS_VM) ? ASCEBC(str, nr) : ASCEBC_500(str, nr); } diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c index 8332788681c4..6c90aa725f23 100644 --- a/drivers/s390/char/sclp_early.c +++ b/drivers/s390/char/sclp_early.c @@ -40,6 +40,8 @@ static void __init sclp_early_facilities_detect(struct read_info_sccb *sccb) sclp.has_gisaf = !!(sccb->fac118 & 0x08); sclp.has_hvs = !!(sccb->fac119 & 0x80); sclp.has_kss = !!(sccb->fac98 & 0x01); + sclp.has_sipl = !!(sccb->cbl & 0x02); + sclp.has_sipl_g2 = !!(sccb->cbl & 0x04); if (sccb->fac85 & 0x02) S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP; if (sccb->fac91 & 0x40) @@ -93,6 +95,7 @@ static void __init sclp_early_facilities_detect(struct read_info_sccb *sccb) sclp.mtid_prev = (sccb->fac42 & 0x80) ? (sccb->fac66 & 31) : 0; sclp.hmfai = sccb->hmfai; + sclp.has_dirq = !!(sccb->cpudirq & 0x80); } /* @@ -144,7 +147,7 @@ static void __init sclp_early_console_detect(struct init_sccb *sccb) void __init sclp_early_detect(void) { - void *sccb = &sclp_early_sccb; + void *sccb = sclp_early_sccb; sclp_early_facilities_detect(sccb); sclp_early_init_core_info(sccb); diff --git a/drivers/s390/char/sclp_early_core.c b/drivers/s390/char/sclp_early_core.c index 387c114ded3f..7737470f8498 100644 --- a/drivers/s390/char/sclp_early_core.c +++ b/drivers/s390/char/sclp_early_core.c @@ -16,7 +16,7 @@ static struct read_info_sccb __bootdata(sclp_info_sccb); static int __bootdata(sclp_info_sccb_valid); -char sclp_early_sccb[PAGE_SIZE] __aligned(PAGE_SIZE) __section(.data); +char *sclp_early_sccb = (char *) EARLY_SCCB_OFFSET; int sclp_init_state __section(.data) = sclp_init_state_uninitialized; /* * Used to keep track of the size of the event masks. 
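
The SCLP hunks in this area all move SCCBs from static page-aligned arrays to dynamically provided pages: sclp_read_sccb and sclp_init_sccb are now allocated with __get_free_page(GFP_ATOMIC | GFP_DMA) in sclp_init() above (GFP_DMA keeps the buffer below 2 GB, where the SCLP interface expects SCCBs), and sclp_early_sccb becomes a plain char pointer. One knock-on effect, visible in the hunks that follow, is that every sizeof(sclp_early_sccb) must become an explicit EARLY_SCCB_SIZE, because sizeof on the pointer now yields the pointer width rather than the buffer size. A small standalone illustration (the 4 KiB size is a stand-in):

	#include <stdio.h>

	#define EARLY_SCCB_SIZE 4096			/* stand-in constant */

	static char sccb_array[EARLY_SCCB_SIZE];	/* old: char buf[PAGE_SIZE] */
	static char *sccb_ptr = sccb_array;		/* new: char *buf */

	int main(void)
	{
		printf("sizeof(array)   = %zu\n", sizeof(sccb_array));	/* 4096 */
		printf("sizeof(pointer) = %zu\n", sizeof(sccb_ptr));	/* 8 on 64-bit */
		return 0;
	}
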
Qemu until version 2.11 @@ -91,8 +91,8 @@ static void sclp_early_print_lm(const char *str, unsigned int len) struct mto *mto; struct go *go; - sccb = (struct write_sccb *) &sclp_early_sccb; - end = (unsigned char *) sccb + sizeof(sclp_early_sccb) - 1; + sccb = (struct write_sccb *) sclp_early_sccb; + end = (unsigned char *) sccb + EARLY_SCCB_SIZE - 1; memset(sccb, 0, sizeof(*sccb)); ptr = (unsigned char *) &sccb->msg.mdb.mto; offset = 0; @@ -139,9 +139,9 @@ static void sclp_early_print_vt220(const char *str, unsigned int len) { struct vt220_sccb *sccb; - sccb = (struct vt220_sccb *) &sclp_early_sccb; - if (sizeof(*sccb) + len >= sizeof(sclp_early_sccb)) - len = sizeof(sclp_early_sccb) - sizeof(*sccb); + sccb = (struct vt220_sccb *) sclp_early_sccb; + if (sizeof(*sccb) + len >= EARLY_SCCB_SIZE) + len = EARLY_SCCB_SIZE - sizeof(*sccb); memset(sccb, 0, sizeof(*sccb)); memcpy(&sccb->msg.data, str, len); sccb->header.length = sizeof(*sccb) + len; @@ -199,7 +199,7 @@ static int sclp_early_setup(int disable, int *have_linemode, int *have_vt220) BUILD_BUG_ON(sizeof(struct init_sccb) > PAGE_SIZE); *have_linemode = *have_vt220 = 0; - sccb = (struct init_sccb *) &sclp_early_sccb; + sccb = (struct init_sccb *) sclp_early_sccb; receive_mask = disable ? 0 : EVTYP_OPCMD_MASK; send_mask = disable ? 0 : EVTYP_VT220MSG_MASK | EVTYP_MSG_MASK; rc = sclp_early_set_event_mask(sccb, receive_mask, send_mask); @@ -304,7 +304,7 @@ int __init sclp_early_get_hsa_size(unsigned long *hsa_size) void __weak __init add_mem_detect_block(u64 start, u64 end) {} int __init sclp_early_read_storage_info(void) { - struct read_storage_sccb *sccb = (struct read_storage_sccb *)&sclp_early_sccb; + struct read_storage_sccb *sccb = (struct read_storage_sccb *)sclp_early_sccb; int rc, id, max_id = 0; unsigned long rn, rzm; sclp_cmdw_t command; @@ -320,8 +320,8 @@ int __init sclp_early_read_storage_info(void) rzm <<= 20; for (id = 0; id <= max_id; id++) { - memset(sclp_early_sccb, 0, sizeof(sclp_early_sccb)); - sccb->header.length = sizeof(sclp_early_sccb); + memset(sclp_early_sccb, 0, EARLY_SCCB_SIZE); + sccb->header.length = EARLY_SCCB_SIZE; command = SCLP_CMDW_READ_STORAGE_INFO | (id << 8); rc = sclp_early_cmd(command, sccb); if (rc) diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c index 8e0b69a2f11a..13f97fd73aca 100644 --- a/drivers/s390/char/sclp_sdias.c +++ b/drivers/s390/char/sclp_sdias.c @@ -29,7 +29,7 @@ static struct sclp_register sclp_sdias_register = { .send_mask = EVTYP_SDIAS_MASK, }; -static struct sdias_sccb sccb __attribute__((aligned(4096))); +static struct sdias_sccb *sclp_sdias_sccb; static struct sdias_evbuf sdias_evbuf; static DECLARE_COMPLETION(evbuf_accepted); @@ -58,6 +58,7 @@ static void sdias_callback(struct sclp_req *request, void *data) static int sdias_sclp_send(struct sclp_req *req) { + struct sdias_sccb *sccb = sclp_sdias_sccb; int retries; int rc; @@ -78,16 +79,16 @@ static int sdias_sclp_send(struct sclp_req *req) continue; } /* if not accepted, retry */ - if (!(sccb.evbuf.hdr.flags & 0x80)) { + if (!(sccb->evbuf.hdr.flags & 0x80)) { TRACE("sclp request failed: flags=%x\n", - sccb.evbuf.hdr.flags); + sccb->evbuf.hdr.flags); continue; } /* * for the sync interface the response is in the initial sccb */ if (!sclp_sdias_register.receiver_fn) { - memcpy(&sdias_evbuf, &sccb.evbuf, sizeof(sdias_evbuf)); + memcpy(&sdias_evbuf, &sccb->evbuf, sizeof(sdias_evbuf)); TRACE("sync request done\n"); return 0; } @@ -104,23 +105,24 @@ static int sdias_sclp_send(struct sclp_req *req) */ int 
sclp_sdias_blk_count(void) { + struct sdias_sccb *sccb = sclp_sdias_sccb; struct sclp_req request; int rc; mutex_lock(&sdias_mutex); - memset(&sccb, 0, sizeof(sccb)); + memset(sccb, 0, sizeof(*sccb)); memset(&request, 0, sizeof(request)); - sccb.hdr.length = sizeof(sccb); - sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf); - sccb.evbuf.hdr.type = EVTYP_SDIAS; - sccb.evbuf.event_qual = SDIAS_EQ_SIZE; - sccb.evbuf.data_id = SDIAS_DI_FCP_DUMP; - sccb.evbuf.event_id = 4712; - sccb.evbuf.dbs = 1; + sccb->hdr.length = sizeof(*sccb); + sccb->evbuf.hdr.length = sizeof(struct sdias_evbuf); + sccb->evbuf.hdr.type = EVTYP_SDIAS; + sccb->evbuf.event_qual = SDIAS_EQ_SIZE; + sccb->evbuf.data_id = SDIAS_DI_FCP_DUMP; + sccb->evbuf.event_id = 4712; + sccb->evbuf.dbs = 1; - request.sccb = &sccb; + request.sccb = sccb; request.command = SCLP_CMDW_WRITE_EVENT_DATA; request.status = SCLP_REQ_FILLED; request.callback = sdias_callback; @@ -130,8 +132,8 @@ int sclp_sdias_blk_count(void) pr_err("sclp_send failed for get_nr_blocks\n"); goto out; } - if (sccb.hdr.response_code != 0x0020) { - TRACE("send failed: %x\n", sccb.hdr.response_code); + if (sccb->hdr.response_code != 0x0020) { + TRACE("send failed: %x\n", sccb->hdr.response_code); rc = -EIO; goto out; } @@ -163,30 +165,31 @@ out: */ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks) { + struct sdias_sccb *sccb = sclp_sdias_sccb; struct sclp_req request; int rc; mutex_lock(&sdias_mutex); - memset(&sccb, 0, sizeof(sccb)); + memset(sccb, 0, sizeof(*sccb)); memset(&request, 0, sizeof(request)); - sccb.hdr.length = sizeof(sccb); - sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf); - sccb.evbuf.hdr.type = EVTYP_SDIAS; - sccb.evbuf.hdr.flags = 0; - sccb.evbuf.event_qual = SDIAS_EQ_STORE_DATA; - sccb.evbuf.data_id = SDIAS_DI_FCP_DUMP; - sccb.evbuf.event_id = 4712; - sccb.evbuf.asa_size = SDIAS_ASA_SIZE_64; - sccb.evbuf.event_status = 0; - sccb.evbuf.blk_cnt = nr_blks; - sccb.evbuf.asa = (unsigned long)dest; - sccb.evbuf.fbn = start_blk; - sccb.evbuf.lbn = 0; - sccb.evbuf.dbs = 1; - - request.sccb = &sccb; + sccb->hdr.length = sizeof(*sccb); + sccb->evbuf.hdr.length = sizeof(struct sdias_evbuf); + sccb->evbuf.hdr.type = EVTYP_SDIAS; + sccb->evbuf.hdr.flags = 0; + sccb->evbuf.event_qual = SDIAS_EQ_STORE_DATA; + sccb->evbuf.data_id = SDIAS_DI_FCP_DUMP; + sccb->evbuf.event_id = 4712; + sccb->evbuf.asa_size = SDIAS_ASA_SIZE_64; + sccb->evbuf.event_status = 0; + sccb->evbuf.blk_cnt = nr_blks; + sccb->evbuf.asa = (unsigned long)dest; + sccb->evbuf.fbn = start_blk; + sccb->evbuf.lbn = 0; + sccb->evbuf.dbs = 1; + + request.sccb = sccb; request.command = SCLP_CMDW_WRITE_EVENT_DATA; request.status = SCLP_REQ_FILLED; request.callback = sdias_callback; @@ -196,8 +199,8 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks) pr_err("sclp_send failed: %x\n", rc); goto out; } - if (sccb.hdr.response_code != 0x0020) { - TRACE("copy failed: %x\n", sccb.hdr.response_code); + if (sccb->hdr.response_code != 0x0020) { + TRACE("copy failed: %x\n", sccb->hdr.response_code); rc = -EIO; goto out; } @@ -256,6 +259,8 @@ int __init sclp_sdias_init(void) { if (ipl_info.type != IPL_TYPE_FCP_DUMP) return 0; + sclp_sdias_sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA); + BUG_ON(!sclp_sdias_sccb); sdias_dbf = debug_register("dump_sdias", 4, 1, 4 * sizeof(long)); debug_register_view(sdias_dbf, &debug_sprintf_view); debug_set_level(sdias_dbf, 6); @@ -264,6 +269,7 @@ int __init sclp_sdias_init(void) if (sclp_sdias_init_async() == 0) goto out; TRACE("init failed\n"); + 
free_page((unsigned long) sclp_sdias_sccb); return -ENODEV; out: TRACE("init done\n"); diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c index fc206c9d1c56..ea4253939555 100644 --- a/drivers/s390/char/tape_char.c +++ b/drivers/s390/char/tape_char.c @@ -290,7 +290,7 @@ tapechar_open (struct inode *inode, struct file *filp) rc = tape_open(device); if (rc == 0) { filp->private_data = device; - nonseekable_open(inode, filp); + stream_open(inode, filp); } else tape_put_device(device); diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c index 2b0c36c2c568..98d7fc152e32 100644 --- a/drivers/s390/char/tty3270.c +++ b/drivers/s390/char/tty3270.c @@ -980,7 +980,8 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty) return PTR_ERR(tp); rc = raw3270_add_view(&tp->view, &tty3270_fn, - tty->index + RAW3270_FIRSTMINOR); + tty->index + RAW3270_FIRSTMINOR, + RAW3270_VIEW_LOCK_BH); if (rc) { tty3270_free_view(tp); return rc; diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index 76d3c50bf078..405a60538630 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c @@ -51,7 +51,7 @@ static struct dentry *zcore_dir; static struct dentry *zcore_memmap_file; static struct dentry *zcore_reipl_file; static struct dentry *zcore_hsa_file; -static struct ipl_parameter_block *ipl_block; +static struct ipl_parameter_block *zcore_ipl_block; static char hsa_buf[PAGE_SIZE] __aligned(PAGE_SIZE); @@ -182,8 +182,8 @@ static const struct file_operations zcore_memmap_fops = { static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf, size_t count, loff_t *ppos) { - if (ipl_block) { - diag308(DIAG308_SET, ipl_block); + if (zcore_ipl_block) { + diag308(DIAG308_SET, zcore_ipl_block); diag308(DIAG308_LOAD_CLEAR, NULL); } return count; @@ -191,7 +191,7 @@ static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf, static int zcore_reipl_open(struct inode *inode, struct file *filp) { - return nonseekable_open(inode, filp); + return stream_open(inode, filp); } static int zcore_reipl_release(struct inode *inode, struct file *filp) @@ -265,18 +265,20 @@ static int __init zcore_reipl_init(void) return rc; if (ipib_info.ipib == 0) return 0; - ipl_block = (void *) __get_free_page(GFP_KERNEL); - if (!ipl_block) + zcore_ipl_block = (void *) __get_free_page(GFP_KERNEL); + if (!zcore_ipl_block) return -ENOMEM; if (ipib_info.ipib < sclp.hsa_size) - rc = memcpy_hsa_kernel(ipl_block, ipib_info.ipib, PAGE_SIZE); + rc = memcpy_hsa_kernel(zcore_ipl_block, ipib_info.ipib, + PAGE_SIZE); else - rc = memcpy_real(ipl_block, (void *) ipib_info.ipib, PAGE_SIZE); - if (rc || (__force u32)csum_partial(ipl_block, ipl_block->hdr.len, 0) != + rc = memcpy_real(zcore_ipl_block, (void *) ipib_info.ipib, + PAGE_SIZE); + if (rc || (__force u32)csum_partial(zcore_ipl_block, zcore_ipl_block->hdr.len, 0) != ipib_info.checksum) { TRACE("Checksum does not match\n"); - free_page((unsigned long) ipl_block); - ipl_block = NULL; + free_page((unsigned long) zcore_ipl_block); + zcore_ipl_block = NULL; } return 0; } diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile index f230516abb96..f6a8db04177c 100644 --- a/drivers/s390/cio/Makefile +++ b/drivers/s390/cio/Makefile @@ -20,5 +20,6 @@ obj-$(CONFIG_CCWGROUP) += ccwgroup.o qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_setup.o obj-$(CONFIG_QDIO) += qdio.o -vfio_ccw-objs += vfio_ccw_drv.o vfio_ccw_cp.o vfio_ccw_ops.o vfio_ccw_fsm.o +vfio_ccw-objs += vfio_ccw_drv.o 
vfio_ccw_cp.o vfio_ccw_ops.o vfio_ccw_fsm.o \ + vfio_ccw_async.o obj-$(CONFIG_VFIO_CCW) += vfio_ccw.o diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c index a45011e4529e..4534afc63591 100644 --- a/drivers/s390/cio/airq.c +++ b/drivers/s390/cio/airq.c @@ -27,6 +27,8 @@ static DEFINE_SPINLOCK(airq_lists_lock); static struct hlist_head airq_lists[MAX_ISC+1]; +static struct kmem_cache *airq_iv_cache; + /** * register_adapter_interrupt() - register adapter interrupt handler * @airq: pointer to adapter interrupt descriptor @@ -95,7 +97,7 @@ static irqreturn_t do_airq_interrupt(int irq, void *dummy) rcu_read_lock(); hlist_for_each_entry_rcu(airq, head, list) if ((*airq->lsi_ptr & airq->lsi_mask) != 0) - airq->handler(airq); + airq->handler(airq, !tpi_info->directed_irq); rcu_read_unlock(); return IRQ_HANDLED; @@ -129,10 +131,21 @@ struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags) if (!iv) goto out; iv->bits = bits; + iv->flags = flags; size = BITS_TO_LONGS(bits) * sizeof(unsigned long); - iv->vector = kzalloc(size, GFP_KERNEL); - if (!iv->vector) - goto out_free; + + if (flags & AIRQ_IV_CACHELINE) { + if ((cache_line_size() * BITS_PER_BYTE) < bits) + goto out_free; + + iv->vector = kmem_cache_zalloc(airq_iv_cache, GFP_KERNEL); + if (!iv->vector) + goto out_free; + } else { + iv->vector = kzalloc(size, GFP_KERNEL); + if (!iv->vector) + goto out_free; + } if (flags & AIRQ_IV_ALLOC) { iv->avail = kmalloc(size, GFP_KERNEL); if (!iv->avail) @@ -165,7 +178,10 @@ out_free: kfree(iv->ptr); kfree(iv->bitlock); kfree(iv->avail); - kfree(iv->vector); + if (iv->flags & AIRQ_IV_CACHELINE) + kmem_cache_free(airq_iv_cache, iv->vector); + else + kfree(iv->vector); kfree(iv); out: return NULL; @@ -181,7 +197,10 @@ void airq_iv_release(struct airq_iv *iv) kfree(iv->data); kfree(iv->ptr); kfree(iv->bitlock); - kfree(iv->vector); + if (iv->flags & AIRQ_IV_CACHELINE) + kmem_cache_free(airq_iv_cache, iv->vector); + else + kfree(iv->vector); kfree(iv->avail); kfree(iv); } @@ -275,3 +294,13 @@ unsigned long airq_iv_scan(struct airq_iv *iv, unsigned long start, return bit; } EXPORT_SYMBOL(airq_iv_scan); + +static int __init airq_init(void) +{ + airq_iv_cache = kmem_cache_create("airq_iv_cache", cache_line_size(), + cache_line_size(), 0, NULL); + if (!airq_iv_cache) + return -ENOMEM; + return 0; +} +subsys_initcall(airq_init); diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index de744ca158fd..18f5458f90e8 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -564,7 +564,7 @@ static irqreturn_t do_cio_interrupt(int irq, void *dummy) } static struct irqaction io_interrupt = { - .name = "IO", + .name = "I/O", .handler = do_cio_interrupt, }; diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h index 9811fd8a0c73..06a91743335a 100644 --- a/drivers/s390/cio/cio.h +++ b/drivers/s390/cio/cio.h @@ -51,7 +51,7 @@ struct tpi_info { struct subchannel_id schid; u32 intparm; u32 adapter_IO:1; - u32 :1; + u32 directed_irq:1; u32 isc:3; u32 :27; u32 type:3; @@ -115,7 +115,7 @@ struct subchannel { struct schib_config config; } __attribute__ ((aligned(8))); -DECLARE_PER_CPU(struct irb, cio_irb); +DECLARE_PER_CPU_ALIGNED(struct irb, cio_irb); #define to_subchannel(n) container_of(n, struct subchannel, dev) diff --git a/drivers/s390/cio/ioasm.c b/drivers/s390/cio/ioasm.c index 14d328338ce2..08eb10283b18 100644 --- a/drivers/s390/cio/ioasm.c +++ b/drivers/s390/cio/ioasm.c @@ -233,6 +233,7 @@ int hsch(struct subchannel_id schid) return ccode; } +EXPORT_SYMBOL(hsch); static 
inline int __xsch(struct subchannel_id schid) { diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index a6f7c2986b94..a06944399865 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h @@ -228,9 +228,6 @@ struct qdio_q { */ int first_to_check; - /* first_to_check of the last time */ - int last_move; - /* beginning position for calling the program */ int first_to_kick; @@ -341,8 +338,7 @@ static inline int multicast_outbound(struct qdio_q *q) (q->nr == q->irq_ptr->nr_output_qs - 1); } -#define pci_out_supported(q) \ - (q->irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) +#define pci_out_supported(irq) ((irq)->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) #define is_qebsm(q) (q->irq_ptr->sch_token != 0) #define need_siga_in(q) (q->irq_ptr->siga_flag.input) diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c index d2f98e5829d4..35410e6eda2e 100644 --- a/drivers/s390/cio/qdio_debug.c +++ b/drivers/s390/cio/qdio_debug.c @@ -121,15 +121,14 @@ static int qstat_show(struct seq_file *m, void *v) seq_printf(m, "Timestamp: %Lx Last AI: %Lx\n", q->timestamp, last_ai_time); - seq_printf(m, "nr_used: %d ftc: %d last_move: %d\n", - atomic_read(&q->nr_buf_used), - q->first_to_check, q->last_move); + seq_printf(m, "nr_used: %d ftc: %d\n", + atomic_read(&q->nr_buf_used), q->first_to_check); if (q->is_input_q) { seq_printf(m, "polling: %d ack start: %d ack count: %d\n", q->u.in.polling, q->u.in.ack_start, q->u.in.ack_count); - seq_printf(m, "DSCI: %d IRQs disabled: %u\n", - *(u32 *)q->irq_ptr->dsci, + seq_printf(m, "DSCI: %x IRQs disabled: %u\n", + *(u8 *)q->irq_ptr->dsci, test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state)); } diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 9537e656e927..cfce255521ac 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -371,7 +371,7 @@ static inline int qdio_siga_input(struct qdio_q *q) static inline void qdio_sync_queues(struct qdio_q *q) { /* PCI capable outbound queues will also be scanned so sync them too */ - if (pci_out_supported(q)) + if (pci_out_supported(q->irq_ptr)) qdio_siga_sync_all(q); else qdio_siga_sync_q(q); @@ -415,7 +415,8 @@ static inline void account_sbals(struct qdio_q *q, unsigned int count) q->q_stats.nr_sbals[pos]++; } -static void process_buffer_error(struct qdio_q *q, int count) +static void process_buffer_error(struct qdio_q *q, unsigned int start, + int count) { unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT : SLSB_P_OUTPUT_NOT_INIT; @@ -424,29 +425,29 @@ static void process_buffer_error(struct qdio_q *q, int count) /* special handling for no target buffer empty */ if (queue_type(q) == QDIO_IQDIO_QFMT && !q->is_input_q && - q->sbal[q->first_to_check]->element[15].sflags == 0x10) { + q->sbal[start]->element[15].sflags == 0x10) { qperf_inc(q, target_full); - DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", - q->first_to_check); + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", start); goto set; } DBF_ERROR("%4x BUF ERROR", SCH_NO(q)); DBF_ERROR((q->is_input_q) ? 
"IN:%2d" : "OUT:%2d", q->nr); - DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count); + DBF_ERROR("FTC:%3d C:%3d", start, count); DBF_ERROR("F14:%2x F15:%2x", - q->sbal[q->first_to_check]->element[14].sflags, - q->sbal[q->first_to_check]->element[15].sflags); + q->sbal[start]->element[14].sflags, + q->sbal[start]->element[15].sflags); set: /* * Interrupts may be avoided as long as the error is present * so change the buffer state immediately to avoid starvation. */ - set_buf_states(q, q->first_to_check, state, count); + set_buf_states(q, start, state, count); } -static inline void inbound_primed(struct qdio_q *q, int count) +static inline void inbound_primed(struct qdio_q *q, unsigned int start, + int count) { int new; @@ -457,7 +458,7 @@ static inline void inbound_primed(struct qdio_q *q, int count) if (!q->u.in.polling) { q->u.in.polling = 1; q->u.in.ack_count = count; - q->u.in.ack_start = q->first_to_check; + q->u.in.ack_start = start; return; } @@ -465,7 +466,7 @@ static inline void inbound_primed(struct qdio_q *q, int count) set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT, q->u.in.ack_count); q->u.in.ack_count = count; - q->u.in.ack_start = q->first_to_check; + q->u.in.ack_start = start; return; } @@ -473,7 +474,7 @@ static inline void inbound_primed(struct qdio_q *q, int count) * ACK the newest buffer. The ACK will be removed in qdio_stop_polling * or by the next inbound run. */ - new = add_buf(q->first_to_check, count - 1); + new = add_buf(start, count - 1); if (q->u.in.polling) { /* reset the previous ACK but first set the new one */ set_buf_state(q, new, SLSB_P_INPUT_ACK); @@ -488,10 +489,10 @@ static inline void inbound_primed(struct qdio_q *q, int count) if (!count) return; /* need to change ALL buffers to get more interrupts */ - set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count); + set_buf_states(q, start, SLSB_P_INPUT_NOT_INIT, count); } -static int get_inbound_buffer_frontier(struct qdio_q *q) +static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start) { unsigned char state = 0; int count; @@ -504,64 +505,58 @@ static int get_inbound_buffer_frontier(struct qdio_q *q) */ count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK); if (!count) - goto out; + return 0; /* * No siga sync here, as a PCI or we after a thin interrupt * already sync'ed the queues. 
*/ - count = get_buf_states(q, q->first_to_check, &state, count, 1, 0); + count = get_buf_states(q, start, &state, count, 1, 0); if (!count) - goto out; + return 0; switch (state) { case SLSB_P_INPUT_PRIMED: - inbound_primed(q, count); - q->first_to_check = add_buf(q->first_to_check, count); + inbound_primed(q, start, count); if (atomic_sub_return(count, &q->nr_buf_used) == 0) qperf_inc(q, inbound_queue_full); if (q->irq_ptr->perf_stat_enabled) account_sbals(q, count); - break; + return count; case SLSB_P_INPUT_ERROR: - process_buffer_error(q, count); - q->first_to_check = add_buf(q->first_to_check, count); + process_buffer_error(q, start, count); if (atomic_sub_return(count, &q->nr_buf_used) == 0) qperf_inc(q, inbound_queue_full); if (q->irq_ptr->perf_stat_enabled) account_sbals_error(q, count); - break; + return count; case SLSB_CU_INPUT_EMPTY: case SLSB_P_INPUT_NOT_INIT: case SLSB_P_INPUT_ACK: if (q->irq_ptr->perf_stat_enabled) q->q_stats.nr_sbal_nop++; DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop:%1d %#02x", - q->nr, q->first_to_check); - break; + q->nr, start); + return 0; default: WARN_ON_ONCE(1); + return 0; } -out: - return q->first_to_check; } -static int qdio_inbound_q_moved(struct qdio_q *q) +static int qdio_inbound_q_moved(struct qdio_q *q, unsigned int start) { - int bufnr; + int count; - bufnr = get_inbound_buffer_frontier(q); + count = get_inbound_buffer_frontier(q, start); - if (bufnr != q->last_move) { - q->last_move = bufnr; - if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR) - q->u.in.timestamp = get_tod_clock(); - return 1; - } else - return 0; + if (count && !is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR) + q->u.in.timestamp = get_tod_clock(); + + return count; } -static inline int qdio_inbound_q_done(struct qdio_q *q) +static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start) { unsigned char state = 0; @@ -570,7 +565,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q) if (need_siga_sync(q)) qdio_siga_sync_q(q); - get_buf_state(q, q->first_to_check, &state, 0); + get_buf_state(q, start, &state, 0); if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR) /* more work coming */ @@ -588,8 +583,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q) * has (probably) not moved (see qdio_inbound_processing). 
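
The check that follows is the inbound "done" heuristic: the queue only counts as finished when no freshly primed (or errored) buffer is visible at the current position and the queue has been quiet for a fixed TOD-clock interval (QDIO_INPUT_THRESHOLD). A hedged sketch of that decision; buf_state(), tod_clock() and the threshold value are stand-ins for the driver's helpers:

	struct demo_in_q {
		unsigned long long timestamp;	/* TOD clock of last move */
	};

	enum demo_state { DEMO_EMPTY, DEMO_PRIMED, DEMO_ERROR };

	extern enum demo_state buf_state(struct demo_in_q *q, unsigned int bufnr);
	extern unsigned long long tod_clock(void);

	#define DEMO_INPUT_THRESHOLD	(500ULL << 12)	/* illustrative TOD delta */

	static int inbound_q_done(struct demo_in_q *q, unsigned int start)
	{
		enum demo_state state = buf_state(q, start);

		if (state == DEMO_PRIMED || state == DEMO_ERROR)
			return 0;		/* more work is coming */

		/* quiet long enough: assume the handler consumed all
		 * buffers and let the tasklet stop rescheduling */
		return tod_clock() > q->timestamp + DEMO_INPUT_THRESHOLD;
	}
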
*/ if (get_tod_clock_fast() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) { - DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x", - q->first_to_check); + DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x", start); return 1; } else return 0; @@ -637,17 +631,13 @@ static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q, return phys_aob; } -static void qdio_kick_handler(struct qdio_q *q) +static void qdio_kick_handler(struct qdio_q *q, unsigned int count) { int start = q->first_to_kick; - int end = q->first_to_check; - int count; if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) return; - count = sub_buf(end, start); - if (q->is_input_q) { qperf_inc(q, inbound_handler); DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count); @@ -663,7 +653,7 @@ static void qdio_kick_handler(struct qdio_q *q) q->irq_ptr->int_parm); /* for the next time */ - q->first_to_kick = end; + q->first_to_kick = add_buf(start, count); q->qdio_error = 0; } @@ -678,14 +668,20 @@ static inline int qdio_tasklet_schedule(struct qdio_q *q) static void __qdio_inbound_processing(struct qdio_q *q) { + unsigned int start = q->first_to_check; + int count; + qperf_inc(q, tasklet_inbound); - if (!qdio_inbound_q_moved(q)) + count = qdio_inbound_q_moved(q, start); + if (count == 0) return; - qdio_kick_handler(q); + start = add_buf(start, count); + q->first_to_check = start; + qdio_kick_handler(q, count); - if (!qdio_inbound_q_done(q)) { + if (!qdio_inbound_q_done(q, start)) { /* means poll time is not yet over */ qperf_inc(q, tasklet_inbound_resched); if (!qdio_tasklet_schedule(q)) @@ -697,7 +693,7 @@ static void __qdio_inbound_processing(struct qdio_q *q) * We need to check again to not lose initiative after * resetting the ACK state. */ - if (!qdio_inbound_q_done(q)) { + if (!qdio_inbound_q_done(q, start)) { qperf_inc(q, tasklet_inbound_resched2); qdio_tasklet_schedule(q); } @@ -709,7 +705,7 @@ void qdio_inbound_processing(unsigned long data) __qdio_inbound_processing(q); } -static int get_outbound_buffer_frontier(struct qdio_q *q) +static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start) { unsigned char state = 0; int count; @@ -718,7 +714,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q) if (need_siga_sync(q)) if (((queue_type(q) != QDIO_IQDIO_QFMT) && - !pci_out_supported(q)) || + !pci_out_supported(q->irq_ptr)) || (queue_type(q) == QDIO_IQDIO_QFMT && multicast_outbound(q))) qdio_siga_sync_q(q); @@ -729,12 +725,11 @@ static int get_outbound_buffer_frontier(struct qdio_q *q) */ count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK); if (!count) - goto out; + return 0; - count = get_buf_states(q, q->first_to_check, &state, count, 0, - q->u.out.use_cq); + count = get_buf_states(q, start, &state, count, 0, q->u.out.use_cq); if (!count) - goto out; + return 0; switch (state) { case SLSB_P_OUTPUT_EMPTY: @@ -743,34 +738,29 @@ static int get_outbound_buffer_frontier(struct qdio_q *q) "out empty:%1d %02x", q->nr, count); atomic_sub(count, &q->nr_buf_used); - q->first_to_check = add_buf(q->first_to_check, count); if (q->irq_ptr->perf_stat_enabled) account_sbals(q, count); - - break; + return count; case SLSB_P_OUTPUT_ERROR: - process_buffer_error(q, count); - q->first_to_check = add_buf(q->first_to_check, count); + process_buffer_error(q, start, count); atomic_sub(count, &q->nr_buf_used); if (q->irq_ptr->perf_stat_enabled) account_sbals_error(q, count); - break; + return count; case SLSB_CU_OUTPUT_PRIMED: /* the adapter has not fetched the output yet */ if 
(q->irq_ptr->perf_stat_enabled) q->q_stats.nr_sbal_nop++; DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr); - break; + return 0; case SLSB_P_OUTPUT_NOT_INIT: case SLSB_P_OUTPUT_HALTED: - break; + return 0; default: WARN_ON_ONCE(1); + return 0; } - -out: - return q->first_to_check; } /* all buffers processed? */ @@ -779,18 +769,16 @@ static inline int qdio_outbound_q_done(struct qdio_q *q) return atomic_read(&q->nr_buf_used) == 0; } -static inline int qdio_outbound_q_moved(struct qdio_q *q) +static inline int qdio_outbound_q_moved(struct qdio_q *q, unsigned int start) { - int bufnr; + int count; - bufnr = get_outbound_buffer_frontier(q); + count = get_outbound_buffer_frontier(q, start); - if (bufnr != q->last_move) { - q->last_move = bufnr; + if (count) DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr); - return 1; - } else - return 0; + + return count; } static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob) @@ -837,15 +825,21 @@ retry: static void __qdio_outbound_processing(struct qdio_q *q) { + unsigned int start = q->first_to_check; + int count; + qperf_inc(q, tasklet_outbound); WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0); - if (qdio_outbound_q_moved(q)) - qdio_kick_handler(q); + count = qdio_outbound_q_moved(q, start); + if (count) { + q->first_to_check = add_buf(start, count); + qdio_kick_handler(q, count); + } - if (queue_type(q) == QDIO_ZFCP_QFMT) - if (!pci_out_supported(q) && !qdio_outbound_q_done(q)) - goto sched; + if (queue_type(q) == QDIO_ZFCP_QFMT && !pci_out_supported(q->irq_ptr) && + !qdio_outbound_q_done(q)) + goto sched; if (q->u.out.pci_out_enabled) return; @@ -881,37 +875,40 @@ void qdio_outbound_timer(struct timer_list *t) qdio_tasklet_schedule(q); } -static inline void qdio_check_outbound_after_thinint(struct qdio_q *q) +static inline void qdio_check_outbound_pci_queues(struct qdio_irq *irq) { struct qdio_q *out; int i; - if (!pci_out_supported(q)) + if (!pci_out_supported(irq)) return; - for_each_output_queue(q->irq_ptr, out, i) + for_each_output_queue(irq, out, i) if (!qdio_outbound_q_done(out)) qdio_tasklet_schedule(out); } static void __tiqdio_inbound_processing(struct qdio_q *q) { + unsigned int start = q->first_to_check; + int count; + qperf_inc(q, tasklet_inbound); if (need_siga_sync(q) && need_siga_sync_after_ai(q)) qdio_sync_queues(q); - /* - * The interrupt could be caused by a PCI request. Check the - * PCI capable outbound queues. - */ - qdio_check_outbound_after_thinint(q); + /* The interrupt could be caused by a PCI request: */ + qdio_check_outbound_pci_queues(q->irq_ptr); - if (!qdio_inbound_q_moved(q)) + count = qdio_inbound_q_moved(q, start); + if (count == 0) return; - qdio_kick_handler(q); + start = add_buf(start, count); + q->first_to_check = start; + qdio_kick_handler(q, count); - if (!qdio_inbound_q_done(q)) { + if (!qdio_inbound_q_done(q, start)) { qperf_inc(q, tasklet_inbound_resched); if (!qdio_tasklet_schedule(q)) return; @@ -922,7 +919,7 @@ static void __tiqdio_inbound_processing(struct qdio_q *q) * We need to check again to not lose initiative after * resetting the ACK state. 
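
The comment preserved here describes a deliberate re-check idiom: once the tasklet concludes the queue is done and gives initiative back (stopping the polling state and re-arming the ACK), a buffer can arrive inside that window, so the queue is probed one more time and the tasklet rescheduled if needed. The control flow, with hypothetical helper names — only the shape mirrors the driver:

	struct demo_in_q;

	extern void process_new_buffers(struct demo_in_q *q);
	extern int queue_done(struct demo_in_q *q);
	extern void stop_polling_and_rearm(struct demo_in_q *q);
	extern void reschedule_tasklet(struct demo_in_q *q);

	static void inbound_tasklet(struct demo_in_q *q)
	{
		process_new_buffers(q);

		if (!queue_done(q)) {		/* more work already visible */
			reschedule_tasklet(q);
			return;
		}

		stop_polling_and_rearm(q);	/* race window opens here */

		if (!queue_done(q))		/* close the window: re-check */
			reschedule_tasklet(q);
	}
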
*/ - if (!qdio_inbound_q_done(q)) { + if (!qdio_inbound_q_done(q, start)) { qperf_inc(q, tasklet_inbound_resched2); qdio_tasklet_schedule(q); } @@ -976,7 +973,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr) } } - if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)) + if (!pci_out_supported(irq_ptr)) return; for_each_output_queue(irq_ptr, q, i) { @@ -1642,7 +1639,7 @@ int qdio_start_irq(struct ccw_device *cdev, int nr) */ if (test_nonshared_ind(irq_ptr)) goto rescan; - if (!qdio_inbound_q_done(q)) + if (!qdio_inbound_q_done(q, q->first_to_check)) goto rescan; return 0; @@ -1672,12 +1669,14 @@ int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr, int *error) { struct qdio_q *q; - int start, end; struct qdio_irq *irq_ptr = cdev->private->qdio_data; + unsigned int start; + int count; if (!irq_ptr) return -ENODEV; q = irq_ptr->input_qs[nr]; + start = q->first_to_check; /* * Cannot rely on automatic sync after interrupt since queues may @@ -1686,25 +1685,27 @@ int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr, if (need_siga_sync(q)) qdio_sync_queues(q); - /* check the PCI capable outbound queues. */ - qdio_check_outbound_after_thinint(q); + qdio_check_outbound_pci_queues(irq_ptr); - if (!qdio_inbound_q_moved(q)) + count = qdio_inbound_q_moved(q, start); + if (count == 0) return 0; + start = add_buf(start, count); + q->first_to_check = start; + /* Note: upper-layer MUST stop processing immediately here ... */ if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) return -EIO; - start = q->first_to_kick; - end = q->first_to_check; - *bufnr = start; + *bufnr = q->first_to_kick; *error = q->qdio_error; /* for the next time */ - q->first_to_kick = end; + q->first_to_kick = add_buf(q->first_to_kick, count); q->qdio_error = 0; - return sub_buf(end, start); + + return count; } EXPORT_SYMBOL(qdio_get_next_buffers); diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c index a59887fad13e..99d7d2566a3a 100644 --- a/drivers/s390/cio/qdio_setup.c +++ b/drivers/s390/cio/qdio_setup.c @@ -523,7 +523,7 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr, irq_ptr->schid.sch_no, is_thinint_irq(irq_ptr), (irq_ptr->sch_token) ? 1 : 0, - (irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) ? 1 : 0, + pci_out_supported(irq_ptr) ? 1 : 0, css_general_characteristics.aif_tdd, (irq_ptr->siga_flag.input) ? "R" : " ", (irq_ptr->siga_flag.output) ? 
"W" : " ", diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c index 07dea602205b..28d59ac2204c 100644 --- a/drivers/s390/cio/qdio_thinint.c +++ b/drivers/s390/cio/qdio_thinint.c @@ -40,7 +40,7 @@ static LIST_HEAD(tiq_list); static DEFINE_MUTEX(tiq_list_lock); /* Adapter interrupt definitions */ -static void tiqdio_thinint_handler(struct airq_struct *airq); +static void tiqdio_thinint_handler(struct airq_struct *airq, bool floating); static struct airq_struct tiqdio_airq = { .handler = tiqdio_thinint_handler, @@ -179,7 +179,7 @@ static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq) * tiqdio_thinint_handler - thin interrupt handler for qdio * @airq: pointer to adapter interrupt descriptor */ -static void tiqdio_thinint_handler(struct airq_struct *airq) +static void tiqdio_thinint_handler(struct airq_struct *airq, bool floating) { u32 si_used = clear_shared_ind(); struct qdio_q *q; diff --git a/drivers/s390/cio/vfio_ccw_async.c b/drivers/s390/cio/vfio_ccw_async.c new file mode 100644 index 000000000000..8c1d2357ef5b --- /dev/null +++ b/drivers/s390/cio/vfio_ccw_async.c @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Async I/O region for vfio_ccw + * + * Copyright Red Hat, Inc. 2019 + * + * Author(s): Cornelia Huck <cohuck@redhat.com> + */ + +#include <linux/vfio.h> +#include <linux/mdev.h> + +#include "vfio_ccw_private.h" + +static ssize_t vfio_ccw_async_region_read(struct vfio_ccw_private *private, + char __user *buf, size_t count, + loff_t *ppos) +{ + unsigned int i = VFIO_CCW_OFFSET_TO_INDEX(*ppos) - VFIO_CCW_NUM_REGIONS; + loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK; + struct ccw_cmd_region *region; + int ret; + + if (pos + count > sizeof(*region)) + return -EINVAL; + + mutex_lock(&private->io_mutex); + region = private->region[i].data; + if (copy_to_user(buf, (void *)region + pos, count)) + ret = -EFAULT; + else + ret = count; + mutex_unlock(&private->io_mutex); + return ret; +} + +static ssize_t vfio_ccw_async_region_write(struct vfio_ccw_private *private, + const char __user *buf, size_t count, + loff_t *ppos) +{ + unsigned int i = VFIO_CCW_OFFSET_TO_INDEX(*ppos) - VFIO_CCW_NUM_REGIONS; + loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK; + struct ccw_cmd_region *region; + int ret; + + if (pos + count > sizeof(*region)) + return -EINVAL; + + if (!mutex_trylock(&private->io_mutex)) + return -EAGAIN; + + region = private->region[i].data; + if (copy_from_user((void *)region + pos, buf, count)) { + ret = -EFAULT; + goto out_unlock; + } + + vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_ASYNC_REQ); + + ret = region->ret_code ? 
region->ret_code : count; + +out_unlock: + mutex_unlock(&private->io_mutex); + return ret; +} + +static void vfio_ccw_async_region_release(struct vfio_ccw_private *private, + struct vfio_ccw_region *region) +{ + +} + +const struct vfio_ccw_regops vfio_ccw_async_region_ops = { + .read = vfio_ccw_async_region_read, + .write = vfio_ccw_async_region_write, + .release = vfio_ccw_async_region_release, +}; + +int vfio_ccw_register_async_dev_regions(struct vfio_ccw_private *private) +{ + return vfio_ccw_register_dev_region(private, + VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD, + &vfio_ccw_async_region_ops, + sizeof(struct ccw_cmd_region), + VFIO_REGION_INFO_FLAG_READ | + VFIO_REGION_INFO_FLAG_WRITE, + private->cmd_region); +} diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c index 384b3987eeb4..0e79799e9a71 100644 --- a/drivers/s390/cio/vfio_ccw_cp.c +++ b/drivers/s390/cio/vfio_ccw_cp.c @@ -362,6 +362,7 @@ static void cp_unpin_free(struct channel_program *cp) struct ccwchain *chain, *temp; int i; + cp->initialized = false; list_for_each_entry_safe(chain, temp, &cp->ccwchain_list, next) { for (i = 0; i < chain->ch_len; i++) { pfn_array_table_unpin_free(chain->ch_pat + i, @@ -732,6 +733,9 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb) */ cp->orb.cmd.c64 = 1; + if (!ret) + cp->initialized = true; + return ret; } @@ -746,7 +750,8 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb) */ void cp_free(struct channel_program *cp) { - cp_unpin_free(cp); + if (cp->initialized) + cp_unpin_free(cp); } /** @@ -791,6 +796,10 @@ int cp_prefetch(struct channel_program *cp) struct ccwchain *chain; int len, idx, ret; + /* this is an error in the caller */ + if (!cp->initialized) + return -EINVAL; + list_for_each_entry(chain, &cp->ccwchain_list, next) { len = chain->ch_len; for (idx = 0; idx < len; idx++) { @@ -826,6 +835,10 @@ union orb *cp_get_orb(struct channel_program *cp, u32 intparm, u8 lpm) struct ccwchain *chain; struct ccw1 *cpa; + /* this is an error in the caller */ + if (!cp->initialized) + return NULL; + orb = &cp->orb; orb->cmd.intparm = intparm; @@ -862,6 +875,9 @@ void cp_update_scsw(struct channel_program *cp, union scsw *scsw) u32 cpa = scsw->cmd.cpa; u32 ccw_head; + if (!cp->initialized) + return; + /* * LATER: * For now, only update the cmd.cpa part. 
We may need to deal with @@ -898,6 +914,9 @@ bool cp_iova_pinned(struct channel_program *cp, u64 iova) struct ccwchain *chain; int i; + if (!cp->initialized) + return false; + list_for_each_entry(chain, &cp->ccwchain_list, next) { for (i = 0; i < chain->ch_len; i++) if (pfn_array_table_iova_pinned(chain->ch_pat + i, diff --git a/drivers/s390/cio/vfio_ccw_cp.h b/drivers/s390/cio/vfio_ccw_cp.h index a4b74fb1aa57..3c20cd208da5 100644 --- a/drivers/s390/cio/vfio_ccw_cp.h +++ b/drivers/s390/cio/vfio_ccw_cp.h @@ -21,6 +21,7 @@ * @ccwchain_list: list head of ccwchains * @orb: orb for the currently processed ssch request * @mdev: the mediated device to perform page pinning/unpinning + * @initialized: whether this instance is actually initialized * * @ccwchain_list is the head of a ccwchain list, that contents the * translated result of the guest channel program that pointed out by @@ -30,6 +31,7 @@ struct channel_program { struct list_head ccwchain_list; union orb orb; struct device *mdev; + bool initialized; }; extern int cp_init(struct channel_program *cp, struct device *mdev, diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c index 0b3b9de45c60..ee8767f5845a 100644 --- a/drivers/s390/cio/vfio_ccw_drv.c +++ b/drivers/s390/cio/vfio_ccw_drv.c @@ -3,9 +3,11 @@ * VFIO based Physical Subchannel device driver * * Copyright IBM Corp. 2017 + * Copyright Red Hat, Inc. 2019 * * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com> * Xiao Feng Ren <renxiaof@linux.vnet.ibm.com> + * Cornelia Huck <cohuck@redhat.com> */ #include <linux/module.h> @@ -23,6 +25,7 @@ struct workqueue_struct *vfio_ccw_work_q; static struct kmem_cache *vfio_ccw_io_region; +static struct kmem_cache *vfio_ccw_cmd_region; /* * Helpers @@ -40,26 +43,30 @@ int vfio_ccw_sch_quiesce(struct subchannel *sch) if (ret != -EBUSY) goto out_unlock; + iretry = 255; do { - iretry = 255; ret = cio_cancel_halt_clear(sch, &iretry); - while (ret == -EBUSY) { - /* - * Flush all I/O and wait for - * cancel/halt/clear completion. - */ - private->completion = &completion; - spin_unlock_irq(sch->lock); - wait_for_completion_timeout(&completion, 3*HZ); + if (ret == -EIO) { + pr_err("vfio_ccw: could not quiesce subchannel 0.%x.%04x!\n", + sch->schid.ssid, sch->schid.sch_no); + break; + } + + /* + * Flush all I/O and wait for + * cancel/halt/clear completion. 
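
The vfio_ccw_sch_quiesce() rework shown here straightens the retry loop: the cancel/halt/clear retry budget (iretry = 255) is initialized once instead of on every pass, a hard -EIO now aborts with an error message rather than retrying forever, and the bounded completion wait only happens while the operation still reports -EBUSY. The resulting control flow, reduced to stand-in helpers with the lock handling and workqueue flush omitted:

	#include <errno.h>

	struct demo_sch;

	extern int cancel_halt_clear(struct demo_sch *sch, int *iretry);
	extern void wait_for_io_completion(struct demo_sch *sch);	/* bounded wait */
	extern int disable_subchannel(struct demo_sch *sch);
	extern void report_quiesce_failure(struct demo_sch *sch);

	static int quiesce_sketch(struct demo_sch *sch)
	{
		int iretry = 255;	/* previously reset on every loop pass */
		int ret;

		do {
			ret = cancel_halt_clear(sch, &iretry);
			if (ret == -EIO) {
				report_quiesce_failure(sch);
				break;
			}
			if (ret == -EBUSY)
				wait_for_io_completion(sch);
			ret = disable_subchannel(sch);
		} while (ret == -EBUSY);

		return ret;
	}
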
+ */ + private->completion = &completion; + spin_unlock_irq(sch->lock); - spin_lock_irq(sch->lock); - private->completion = NULL; - flush_workqueue(vfio_ccw_work_q); - ret = cio_cancel_halt_clear(sch, &iretry); - }; + if (ret == -EBUSY) + wait_for_completion_timeout(&completion, 3*HZ); + private->completion = NULL; + flush_workqueue(vfio_ccw_work_q); + spin_lock_irq(sch->lock); ret = cio_disable_subchannel(sch); } while (ret == -EBUSY); out_unlock: @@ -84,7 +91,9 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work) if (is_final) cp_free(&private->cp); } + mutex_lock(&private->io_mutex); memcpy(private->io_region->irb_area, irb, sizeof(*irb)); + mutex_unlock(&private->io_mutex); if (private->io_trigger) eventfd_signal(private->io_trigger, 1); @@ -108,7 +117,7 @@ static int vfio_ccw_sch_probe(struct subchannel *sch) { struct pmcw *pmcw = &sch->schib.pmcw; struct vfio_ccw_private *private; - int ret; + int ret = -ENOMEM; if (pmcw->qf) { dev_warn(&sch->dev, "vfio: ccw: does not support QDIO: %s\n", @@ -122,13 +131,17 @@ static int vfio_ccw_sch_probe(struct subchannel *sch) private->io_region = kmem_cache_zalloc(vfio_ccw_io_region, GFP_KERNEL | GFP_DMA); - if (!private->io_region) { - kfree(private); - return -ENOMEM; - } + if (!private->io_region) + goto out_free; + + private->cmd_region = kmem_cache_zalloc(vfio_ccw_cmd_region, + GFP_KERNEL | GFP_DMA); + if (!private->cmd_region) + goto out_free; private->sch = sch; dev_set_drvdata(&sch->dev, private); + mutex_init(&private->io_mutex); spin_lock_irq(sch->lock); private->state = VFIO_CCW_STATE_NOT_OPER; @@ -152,7 +165,10 @@ out_disable: cio_disable_subchannel(sch); out_free: dev_set_drvdata(&sch->dev, NULL); - kmem_cache_free(vfio_ccw_io_region, private->io_region); + if (private->cmd_region) + kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region); + if (private->io_region) + kmem_cache_free(vfio_ccw_io_region, private->io_region); kfree(private); return ret; } @@ -167,6 +183,7 @@ static int vfio_ccw_sch_remove(struct subchannel *sch) dev_set_drvdata(&sch->dev, NULL); + kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region); kmem_cache_free(vfio_ccw_io_region, private->io_region); kfree(private); @@ -241,7 +258,7 @@ static struct css_driver vfio_ccw_sch_driver = { static int __init vfio_ccw_sch_init(void) { - int ret; + int ret = -ENOMEM; vfio_ccw_work_q = create_singlethread_workqueue("vfio-ccw"); if (!vfio_ccw_work_q) @@ -251,20 +268,30 @@ static int __init vfio_ccw_sch_init(void) sizeof(struct ccw_io_region), 0, SLAB_ACCOUNT, 0, sizeof(struct ccw_io_region), NULL); - if (!vfio_ccw_io_region) { - destroy_workqueue(vfio_ccw_work_q); - return -ENOMEM; - } + if (!vfio_ccw_io_region) + goto out_err; + + vfio_ccw_cmd_region = kmem_cache_create_usercopy("vfio_ccw_cmd_region", + sizeof(struct ccw_cmd_region), 0, + SLAB_ACCOUNT, 0, + sizeof(struct ccw_cmd_region), NULL); + if (!vfio_ccw_cmd_region) + goto out_err; isc_register(VFIO_CCW_ISC); ret = css_driver_register(&vfio_ccw_sch_driver); if (ret) { isc_unregister(VFIO_CCW_ISC); - kmem_cache_destroy(vfio_ccw_io_region); - destroy_workqueue(vfio_ccw_work_q); + goto out_err; } return ret; + +out_err: + kmem_cache_destroy(vfio_ccw_cmd_region); + kmem_cache_destroy(vfio_ccw_io_region); + destroy_workqueue(vfio_ccw_work_q); + return ret; } static void __exit vfio_ccw_sch_exit(void) diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c index cab17865aafe..49d9d3da0282 100644 --- a/drivers/s390/cio/vfio_ccw_fsm.c +++ b/drivers/s390/cio/vfio_ccw_fsm.c @@ -3,8 +3,10 
@@ * Finite state machine for vfio-ccw device handling * * Copyright IBM Corp. 2017 + * Copyright Red Hat, Inc. 2019 * * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com> + * Cornelia Huck <cohuck@redhat.com> */ #include <linux/vfio.h> @@ -28,9 +30,12 @@ static int fsm_io_helper(struct vfio_ccw_private *private) sch = private->sch; spin_lock_irqsave(sch->lock, flags); - private->state = VFIO_CCW_STATE_BUSY; orb = cp_get_orb(&private->cp, (u32)(addr_t)sch, sch->lpm); + if (!orb) { + ret = -EIO; + goto out; + } /* Issue "Start Subchannel" */ ccode = ssch(sch->schid, orb); @@ -42,6 +47,7 @@ static int fsm_io_helper(struct vfio_ccw_private *private) */ sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND; ret = 0; + private->state = VFIO_CCW_STATE_CP_PENDING; break; case 1: /* Status pending */ case 2: /* Busy */ @@ -64,6 +70,76 @@ static int fsm_io_helper(struct vfio_ccw_private *private) default: ret = ccode; } +out: + spin_unlock_irqrestore(sch->lock, flags); + return ret; +} + +static int fsm_do_halt(struct vfio_ccw_private *private) +{ + struct subchannel *sch; + unsigned long flags; + int ccode; + int ret; + + sch = private->sch; + + spin_lock_irqsave(sch->lock, flags); + + /* Issue "Halt Subchannel" */ + ccode = hsch(sch->schid); + + switch (ccode) { + case 0: + /* + * Initialize device status information + */ + sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND; + ret = 0; + break; + case 1: /* Status pending */ + case 2: /* Busy */ + ret = -EBUSY; + break; + case 3: /* Device not operational */ + ret = -ENODEV; + break; + default: + ret = ccode; + } + spin_unlock_irqrestore(sch->lock, flags); + return ret; +} + +static int fsm_do_clear(struct vfio_ccw_private *private) +{ + struct subchannel *sch; + unsigned long flags; + int ccode; + int ret; + + sch = private->sch; + + spin_lock_irqsave(sch->lock, flags); + + /* Issue "Clear Subchannel" */ + ccode = csch(sch->schid); + + switch (ccode) { + case 0: + /* + * Initialize device status information + */ + sch->schib.scsw.cmd.actl = SCSW_ACTL_CLEAR_PEND; + /* TODO: check what else we might need to clear */ + ret = 0; + break; + case 3: /* Device not operational */ + ret = -ENODEV; + break; + default: + ret = ccode; + } spin_unlock_irqrestore(sch->lock, flags); return ret; } @@ -102,6 +178,30 @@ static void fsm_io_busy(struct vfio_ccw_private *private, private->io_region->ret_code = -EBUSY; } +static void fsm_io_retry(struct vfio_ccw_private *private, + enum vfio_ccw_event event) +{ + private->io_region->ret_code = -EAGAIN; +} + +static void fsm_async_error(struct vfio_ccw_private *private, + enum vfio_ccw_event event) +{ + struct ccw_cmd_region *cmd_region = private->cmd_region; + + pr_err("vfio-ccw: FSM: %s request from state:%d\n", + cmd_region->command == VFIO_CCW_ASYNC_CMD_HSCH ? "halt" : + cmd_region->command == VFIO_CCW_ASYNC_CMD_CSCH ? 
"clear" : + "<unknown>", private->state); + cmd_region->ret_code = -EIO; +} + +static void fsm_async_retry(struct vfio_ccw_private *private, + enum vfio_ccw_event event) +{ + private->cmd_region->ret_code = -EAGAIN; +} + static void fsm_disabled_irq(struct vfio_ccw_private *private, enum vfio_ccw_event event) { @@ -130,8 +230,7 @@ static void fsm_io_request(struct vfio_ccw_private *private, struct mdev_device *mdev = private->mdev; char *errstr = "request"; - private->state = VFIO_CCW_STATE_BUSY; - + private->state = VFIO_CCW_STATE_CP_PROCESSING; memcpy(scsw, io_region->scsw_area, sizeof(*scsw)); if (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) { @@ -166,22 +265,42 @@ static void fsm_io_request(struct vfio_ccw_private *private, } return; } else if (scsw->cmd.fctl & SCSW_FCTL_HALT_FUNC) { - /* XXX: Handle halt. */ + /* halt is handled via the async cmd region */ io_region->ret_code = -EOPNOTSUPP; goto err_out; } else if (scsw->cmd.fctl & SCSW_FCTL_CLEAR_FUNC) { - /* XXX: Handle clear. */ + /* clear is handled via the async cmd region */ io_region->ret_code = -EOPNOTSUPP; goto err_out; } err_out: - private->state = VFIO_CCW_STATE_IDLE; trace_vfio_ccw_io_fctl(scsw->cmd.fctl, get_schid(private), io_region->ret_code, errstr); } /* + * Deal with an async request from userspace. + */ +static void fsm_async_request(struct vfio_ccw_private *private, + enum vfio_ccw_event event) +{ + struct ccw_cmd_region *cmd_region = private->cmd_region; + + switch (cmd_region->command) { + case VFIO_CCW_ASYNC_CMD_HSCH: + cmd_region->ret_code = fsm_do_halt(private); + break; + case VFIO_CCW_ASYNC_CMD_CSCH: + cmd_region->ret_code = fsm_do_clear(private); + break; + default: + /* should not happen? */ + cmd_region->ret_code = -EINVAL; + } +} + +/* * Got an interrupt for a normal io (state busy). */ static void fsm_irq(struct vfio_ccw_private *private, @@ -204,21 +323,31 @@ fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS] = { [VFIO_CCW_STATE_NOT_OPER] = { [VFIO_CCW_EVENT_NOT_OPER] = fsm_nop, [VFIO_CCW_EVENT_IO_REQ] = fsm_io_error, + [VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_error, [VFIO_CCW_EVENT_INTERRUPT] = fsm_disabled_irq, }, [VFIO_CCW_STATE_STANDBY] = { [VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper, [VFIO_CCW_EVENT_IO_REQ] = fsm_io_error, + [VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_error, [VFIO_CCW_EVENT_INTERRUPT] = fsm_irq, }, [VFIO_CCW_STATE_IDLE] = { [VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper, [VFIO_CCW_EVENT_IO_REQ] = fsm_io_request, + [VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_request, + [VFIO_CCW_EVENT_INTERRUPT] = fsm_irq, + }, + [VFIO_CCW_STATE_CP_PROCESSING] = { + [VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper, + [VFIO_CCW_EVENT_IO_REQ] = fsm_io_retry, + [VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_retry, [VFIO_CCW_EVENT_INTERRUPT] = fsm_irq, }, - [VFIO_CCW_STATE_BUSY] = { + [VFIO_CCW_STATE_CP_PENDING] = { [VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper, [VFIO_CCW_EVENT_IO_REQ] = fsm_io_busy, + [VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_request, [VFIO_CCW_EVENT_INTERRUPT] = fsm_irq, }, }; diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c index f673e106c041..5eb61116ca6f 100644 --- a/drivers/s390/cio/vfio_ccw_ops.c +++ b/drivers/s390/cio/vfio_ccw_ops.c @@ -3,13 +3,17 @@ * Physical device callbacks for vfio_ccw * * Copyright IBM Corp. 2017 + * Copyright Red Hat, Inc. 
2019 * * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com> * Xiao Feng Ren <renxiaof@linux.vnet.ibm.com> + * Cornelia Huck <cohuck@redhat.com> */ #include <linux/vfio.h> #include <linux/mdev.h> +#include <linux/nospec.h> +#include <linux/slab.h> #include "vfio_ccw_private.h" @@ -130,11 +134,12 @@ static int vfio_ccw_mdev_remove(struct mdev_device *mdev) if ((private->state != VFIO_CCW_STATE_NOT_OPER) && (private->state != VFIO_CCW_STATE_STANDBY)) { - if (!vfio_ccw_mdev_reset(mdev)) + if (!vfio_ccw_sch_quiesce(private->sch)) private->state = VFIO_CCW_STATE_STANDBY; /* The state will be NOT_OPER on error. */ } + cp_free(&private->cp); private->mdev = NULL; atomic_inc(&private->avail); @@ -146,20 +151,66 @@ static int vfio_ccw_mdev_open(struct mdev_device *mdev) struct vfio_ccw_private *private = dev_get_drvdata(mdev_parent_dev(mdev)); unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP; + int ret; private->nb.notifier_call = vfio_ccw_mdev_notifier; - return vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, - &events, &private->nb); + ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, + &events, &private->nb); + if (ret) + return ret; + + ret = vfio_ccw_register_async_dev_regions(private); + if (ret) + vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, + &private->nb); + return ret; } static void vfio_ccw_mdev_release(struct mdev_device *mdev) { struct vfio_ccw_private *private = dev_get_drvdata(mdev_parent_dev(mdev)); + int i; + if ((private->state != VFIO_CCW_STATE_NOT_OPER) && + (private->state != VFIO_CCW_STATE_STANDBY)) { + if (!vfio_ccw_mdev_reset(mdev)) + private->state = VFIO_CCW_STATE_STANDBY; + /* The state will be NOT_OPER on error. */ + } + + cp_free(&private->cp); vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &private->nb); + + for (i = 0; i < private->num_regions; i++) + private->region[i].ops->release(private, &private->region[i]); + + private->num_regions = 0; + kfree(private->region); + private->region = NULL; +} + +static ssize_t vfio_ccw_mdev_read_io_region(struct vfio_ccw_private *private, + char __user *buf, size_t count, + loff_t *ppos) +{ + loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK; + struct ccw_io_region *region; + int ret; + + if (pos + count > sizeof(*region)) + return -EINVAL; + + mutex_lock(&private->io_mutex); + region = private->io_region; + if (copy_to_user(buf, (void *)region + pos, count)) + ret = -EFAULT; + else + ret = count; + mutex_unlock(&private->io_mutex); + return ret; } static ssize_t vfio_ccw_mdev_read(struct mdev_device *mdev, @@ -167,18 +218,54 @@ static ssize_t vfio_ccw_mdev_read(struct mdev_device *mdev, size_t count, loff_t *ppos) { + unsigned int index = VFIO_CCW_OFFSET_TO_INDEX(*ppos); struct vfio_ccw_private *private; + + private = dev_get_drvdata(mdev_parent_dev(mdev)); + + if (index >= VFIO_CCW_NUM_REGIONS + private->num_regions) + return -EINVAL; + + switch (index) { + case VFIO_CCW_CONFIG_REGION_INDEX: + return vfio_ccw_mdev_read_io_region(private, buf, count, ppos); + default: + index -= VFIO_CCW_NUM_REGIONS; + return private->region[index].ops->read(private, buf, count, + ppos); + } + + return -EINVAL; +} + +static ssize_t vfio_ccw_mdev_write_io_region(struct vfio_ccw_private *private, + const char __user *buf, + size_t count, loff_t *ppos) +{ + loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK; struct ccw_io_region *region; + int ret; - if (*ppos + count > sizeof(*region)) + if (pos + count > sizeof(*region)) return -EINVAL; - private = dev_get_drvdata(mdev_parent_dev(mdev)); + if 
(!mutex_trylock(&private->io_mutex)) + return -EAGAIN; + region = private->io_region; - if (copy_to_user(buf, (void *)region + *ppos, count)) - return -EFAULT; + if (copy_from_user((void *)region + pos, buf, count)) { + ret = -EFAULT; + goto out_unlock; + } - return count; + vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_IO_REQ); + if (region->ret_code != 0) + private->state = VFIO_CCW_STATE_IDLE; + ret = (region->ret_code != 0) ? region->ret_code : count; + +out_unlock: + mutex_unlock(&private->io_mutex); + return ret; } static ssize_t vfio_ccw_mdev_write(struct mdev_device *mdev, @@ -186,42 +273,47 @@ static ssize_t vfio_ccw_mdev_write(struct mdev_device *mdev, size_t count, loff_t *ppos) { + unsigned int index = VFIO_CCW_OFFSET_TO_INDEX(*ppos); struct vfio_ccw_private *private; - struct ccw_io_region *region; - - if (*ppos + count > sizeof(*region)) - return -EINVAL; private = dev_get_drvdata(mdev_parent_dev(mdev)); - if (private->state != VFIO_CCW_STATE_IDLE) - return -EACCES; - region = private->io_region; - if (copy_from_user((void *)region + *ppos, buf, count)) - return -EFAULT; + if (index >= VFIO_CCW_NUM_REGIONS + private->num_regions) + return -EINVAL; - vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_IO_REQ); - if (region->ret_code != 0) { - private->state = VFIO_CCW_STATE_IDLE; - return region->ret_code; + switch (index) { + case VFIO_CCW_CONFIG_REGION_INDEX: + return vfio_ccw_mdev_write_io_region(private, buf, count, ppos); + default: + index -= VFIO_CCW_NUM_REGIONS; + return private->region[index].ops->write(private, buf, count, + ppos); } - return count; + return -EINVAL; } -static int vfio_ccw_mdev_get_device_info(struct vfio_device_info *info) +static int vfio_ccw_mdev_get_device_info(struct vfio_device_info *info, + struct mdev_device *mdev) { + struct vfio_ccw_private *private; + + private = dev_get_drvdata(mdev_parent_dev(mdev)); info->flags = VFIO_DEVICE_FLAGS_CCW | VFIO_DEVICE_FLAGS_RESET; - info->num_regions = VFIO_CCW_NUM_REGIONS; + info->num_regions = VFIO_CCW_NUM_REGIONS + private->num_regions; info->num_irqs = VFIO_CCW_NUM_IRQS; return 0; } static int vfio_ccw_mdev_get_region_info(struct vfio_region_info *info, - u16 *cap_type_id, - void **cap_type) + struct mdev_device *mdev, + unsigned long arg) { + struct vfio_ccw_private *private; + int i; + + private = dev_get_drvdata(mdev_parent_dev(mdev)); switch (info->index) { case VFIO_CCW_CONFIG_REGION_INDEX: info->offset = 0; @@ -229,9 +321,55 @@ static int vfio_ccw_mdev_get_region_info(struct vfio_region_info *info, info->flags = VFIO_REGION_INFO_FLAG_READ | VFIO_REGION_INFO_FLAG_WRITE; return 0; - default: - return -EINVAL; + default: /* all other regions are handled via capability chain */ + { + struct vfio_info_cap caps = { .buf = NULL, .size = 0 }; + struct vfio_region_info_cap_type cap_type = { + .header.id = VFIO_REGION_INFO_CAP_TYPE, + .header.version = 1 }; + int ret; + + if (info->index >= + VFIO_CCW_NUM_REGIONS + private->num_regions) + return -EINVAL; + + info->index = array_index_nospec(info->index, + VFIO_CCW_NUM_REGIONS + + private->num_regions); + + i = info->index - VFIO_CCW_NUM_REGIONS; + + info->offset = VFIO_CCW_INDEX_TO_OFFSET(info->index); + info->size = private->region[i].size; + info->flags = private->region[i].flags; + + cap_type.type = private->region[i].type; + cap_type.subtype = private->region[i].subtype; + + ret = vfio_info_add_capability(&caps, &cap_type.header, + sizeof(cap_type)); + if (ret) + return ret; + + info->flags |= VFIO_REGION_INFO_FLAG_CAPS; + if (info->argsz < sizeof(*info) + 
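
The region lookup in the capability-chain branch above is hardened with array_index_nospec() from the newly included <linux/nospec.h>: after the bounds check, the user-supplied index is clamped so it cannot be used speculatively out of bounds (Spectre v1). The shape of the pattern, with illustrative names (the actual bound in the patch is VFIO_CCW_NUM_REGIONS + private->num_regions):

	if (index >= nr_regions)
		return -EINVAL;
	/* also clamp the value under speculative execution */
	index = array_index_nospec(index, nr_regions);
	return do_lookup(&regions[index]);
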
caps.size) { + info->argsz = sizeof(*info) + caps.size; + info->cap_offset = 0; + } else { + vfio_info_cap_shift(&caps, sizeof(*info)); + if (copy_to_user((void __user *)arg + sizeof(*info), + caps.buf, caps.size)) { + kfree(caps.buf); + return -EFAULT; + } + info->cap_offset = sizeof(*info); + } + + kfree(caps.buf); + } + } + return 0; } static int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info) @@ -308,6 +446,32 @@ static int vfio_ccw_mdev_set_irqs(struct mdev_device *mdev, } } +int vfio_ccw_register_dev_region(struct vfio_ccw_private *private, + unsigned int subtype, + const struct vfio_ccw_regops *ops, + size_t size, u32 flags, void *data) +{ + struct vfio_ccw_region *region; + + region = krealloc(private->region, + (private->num_regions + 1) * sizeof(*region), + GFP_KERNEL); + if (!region) + return -ENOMEM; + + private->region = region; + private->region[private->num_regions].type = VFIO_REGION_TYPE_CCW; + private->region[private->num_regions].subtype = subtype; + private->region[private->num_regions].ops = ops; + private->region[private->num_regions].size = size; + private->region[private->num_regions].flags = flags; + private->region[private->num_regions].data = data; + + private->num_regions++; + + return 0; +} + static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev, unsigned int cmd, unsigned long arg) @@ -328,7 +492,7 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev, if (info.argsz < minsz) return -EINVAL; - ret = vfio_ccw_mdev_get_device_info(&info); + ret = vfio_ccw_mdev_get_device_info(&info, mdev); if (ret) return ret; @@ -337,8 +501,6 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev, case VFIO_DEVICE_GET_REGION_INFO: { struct vfio_region_info info; - u16 cap_type_id = 0; - void *cap_type = NULL; minsz = offsetofend(struct vfio_region_info, offset); @@ -348,8 +510,7 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev, if (info.argsz < minsz) return -EINVAL; - ret = vfio_ccw_mdev_get_region_info(&info, &cap_type_id, - &cap_type); + ret = vfio_ccw_mdev_get_region_info(&info, mdev, arg); if (ret) return ret; diff --git a/drivers/s390/cio/vfio_ccw_private.h b/drivers/s390/cio/vfio_ccw_private.h index 08e9a7dc9176..f1092c3dc1b1 100644 --- a/drivers/s390/cio/vfio_ccw_private.h +++ b/drivers/s390/cio/vfio_ccw_private.h @@ -3,9 +3,11 @@ * Private stuff for vfio_ccw driver * * Copyright IBM Corp. 2017 + * Copyright Red Hat, Inc. 
2019 * * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com> * Xiao Feng Ren <renxiaof@linux.vnet.ibm.com> + * Cornelia Huck <cohuck@redhat.com> */ #ifndef _VFIO_CCW_PRIVATE_H_ @@ -19,6 +21,40 @@ #include "css.h" #include "vfio_ccw_cp.h" +#define VFIO_CCW_OFFSET_SHIFT 10 +#define VFIO_CCW_OFFSET_TO_INDEX(off) (off >> VFIO_CCW_OFFSET_SHIFT) +#define VFIO_CCW_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_CCW_OFFSET_SHIFT) +#define VFIO_CCW_OFFSET_MASK (((u64)(1) << VFIO_CCW_OFFSET_SHIFT) - 1) + +/* capability chain handling similar to vfio-pci */ +struct vfio_ccw_private; +struct vfio_ccw_region; + +struct vfio_ccw_regops { + ssize_t (*read)(struct vfio_ccw_private *private, char __user *buf, + size_t count, loff_t *ppos); + ssize_t (*write)(struct vfio_ccw_private *private, + const char __user *buf, size_t count, loff_t *ppos); + void (*release)(struct vfio_ccw_private *private, + struct vfio_ccw_region *region); +}; + +struct vfio_ccw_region { + u32 type; + u32 subtype; + const struct vfio_ccw_regops *ops; + void *data; + size_t size; + u32 flags; +}; + +int vfio_ccw_register_dev_region(struct vfio_ccw_private *private, + unsigned int subtype, + const struct vfio_ccw_regops *ops, + size_t size, u32 flags, void *data); + +int vfio_ccw_register_async_dev_regions(struct vfio_ccw_private *private); + /** * struct vfio_ccw_private * @sch: pointer to the subchannel @@ -28,6 +64,10 @@ * @mdev: pointer to the mediated device * @nb: notifier for vfio events * @io_region: MMIO region to input/output I/O arguments/results + * @io_mutex: protect against concurrent update of I/O regions + * @region: additional regions for other subchannel operations + * @cmd_region: MMIO region for asynchronous I/O commands other than START + * @num_regions: number of additional regions * @cp: channel program for the current I/O operation * @irb: irb info received from interrupt * @scsw: scsw info @@ -42,6 +82,10 @@ struct vfio_ccw_private { struct mdev_device *mdev; struct notifier_block nb; struct ccw_io_region *io_region; + struct mutex io_mutex; + struct vfio_ccw_region *region; + struct ccw_cmd_region *cmd_region; + int num_regions; struct channel_program cp; struct irb irb; @@ -63,7 +107,8 @@ enum vfio_ccw_state { VFIO_CCW_STATE_NOT_OPER, VFIO_CCW_STATE_STANDBY, VFIO_CCW_STATE_IDLE, - VFIO_CCW_STATE_BUSY, + VFIO_CCW_STATE_CP_PROCESSING, + VFIO_CCW_STATE_CP_PENDING, /* last element! */ NR_VFIO_CCW_STATES }; @@ -75,6 +120,7 @@ enum vfio_ccw_event { VFIO_CCW_EVENT_NOT_OPER, VFIO_CCW_EVENT_IO_REQ, VFIO_CCW_EVENT_INTERRUPT, + VFIO_CCW_EVENT_ASYNC_REQ, /* last element! 
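
The VFIO_CCW_OFFSET_* macros just defined carve the mdev file-offset space into 1 KiB windows, one per region index, in the same spirit as vfio-pci's per-BAR offsets (though with a much smaller shift). A worked example with VFIO_CCW_OFFSET_SHIFT == 10:

	VFIO_CCW_INDEX_TO_OFFSET(1);	/* region 1 starts at 1 << 10 == 0x400 */
	VFIO_CCW_OFFSET_TO_INDEX(0x7f8);/* == 1: offset 0x7f8 lies in region 1 */
	0x7f8 & VFIO_CCW_OFFSET_MASK;	/* == 0x3f8: offset within that region */

This arithmetic is what lets vfio_ccw_mdev_read()/vfio_ccw_mdev_write() dispatch on the index and hand the masked remainder to the per-region read/write ops.
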
*/ NR_VFIO_CCW_EVENTS }; diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 1546389d71db..cc30e4f07fff 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -116,7 +116,7 @@ static int user_set_domain; static struct bus_type ap_bus_type; /* Adapter interrupt definitions */ -static void ap_interrupt_handler(struct airq_struct *airq); +static void ap_interrupt_handler(struct airq_struct *airq, bool floating); static int ap_airq_flag; @@ -393,7 +393,7 @@ static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused) * ap_interrupt_handler() - Schedule ap_tasklet on interrupt * @airq: pointer to adapter interrupt descriptor */ -static void ap_interrupt_handler(struct airq_struct *airq) +static void ap_interrupt_handler(struct airq_struct *airq, bool floating) { inc_irq_stat(IRQIO_APB); if (!ap_suspend_flag) diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c index 6a340f2c3556..5ea83dc4f1d7 100644 --- a/drivers/s390/crypto/ap_queue.c +++ b/drivers/s390/crypto/ap_queue.c @@ -751,8 +751,8 @@ void ap_queue_prepare_remove(struct ap_queue *aq) __ap_flush_queue(aq); /* set REMOVE state to prevent new messages are queued in */ aq->state = AP_STATE_REMOVE; - del_timer_sync(&aq->timeout); spin_unlock_bh(&aq->lock); + del_timer_sync(&aq->timeout); } void ap_queue_remove(struct ap_queue *aq) diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c index 3e85d665c572..45eb0c14b880 100644 --- a/drivers/s390/crypto/pkey_api.c +++ b/drivers/s390/crypto/pkey_api.c @@ -51,7 +51,8 @@ static debug_info_t *debug_info; static void __init pkey_debug_init(void) { - debug_info = debug_register("pkey", 1, 1, 4 * sizeof(long)); + /* 5 arguments per dbf entry (including the format string ptr) */ + debug_info = debug_register("pkey", 1, 1, 5 * sizeof(long)); debug_register_view(debug_info, &debug_sprintf_view); debug_set_level(debug_info, 3); } diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index 689c2af7026a..852b8c2299c1 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c @@ -525,7 +525,7 @@ static int zcrypt_open(struct inode *inode, struct file *filp) filp->private_data = (void *) perms; atomic_inc(&zcrypt_open_count); - return nonseekable_open(inode, filp); + return stream_open(inode, filp); } /** @@ -659,6 +659,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms, trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO); if (mex->outputdatalength < mex->inputdatalength) { + func_code = 0; rc = -EINVAL; goto out; } @@ -742,6 +743,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms, trace_s390_zcrypt_req(crt, TP_ICARSACRT); if (crt->outputdatalength < crt->inputdatalength) { + func_code = 0; rc = -EINVAL; goto out; } @@ -951,6 +953,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms, targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL); if (!targets) { + func_code = 0; rc = -ENOMEM; goto out; } @@ -958,6 +961,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms, uptr = (struct ep11_target_dev __force __user *) xcrb->targets; if (copy_from_user(targets, uptr, target_num * sizeof(*targets))) { + func_code = 0; rc = -EFAULT; goto out_free; } diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c index 7617d21cb296..f63c5c871d3d 100644 --- a/drivers/s390/net/ctcm_main.c +++ b/drivers/s390/net/ctcm_main.c @@ -1595,6 +1595,7 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev) if (priv->channel[direction] 
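
The ap_queue.c hunk above is a lock-ordering fix: del_timer_sync() waits for a running timer callback to finish, so invoking it while holding aq->lock can deadlock whenever the timeout callback itself needs aq->lock; that is precisely the situation the reordering avoids. The resulting safe shape (simplified from the patched code):

	spin_lock_bh(&aq->lock);
	aq->state = AP_STATE_REMOVE;	/* no new messages get queued */
	spin_unlock_bh(&aq->lock);
	del_timer_sync(&aq->timeout);	/* callback may now take the lock */

Setting AP_STATE_REMOVE before dropping the lock keeps the window race-free: even if the timer fires in the gap, the queue no longer accepts work.
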
== NULL) { if (direction == CTCM_WRITE) channel_free(priv->channel[CTCM_READ]); + result = -ENODEV; goto out_dev; } priv->channel[direction]->netdev = dev; diff --git a/drivers/s390/net/ism.h b/drivers/s390/net/ism.h index 0aab90817326..66eac2b9704d 100644 --- a/drivers/s390/net/ism.h +++ b/drivers/s390/net/ism.h @@ -6,6 +6,7 @@ #include <linux/types.h> #include <linux/pci.h> #include <net/smc.h> +#include <asm/pci_insn.h> #define UTIL_STR_LEN 16 @@ -194,8 +195,6 @@ struct ism_dev { struct pci_dev *pdev; struct smcd_dev *smcd; - void __iomem *ctl; - struct ism_sba *sba; dma_addr_t sba_dma_addr; DECLARE_BITMAP(sba_bitmap, ISM_NR_DMBS); @@ -209,13 +208,37 @@ struct ism_dev { #define ISM_CREATE_REQ(dmb, idx, sf, offset) \ ((dmb) | (idx) << 24 | (sf) << 23 | (offset)) +static inline void __ism_read_cmd(struct ism_dev *ism, void *data, + unsigned long offset, unsigned long len) +{ + struct zpci_dev *zdev = to_zpci(ism->pdev); + u64 req = ZPCI_CREATE_REQ(zdev->fh, 2, 8); + + while (len > 0) { + __zpci_load(data, req, offset); + offset += 8; + data += 8; + len -= 8; + } +} + +static inline void __ism_write_cmd(struct ism_dev *ism, void *data, + unsigned long offset, unsigned long len) +{ + struct zpci_dev *zdev = to_zpci(ism->pdev); + u64 req = ZPCI_CREATE_REQ(zdev->fh, 2, len); + + if (len) + __zpci_store_block(data, req, offset); +} + static inline int __ism_move(struct ism_dev *ism, u64 dmb_req, void *data, unsigned int size) { struct zpci_dev *zdev = to_zpci(ism->pdev); u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, size); - return zpci_write_block(req, data, dmb_req); + return __zpci_store_block(data, req, dmb_req); } #endif /* S390_ISM_H */ diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c index 3e132592c1fe..4fc2056bd227 100644 --- a/drivers/s390/net/ism_drv.c +++ b/drivers/s390/net/ism_drv.c @@ -38,19 +38,18 @@ static int ism_cmd(struct ism_dev *ism, void *cmd) struct ism_req_hdr *req = cmd; struct ism_resp_hdr *resp = cmd; - memcpy_toio(ism->ctl + sizeof(*req), req + 1, req->len - sizeof(*req)); - memcpy_toio(ism->ctl, req, sizeof(*req)); + __ism_write_cmd(ism, req + 1, sizeof(*req), req->len - sizeof(*req)); + __ism_write_cmd(ism, req, 0, sizeof(*req)); WRITE_ONCE(resp->ret, ISM_ERROR); - memcpy_fromio(resp, ism->ctl, sizeof(*resp)); + __ism_read_cmd(ism, resp, 0, sizeof(*resp)); if (resp->ret) { debug_text_event(ism_debug_info, 0, "cmd failure"); debug_event(ism_debug_info, 0, resp, sizeof(*resp)); goto out; } - memcpy_fromio(resp + 1, ism->ctl + sizeof(*resp), - resp->len - sizeof(*resp)); + __ism_read_cmd(ism, resp + 1, sizeof(*resp), resp->len - sizeof(*resp)); out: return resp->ret; } @@ -512,13 +511,9 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (ret) goto err_disable; - ism->ctl = pci_iomap(pdev, 2, 0); - if (!ism->ctl) - goto err_resource; - ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); if (ret) - goto err_unmap; + goto err_resource; dma_set_seg_boundary(&pdev->dev, SZ_1M - 1); dma_set_max_seg_size(&pdev->dev, SZ_1M); @@ -527,7 +522,7 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id) ism->smcd = smcd_alloc_dev(&pdev->dev, dev_name(&pdev->dev), &ism_ops, ISM_NR_DMBS); if (!ism->smcd) - goto err_unmap; + goto err_resource; ism->smcd->priv = ism; ret = ism_dev_init(ism); @@ -538,8 +533,6 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id) err_free: smcd_free_dev(ism->smcd); -err_unmap: - pci_iounmap(pdev, ism->ctl); err_resource: pci_release_mem_regions(pdev); err_disable: @@ 
-568,7 +561,6 @@ static void ism_remove(struct pci_dev *pdev) ism_dev_exit(ism); smcd_free_dev(ism->smcd); - pci_iounmap(pdev, ism->ctl); pci_release_mem_regions(pdev); pci_disable_device(pdev); dev_set_drvdata(&pdev->dev, NULL); diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index c851cf6e01c4..784a2e76a1b0 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -10,6 +10,7 @@ #ifndef __QETH_CORE_H__ #define __QETH_CORE_H__ +#include <linux/completion.h> #include <linux/if.h> #include <linux/if_arp.h> #include <linux/etherdevice.h> @@ -21,6 +22,7 @@ #include <linux/hashtable.h> #include <linux/ip.h> #include <linux/refcount.h> +#include <linux/wait.h> #include <linux/workqueue.h> #include <net/ipv6.h> @@ -163,6 +165,12 @@ struct qeth_vnicc_info { bool rx_bcast_enabled; }; +static inline int qeth_is_adp_supported(struct qeth_ipa_info *ipa, + enum qeth_ipa_setadp_cmd func) +{ + return (ipa->supported_funcs & func); +} + static inline int qeth_is_ipa_supported(struct qeth_ipa_info *ipa, enum qeth_ipa_funcs func) { @@ -176,9 +184,7 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa, } #define qeth_adp_supported(c, f) \ - qeth_is_ipa_supported(&c->options.adp, f) -#define qeth_adp_enabled(c, f) \ - qeth_is_ipa_enabled(&c->options.adp, f) + qeth_is_adp_supported(&c->options.adp, f) #define qeth_is_supported(c, f) \ qeth_is_ipa_supported(&c->options.ipa4, f) #define qeth_is_enabled(c, f) \ @@ -217,6 +223,9 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa, /* QDIO queue and buffer handling */ /*****************************************************************************/ #define QETH_MAX_QUEUES 4 +#define QETH_IQD_MIN_TXQ 2 /* One for ucast, one for mcast. */ +#define QETH_IQD_MCAST_TXQ 0 +#define QETH_IQD_MIN_UCAST_TXQ 1 #define QETH_IN_BUF_SIZE_DEFAULT 65536 #define QETH_IN_BUF_COUNT_DEFAULT 64 #define QETH_IN_BUF_COUNT_HSDEFAULT 128 @@ -365,34 +374,6 @@ enum qeth_header_ids { #define QETH_HDR_EXT_CSUM_TRANSP_REQ 0x20 #define QETH_HDR_EXT_UDP 0x40 /*bit off for TCP*/ -enum qeth_qdio_buffer_states { - /* - * inbound: read out by driver; owned by hardware in order to be filled - * outbound: owned by driver in order to be filled - */ - QETH_QDIO_BUF_EMPTY, - /* - * inbound: filled by hardware; owned by driver in order to be read out - * outbound: filled by driver; owned by hardware in order to be sent - */ - QETH_QDIO_BUF_PRIMED, - /* - * inbound: not applicable - * outbound: identified to be pending in TPQ - */ - QETH_QDIO_BUF_PENDING, - /* - * inbound: not applicable - * outbound: found in completion queue - */ - QETH_QDIO_BUF_IN_CQ, - /* - * inbound: not applicable - * outbound: handled via transfer pending / completion queue - */ - QETH_QDIO_BUF_HANDLED_DELAYED, -}; - enum qeth_qdio_info_states { QETH_QDIO_UNINITIALIZED, QETH_QDIO_ALLOCATED, @@ -424,6 +405,19 @@ struct qeth_qdio_q { int next_buf_to_init; }; +enum qeth_qdio_out_buffer_state { + /* Owned by driver, in order to be filled. */ + QETH_QDIO_BUF_EMPTY, + /* Filled by driver; owned by hardware in order to be sent. */ + QETH_QDIO_BUF_PRIMED, + /* Identified to be pending in TPQ. */ + QETH_QDIO_BUF_PENDING, + /* Found in completion queue. */ + QETH_QDIO_BUF_IN_CQ, + /* Handled via transfer pending / completion queue. 
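
The ism changes above stop mapping BAR 2 with pci_iomap() and instead drive the command area through direct zPCI load/store instructions against the PCI function handle. Note the asymmetry: __ism_write_cmd() can push a whole buffer with one __zpci_store_block(), while __ism_read_cmd() must loop because a PCI load instruction transfers at most a doubleword. Its while loop is equivalent to this sketch (done is an illustrative local):

	/* read back len bytes in 8-byte PCI loads */
	for (done = 0; done < len; done += 8)
		__zpci_load(data + done, req, offset + done);

With no remaining user of the ioremapped control area, ism->ctl and both pci_iounmap() calls can be deleted.
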
*/ + QETH_QDIO_BUF_HANDLED_DELAYED, +}; + struct qeth_qdio_out_buffer { struct qdio_buffer *buffer; atomic_t state; @@ -462,7 +456,6 @@ struct qeth_card_stats { u64 rx_errors; u64 rx_dropped; u64 rx_multicast; - u64 tx_errors; }; struct qeth_out_q_stats { @@ -477,6 +470,7 @@ struct qeth_out_q_stats { u64 skbs_linearized_fail; u64 tso_bytes; u64 packing_mode_switch; + u64 stopped; /* rtnl_link_stats64 */ u64 tx_packets; @@ -490,14 +484,12 @@ struct qeth_qdio_out_q { struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q]; struct qdio_outbuf_state *bufstates; /* convenience pointer */ struct qeth_out_q_stats stats; - int queue_no; + u8 next_buf_to_fill; + u8 max_elements; + u8 queue_no; + u8 do_pack; struct qeth_card *card; atomic_t state; - int do_pack; - /* - * index of buffer to be filled by driver; state EMPTY or PACKING - */ - int next_buf_to_fill; /* * number of buffers that are currently filled (PRIMED) * -> these buffers are hardware-owned @@ -507,6 +499,11 @@ struct qeth_qdio_out_q { atomic_t set_pci_flags_count; }; +static inline bool qeth_out_queue_is_full(struct qeth_qdio_out_q *queue) +{ + return atomic_read(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q; +} + struct qeth_qdio_info { atomic_t state; /* input */ @@ -538,7 +535,6 @@ struct qeth_qdio_info { enum qeth_channel_states { CH_STATE_UP, CH_STATE_DOWN, - CH_STATE_ACTIVATING, CH_STATE_HALTED, CH_STATE_STOPPED, CH_STATE_RCD, @@ -585,7 +581,10 @@ struct qeth_cmd_buffer { enum qeth_cmd_buffer_state state; struct qeth_channel *channel; struct qeth_reply *reply; + long timeout; unsigned char *data; + void (*finalize)(struct qeth_card *card, struct qeth_cmd_buffer *iob, + unsigned int length); void (*callback)(struct qeth_card *card, struct qeth_channel *channel, struct qeth_cmd_buffer *iob); }; @@ -610,6 +609,11 @@ struct qeth_channel { int io_buf_no; }; +static inline bool qeth_trylock_channel(struct qeth_channel *channel) +{ + return atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0; +} + /** * OSA card related definitions */ @@ -631,17 +635,15 @@ struct qeth_seqno { __u32 pdu_hdr; __u32 pdu_hdr_ack; __u16 ipa; - __u32 pkt_seqno; }; struct qeth_reply { struct list_head list; - wait_queue_head_t wait_q; + struct completion received; int (*callback)(struct qeth_card *, struct qeth_reply *, unsigned long); u32 seqno; unsigned long offset; - atomic_t received; int rc; void *param; refcount_t refcnt; @@ -663,7 +665,7 @@ struct qeth_card_info { __u16 func_level; char mcl_level[QETH_MCL_LENGTH + 1]; u8 open_when_online:1; - int guestlan; + u8 is_vm_nic:1; int mac_bits; enum qeth_card_types type; enum qeth_link_types link_type; @@ -774,18 +776,19 @@ struct qeth_card { struct qeth_card_options options; struct workqueue_struct *event_wq; + struct workqueue_struct *cmd_wq; wait_queue_head_t wait_q; - spinlock_t mclock; unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; DECLARE_HASHTABLE(mac_htable, 4); DECLARE_HASHTABLE(ip_htable, 4); + struct mutex ip_lock; DECLARE_HASHTABLE(ip_mc_htable, 4); + struct work_struct rx_mode_work; struct work_struct kernel_thread_starter; spinlock_t thread_mask_lock; unsigned long thread_start_mask; unsigned long thread_allowed_mask; unsigned long thread_running_mask; - spinlock_t ip_lock; struct qeth_ipato ipato; struct list_head cmd_waiter_list; /* QDIO buffer handling */ @@ -827,6 +830,15 @@ static inline bool qeth_netdev_is_registered(struct net_device *dev) return dev->netdev_ops != NULL; } +static inline u16 qeth_iqd_translate_txq(struct net_device *dev, u16 txq) +{ + if (txq == 
QETH_IQD_MCAST_TXQ) + return dev->num_tx_queues - 1; + if (txq == dev->num_tx_queues - 1) + return QETH_IQD_MCAST_TXQ; + return txq; +} + static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf, unsigned int elements) { @@ -869,6 +881,16 @@ static inline int qeth_get_ip_version(struct sk_buff *skb) } } +static inline int qeth_get_ether_cast_type(struct sk_buff *skb) +{ + u8 *addr = eth_hdr(skb)->h_dest; + + if (is_multicast_ether_addr(addr)) + return is_broadcast_ether_addr(addr) ? RTN_BROADCAST : + RTN_MULTICAST; + return RTN_UNICAST; +} + static inline void qeth_rx_csum(struct qeth_card *card, struct sk_buff *skb, u8 flags) { @@ -922,18 +944,7 @@ static inline int qeth_send_simple_setassparms_v6(struct qeth_card *card, data, QETH_PROT_IPV6); } -int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, - int ipv); -static inline struct qeth_qdio_out_q *qeth_get_tx_queue(struct qeth_card *card, - struct sk_buff *skb, - int ipv, int cast_type) -{ - if (IS_IQD(card) && cast_type != RTN_UNICAST) - return card->qdio.out_qs[card->qdio.no_out_queues - 1]; - if (!card->qdio.do_prio_queueing) - return card->qdio.out_qs[card->qdio.default_out_queue]; - return card->qdio.out_qs[qeth_get_priority_queue(card, skb, ipv)]; -} +int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb); extern struct qeth_discipline qeth_l2_discipline; extern struct qeth_discipline qeth_l3_discipline; @@ -979,12 +990,10 @@ void qeth_clear_ipacmd_list(struct qeth_card *); int qeth_qdio_clear_card(struct qeth_card *, int); void qeth_clear_working_pool_list(struct qeth_card *); void qeth_clear_cmd_buffers(struct qeth_channel *); -void qeth_clear_qdio_buffers(struct qeth_card *); +void qeth_drain_output_queues(struct qeth_card *card); void qeth_setadp_promisc_mode(struct qeth_card *); int qeth_setadpparms_change_macaddr(struct qeth_card *); void qeth_tx_timeout(struct net_device *); -void qeth_prepare_control_data(struct qeth_card *, int, - struct qeth_cmd_buffer *); void qeth_release_buffer(struct qeth_channel *, struct qeth_cmd_buffer *); void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, u16 cmd_length); @@ -1016,6 +1025,8 @@ netdev_features_t qeth_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features); void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats); +u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb, + u8 cast_type, struct net_device *sb_dev); int qeth_open(struct net_device *dev); int qeth_stop(struct net_device *dev); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 44bd6f04c145..009f2c0ec504 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -61,13 +61,13 @@ static struct kmem_cache *qeth_qdio_outbuf_cache; static struct device *qeth_core_root_dev; static struct lock_class_key qdio_out_skb_queue_key; -static void qeth_send_control_data_cb(struct qeth_card *card, - struct qeth_channel *channel, - struct qeth_cmd_buffer *iob); +static void qeth_issue_next_read_cb(struct qeth_card *card, + struct qeth_channel *channel, + struct qeth_cmd_buffer *iob); static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *); static void qeth_free_buffer_pool(struct qeth_card *); static int qeth_qdio_establish(struct qeth_card *); -static void qeth_free_qdio_buffers(struct qeth_card *); +static void qeth_free_qdio_queues(struct qeth_card *card); static void qeth_notify_skbs(struct qeth_qdio_out_q 
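
qeth_iqd_translate_txq() above captures a numbering quirk for IQD devices: the deleted qeth_get_tx_queue() shows that the hardware keeps non-unicast traffic on the last device queue, while the new netdev-facing scheme exposes multicast as TX queue 0 (QETH_IQD_MCAST_TXQ) so that unicast queues run contiguously from 1 upward. The helper therefore swaps index 0 with the last index and passes everything else through; with dev->num_tx_queues == 4:

	qeth_iqd_translate_txq(dev, 0);	/* -> 3 */
	qeth_iqd_translate_txq(dev, 3);	/* -> 0 */
	qeth_iqd_translate_txq(dev, 1);	/* -> 1, unchanged */

Since the swap is its own inverse, the same helper translates in both directions (netdev queue to device queue and back).
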
*queue, struct qeth_qdio_out_buffer *buf, enum iucv_tx_notify notification); @@ -85,7 +85,7 @@ static void qeth_close_dev_handler(struct work_struct *work) static const char *qeth_get_cardname(struct qeth_card *card) { - if (card->info.guestlan) { + if (IS_VM_NIC(card)) { switch (card->info.type) { case QETH_CARD_TYPE_OSD: return " Virtual NIC QDIO"; @@ -120,7 +120,7 @@ static const char *qeth_get_cardname(struct qeth_card *card) /* max length to be returned: 14 */ const char *qeth_get_cardname_short(struct qeth_card *card) { - if (card->info.guestlan) { + if (IS_VM_NIC(card)) { switch (card->info.type) { case QETH_CARD_TYPE_OSD: return "Virt.NIC QDIO"; @@ -511,7 +511,9 @@ static int __qeth_issue_next_read(struct qeth_card *card) CARD_DEVID(card)); return -ENOMEM; } + qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data); + iob->callback = qeth_issue_next_read_cb; QETH_CARD_TEXT(card, 6, "noirqpnd"); rc = ccw_device_start(channel->ccwdev, channel->ccw, (addr_t) iob, 0, 0); @@ -542,11 +544,10 @@ static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card) { struct qeth_reply *reply; - reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC); + reply = kzalloc(sizeof(*reply), GFP_KERNEL); if (reply) { refcount_set(&reply->refcnt, 1); - atomic_set(&reply->received, 0); - init_waitqueue_head(&reply->wait_q); + init_completion(&reply->received); } return reply; } @@ -576,10 +577,10 @@ static void qeth_dequeue_reply(struct qeth_card *card, struct qeth_reply *reply) spin_unlock_irq(&card->lock); } -static void qeth_notify_reply(struct qeth_reply *reply) +static void qeth_notify_reply(struct qeth_reply *reply, int reason) { - atomic_inc(&reply->received); - wake_up(&reply->wait_q); + reply->rc = reason; + complete(&reply->received); } static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc, @@ -664,10 +665,8 @@ void qeth_clear_ipacmd_list(struct qeth_card *card) QETH_CARD_TEXT(card, 4, "clipalst"); spin_lock_irqsave(&card->lock, flags); - list_for_each_entry(reply, &card->cmd_waiter_list, list) { - reply->rc = -EIO; - qeth_notify_reply(reply); - } + list_for_each_entry(reply, &card->cmd_waiter_list, list) + qeth_notify_reply(reply, -EIO); spin_unlock_irqrestore(&card->lock, flags); } EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list); @@ -675,9 +674,6 @@ EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list); static int qeth_check_idx_response(struct qeth_card *card, unsigned char *buffer) { - if (!buffer) - return 0; - QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN); if ((buffer[2] & 0xc0) == 0xc0) { QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n", @@ -704,6 +700,7 @@ static struct qeth_cmd_buffer *__qeth_get_buffer(struct qeth_channel *channel) do { if (channel->iob[index].state == BUF_STATE_FREE) { channel->iob[index].state = BUF_STATE_LOCKED; + channel->iob[index].timeout = QETH_TIMEOUT; channel->io_buf_no = (channel->io_buf_no + 1) % QETH_CMD_BUFFER_NO; memset(channel->iob[index].data, 0, QETH_BUFSIZE); @@ -722,7 +719,7 @@ void qeth_release_buffer(struct qeth_channel *channel, spin_lock_irqsave(&channel->iob_lock, flags); iob->state = BUF_STATE_FREE; - iob->callback = qeth_send_control_data_cb; + iob->callback = NULL; if (iob->reply) { qeth_put_reply(iob->reply); iob->reply = NULL; @@ -743,10 +740,8 @@ static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc) { struct qeth_reply *reply = iob->reply; - if (reply) { - reply->rc = rc; - qeth_notify_reply(reply); - } + if (reply) + qeth_notify_reply(reply, rc); qeth_release_buffer(iob->channel, iob); } @@ 
-780,9 +775,9 @@ void qeth_clear_cmd_buffers(struct qeth_channel *channel) } EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers); -static void qeth_send_control_data_cb(struct qeth_card *card, - struct qeth_channel *channel, - struct qeth_cmd_buffer *iob) +static void qeth_issue_next_read_cb(struct qeth_card *card, + struct qeth_channel *channel, + struct qeth_cmd_buffer *iob) { struct qeth_ipa_cmd *cmd = NULL; struct qeth_reply *reply = NULL; @@ -846,11 +841,8 @@ static void qeth_send_control_data_cb(struct qeth_card *card, } } - if (rc <= 0) { - reply->rc = rc; - qeth_notify_reply(reply); - } - + if (rc <= 0) + qeth_notify_reply(reply, rc); qeth_put_reply(reply); out: @@ -1173,20 +1165,19 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, qeth_release_skbs(buf); - for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) { + for (i = 0; i < queue->max_elements; ++i) { if (buf->buffer->element[i].addr && buf->is_header[i]) kmem_cache_free(qeth_core_header_cache, buf->buffer->element[i].addr); buf->is_header[i] = 0; } - qeth_scrub_qdio_buffer(buf->buffer, - QETH_MAX_BUFFER_ELEMENTS(queue->card)); + qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements); buf->next_element_to_fill = 0; atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY); } -static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free) +static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free) { int j; @@ -1202,19 +1193,18 @@ static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free) } } -void qeth_clear_qdio_buffers(struct qeth_card *card) +void qeth_drain_output_queues(struct qeth_card *card) { int i; QETH_CARD_TEXT(card, 2, "clearqdbf"); /* clear outbound buffers to free skbs */ for (i = 0; i < card->qdio.no_out_queues; ++i) { - if (card->qdio.out_qs[i]) { - qeth_clear_outq_buffers(card->qdio.out_qs[i], 0); - } + if (card->qdio.out_qs[i]) + qeth_drain_output_queue(card->qdio.out_qs[i], false); } } -EXPORT_SYMBOL_GPL(qeth_clear_qdio_buffers); +EXPORT_SYMBOL_GPL(qeth_drain_output_queues); static void qeth_free_buffer_pool(struct qeth_card *card) { @@ -1273,7 +1263,6 @@ static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers) break; channel->iob[cnt].state = BUF_STATE_FREE; channel->iob[cnt].channel = channel; - channel->iob[cnt].callback = qeth_send_control_data_cb; } if (cnt < QETH_CMD_BUFFER_NO) { qeth_clean_channel(channel); @@ -1285,30 +1274,28 @@ static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers) return 0; } -static void qeth_set_single_write_queues(struct qeth_card *card) +static void qeth_osa_set_output_queues(struct qeth_card *card, bool single) { - if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) && - (card->qdio.no_out_queues == 4)) - qeth_free_qdio_buffers(card); + unsigned int count = single ? 
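
The reply-tracking rework in these hunks replaces an open-coded atomic_t plus wait queue with a struct completion: qeth_notify_reply() records the reason in reply->rc and calls complete(), and (as the later qeth_send_control_data() changes show) waiters block in wait_for_completion_interruptible_timeout() instead of spinning on atomic_read()/cpu_relax(). The bare pattern:

	init_completion(&reply->received);	/* in qeth_alloc_reply() */

	/* completer (reply/IRQ path): */
	reply->rc = reason;
	complete(&reply->received);

	/* waiter: >0 done (remaining jiffies), 0 timeout, -ERESTARTSYS signal */
	rc = wait_for_completion_interruptible_timeout(&reply->received, timeout);

Consistent with only ever being called from sleepable context, qeth_alloc_reply() is also relaxed from GFP_ATOMIC to GFP_KERNEL here.
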
1 : card->dev->num_tx_queues; - card->qdio.no_out_queues = 1; - if (card->qdio.default_out_queue != 0) - dev_info(&card->gdev->dev, "Priority Queueing not supported\n"); + rtnl_lock(); + netif_set_real_num_tx_queues(card->dev, count); + rtnl_unlock(); - card->qdio.default_out_queue = 0; -} + if (card->qdio.no_out_queues == count) + return; -static void qeth_set_multiple_write_queues(struct qeth_card *card) -{ - if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) && - (card->qdio.no_out_queues == 1)) { - qeth_free_qdio_buffers(card); - card->qdio.default_out_queue = 2; - } - card->qdio.no_out_queues = 4; + if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) + qeth_free_qdio_queues(card); + + if (count == 1) + dev_info(&card->gdev->dev, "Priority Queueing not supported\n"); + + card->qdio.default_out_queue = single ? 0 : QETH_DEFAULT_QUEUE; + card->qdio.no_out_queues = count; } -static void qeth_update_from_chp_desc(struct qeth_card *card) +static int qeth_update_from_chp_desc(struct qeth_card *card) { struct ccw_device *ccwdev; struct channel_path_desc_fmt0 *chp_dsc; @@ -1318,21 +1305,18 @@ static void qeth_update_from_chp_desc(struct qeth_card *card) ccwdev = card->data.ccwdev; chp_dsc = ccw_device_get_chp_desc(ccwdev, 0); if (!chp_dsc) - goto out; + return -ENOMEM; card->info.func_level = 0x4100 + chp_dsc->desc; - if (card->info.type == QETH_CARD_TYPE_IQD) - goto out; - /* CHPP field bit 6 == 1 -> single queue */ - if ((chp_dsc->chpp & 0x02) == 0x02) - qeth_set_single_write_queues(card); - else - qeth_set_multiple_write_queues(card); -out: + if (IS_OSD(card) || IS_OSX(card)) + /* CHPP field bit 6 == 1 -> single queue */ + qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02); + kfree(chp_dsc); QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues); QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level); + return 0; } static void qeth_init_qdio_info(struct qeth_card *card) @@ -1341,12 +1325,11 @@ static void qeth_init_qdio_info(struct qeth_card *card) atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED); card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT; card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; - card->qdio.no_out_queues = QETH_MAX_QUEUES; /* inbound */ card->qdio.no_in_queues = 1; card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT; - if (card->info.type == QETH_CARD_TYPE_IQD) + if (IS_IQD(card)) card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT; else card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT; @@ -1409,9 +1392,7 @@ static void qeth_setup_card(struct qeth_card *card) card->info.type = CARD_RDEV(card)->id.driver_info; card->state = CARD_STATE_DOWN; - spin_lock_init(&card->mclock); spin_lock_init(&card->lock); - spin_lock_init(&card->ip_lock); spin_lock_init(&card->thread_mask_lock); mutex_init(&card->conf_mutex); mutex_init(&card->discipline_mutex); @@ -1451,7 +1432,8 @@ static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev) CARD_WDEV(card) = gdev->cdev[1]; CARD_DDEV(card) = gdev->cdev[2]; - card->event_wq = alloc_ordered_workqueue("%s", 0, dev_name(&gdev->dev)); + card->event_wq = alloc_ordered_workqueue("%s_event", 0, + dev_name(&gdev->dev)); if (!card->event_wq) goto out_wq; if (qeth_setup_channel(&card->read, true)) @@ -1571,7 +1553,7 @@ int qeth_qdio_clear_card(struct qeth_card *card, int use_halt) switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED, QETH_QDIO_CLEANING)) { case QETH_QDIO_ESTABLISHED: - if (card->info.type == QETH_CARD_TYPE_IQD) + if (IS_IQD(card)) rc = 
qdio_shutdown(CARD_DDEV(card), QDIO_FLAG_CLEANUP_USING_HALT); else @@ -1644,8 +1626,8 @@ static void qeth_configure_unitaddr(struct qeth_card *card, char *prcd) card->info.chpid = prcd[30]; card->info.unit_addr2 = prcd[31]; card->info.cula = prcd[63]; - card->info.guestlan = ((prcd[0x10] == _ascebc['V']) && - (prcd[0x11] == _ascebc['M'])); + card->info.is_vm_nic = ((prcd[0x10] == _ascebc['V']) && + (prcd[0x11] == _ascebc['M'])); } static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card) @@ -1709,13 +1691,11 @@ static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card) { enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED; - if (card->info.type == QETH_CARD_TYPE_OSM || - card->info.type == QETH_CARD_TYPE_OSN) + if (IS_OSM(card) || IS_OSN(card)) disc = QETH_DISCIPLINE_LAYER2; - else if (card->info.guestlan) - disc = (card->info.type == QETH_CARD_TYPE_IQD) ? - QETH_DISCIPLINE_LAYER3 : - qeth_vm_detect_layer(card); + else if (IS_VM_NIC(card)) + disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 : + qeth_vm_detect_layer(card); switch (disc) { case QETH_DISCIPLINE_LAYER2: @@ -1771,121 +1751,16 @@ static void qeth_init_func_level(struct qeth_card *card) } } -static int qeth_idx_activate_get_answer(struct qeth_card *card, - struct qeth_channel *channel, - void (*reply_cb)(struct qeth_card *, - struct qeth_channel *, - struct qeth_cmd_buffer *)) -{ - struct qeth_cmd_buffer *iob; - int rc; - - QETH_DBF_TEXT(SETUP, 2, "idxanswr"); - iob = qeth_get_buffer(channel); - if (!iob) - return -ENOMEM; - iob->callback = reply_cb; - qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data); - - wait_event(card->wait_q, - atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0); - QETH_DBF_TEXT(SETUP, 6, "noirqpnd"); - spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); - rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw, - (addr_t) iob, 0, 0, QETH_TIMEOUT); - spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); - - if (rc) { - QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc); - QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); - atomic_set(&channel->irq_pending, 0); - qeth_release_buffer(channel, iob); - wake_up(&card->wait_q); - return rc; - } - rc = wait_event_interruptible_timeout(card->wait_q, - channel->state == CH_STATE_UP, QETH_TIMEOUT); - if (rc == -ERESTARTSYS) - return rc; - if (channel->state != CH_STATE_UP) { - rc = -ETIME; - QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); - } else - rc = 0; - return rc; -} - -static int qeth_idx_activate_channel(struct qeth_card *card, - struct qeth_channel *channel, - void (*reply_cb)(struct qeth_card *, - struct qeth_channel *, - struct qeth_cmd_buffer *)) +static void qeth_idx_finalize_cmd(struct qeth_card *card, + struct qeth_cmd_buffer *iob, + unsigned int length) { - struct qeth_cmd_buffer *iob; - __u16 temp; - __u8 tmp; - int rc; - struct ccw_dev_id temp_devid; - - QETH_DBF_TEXT(SETUP, 2, "idxactch"); + qeth_setup_ccw(iob->channel->ccw, CCW_CMD_WRITE, length, iob->data); - iob = qeth_get_buffer(channel); - if (!iob) - return -ENOMEM; - iob->callback = reply_cb; - qeth_setup_ccw(channel->ccw, CCW_CMD_WRITE, IDX_ACTIVATE_SIZE, - iob->data); - if (channel == &card->write) { - memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE); - memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), - &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH); + memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr, + QETH_SEQ_NO_LENGTH); + if (iob->channel == &card->write) card->seqno.trans_hdr++; - } else { - memcpy(iob->data, 
IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE); - memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), - &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH); - } - tmp = ((u8)card->dev->dev_port) | 0x80; - memcpy(QETH_IDX_ACT_PNO(iob->data), &tmp, 1); - memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data), - &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH); - memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data), - &card->info.func_level, sizeof(__u16)); - ccw_device_get_id(CARD_DDEV(card), &temp_devid); - memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &temp_devid.devno, 2); - temp = (card->info.cula << 8) + card->info.unit_addr2; - memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2); - - wait_event(card->wait_q, - atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0); - QETH_DBF_TEXT(SETUP, 6, "noirqpnd"); - spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); - rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw, - (addr_t) iob, 0, 0, QETH_TIMEOUT); - spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); - - if (rc) { - QETH_DBF_MESSAGE(2, "Error1 in activating channel. rc=%d\n", - rc); - QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); - atomic_set(&channel->irq_pending, 0); - qeth_release_buffer(channel, iob); - wake_up(&card->wait_q); - return rc; - } - rc = wait_event_interruptible_timeout(card->wait_q, - channel->state == CH_STATE_ACTIVATING, QETH_TIMEOUT); - if (rc == -ERESTARTSYS) - return rc; - if (channel->state != CH_STATE_ACTIVATING) { - dev_warn(&channel->ccwdev->dev, "The qeth device driver" - " failed to recover an error on the device\n"); - QETH_DBF_MESSAGE(2, "IDX activate timed out on channel %x\n", - CCW_DEVID(channel->ccwdev)); - QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME); - return -ETIME; - } - return qeth_idx_activate_get_answer(card, channel, reply_cb); } static int qeth_peer_func_level(int level) @@ -1897,112 +1772,21 @@ static int qeth_peer_func_level(int level) return level; } -static void qeth_idx_write_cb(struct qeth_card *card, - struct qeth_channel *channel, - struct qeth_cmd_buffer *iob) -{ - __u16 temp; - - QETH_DBF_TEXT(SETUP , 2, "idxwrcb"); - - if (channel->state == CH_STATE_DOWN) { - channel->state = CH_STATE_ACTIVATING; - goto out; - } - - if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) { - if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == QETH_IDX_ACT_ERR_EXCL) - dev_err(&channel->ccwdev->dev, - "The adapter is used exclusively by another " - "host\n"); - else - QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n", - CCW_DEVID(channel->ccwdev)); - goto out; - } - memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); - if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) { - QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n", - CCW_DEVID(channel->ccwdev), - card->info.func_level, temp); - goto out; - } - channel->state = CH_STATE_UP; -out: - qeth_release_buffer(channel, iob); -} - -static void qeth_idx_read_cb(struct qeth_card *card, - struct qeth_channel *channel, - struct qeth_cmd_buffer *iob) -{ - __u16 temp; - - QETH_DBF_TEXT(SETUP , 2, "idxrdcb"); - if (channel->state == CH_STATE_DOWN) { - channel->state = CH_STATE_ACTIVATING; - goto out; - } - - if (qeth_check_idx_response(card, iob->data)) - goto out; - - if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) { - switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) { - case QETH_IDX_ACT_ERR_EXCL: - dev_err(&channel->ccwdev->dev, - "The adapter is used exclusively by another " - "host\n"); - break; - case QETH_IDX_ACT_ERR_AUTH: - case QETH_IDX_ACT_ERR_AUTH_USER: - 
dev_err(&channel->ccwdev->dev, - "Setting the device online failed because of " - "insufficient authorization\n"); - break; - default: - QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n", - CCW_DEVID(channel->ccwdev)); - } - QETH_CARD_TEXT_(card, 2, "idxread%c", - QETH_IDX_ACT_CAUSE_CODE(iob->data)); - goto out; - } - - memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); - if (temp != qeth_peer_func_level(card->info.func_level)) { - QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n", - CCW_DEVID(channel->ccwdev), - card->info.func_level, temp); - goto out; - } - memcpy(&card->token.issuer_rm_r, - QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data), - QETH_MPC_TOKEN_LENGTH); - memcpy(&card->info.mcl_level[0], - QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH); - channel->state = CH_STATE_UP; -out: - qeth_release_buffer(channel, iob); -} - -void qeth_prepare_control_data(struct qeth_card *card, int len, - struct qeth_cmd_buffer *iob) +static void qeth_mpc_finalize_cmd(struct qeth_card *card, + struct qeth_cmd_buffer *iob, + unsigned int length) { - qeth_setup_ccw(iob->channel->ccw, CCW_CMD_WRITE, len, iob->data); - iob->callback = qeth_release_buffer_cb; + qeth_idx_finalize_cmd(card, iob, length); - memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), - &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH); - card->seqno.trans_hdr++; memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data), &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH); card->seqno.pdu_hdr++; memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data), &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH); - QETH_DBF_HEX(CTRL, 2, iob->data, min(len, QETH_DBF_CTRL_LEN)); + + iob->reply->seqno = QETH_IDX_COMMAND_SEQNO; + iob->callback = qeth_release_buffer_cb; } -EXPORT_SYMBOL_GPL(qeth_prepare_control_data); /** * qeth_send_control_data() - send control command to the card @@ -2035,17 +1819,12 @@ static int qeth_send_control_data(struct qeth_card *card, int len, void *reply_param) { struct qeth_channel *channel = iob->channel; + long timeout = iob->timeout; int rc; struct qeth_reply *reply = NULL; - unsigned long timeout, event_timeout; - struct qeth_ipa_cmd *cmd = NULL; QETH_CARD_TEXT(card, 2, "sendctl"); - if (card->read_or_write_problem) { - qeth_release_buffer(channel, iob); - return -EIO; - } reply = qeth_alloc_reply(card); if (!reply) { qeth_release_buffer(channel, iob); @@ -2058,27 +1837,24 @@ static int qeth_send_control_data(struct qeth_card *card, int len, qeth_get_reply(reply); iob->reply = reply; - while (atomic_cmpxchg(&channel->irq_pending, 0, 1)) ; - - if (IS_IPA(iob->data)) { - cmd = __ipa_cmd(iob); - cmd->hdr.seqno = card->seqno.ipa++; - reply->seqno = cmd->hdr.seqno; - event_timeout = QETH_IPA_TIMEOUT; - } else { - reply->seqno = QETH_IDX_COMMAND_SEQNO; - event_timeout = QETH_TIMEOUT; + timeout = wait_event_interruptible_timeout(card->wait_q, + qeth_trylock_channel(channel), + timeout); + if (timeout <= 0) { + qeth_put_reply(reply); + qeth_release_buffer(channel, iob); + return (timeout == -ERESTARTSYS) ? 
-EINTR : -ETIME; } - qeth_prepare_control_data(card, len, iob); - qeth_enqueue_reply(card, reply); + iob->finalize(card, iob, len); + QETH_DBF_HEX(CTRL, 2, iob->data, min(len, QETH_DBF_CTRL_LEN)); - timeout = jiffies + event_timeout; + qeth_enqueue_reply(card, reply); QETH_CARD_TEXT(card, 6, "noirqpnd"); spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw, - (addr_t) iob, 0, 0, event_timeout); + (addr_t) iob, 0, 0, timeout); spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); if (rc) { QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n", @@ -2092,30 +1868,211 @@ static int qeth_send_control_data(struct qeth_card *card, int len, return rc; } - /* we have only one long running ipassist, since we can ensure - process context of this command we can sleep */ - if (cmd && cmd->hdr.command == IPA_CMD_SETIP && - cmd->hdr.prot_version == QETH_PROT_IPV4) { - if (!wait_event_timeout(reply->wait_q, - atomic_read(&reply->received), event_timeout)) - goto time_err; - } else { - while (!atomic_read(&reply->received)) { - if (time_after(jiffies, timeout)) - goto time_err; - cpu_relax(); - } - } + timeout = wait_for_completion_interruptible_timeout(&reply->received, + timeout); + if (timeout <= 0) + rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME; qeth_dequeue_reply(card, reply); - rc = reply->rc; + if (!rc) + rc = reply->rc; qeth_put_reply(reply); return rc; +} -time_err: - qeth_dequeue_reply(card, reply); - qeth_put_reply(reply); - return -ETIME; +static int qeth_idx_check_activate_response(struct qeth_card *card, + struct qeth_channel *channel, + struct qeth_cmd_buffer *iob) +{ + int rc; + + rc = qeth_check_idx_response(card, iob->data); + if (rc) + return rc; + + if (QETH_IS_IDX_ACT_POS_REPLY(iob->data)) + return 0; + + /* negative reply: */ + QETH_DBF_TEXT_(SETUP, 2, "idxneg%c", + QETH_IDX_ACT_CAUSE_CODE(iob->data)); + + switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) { + case QETH_IDX_ACT_ERR_EXCL: + dev_err(&channel->ccwdev->dev, + "The adapter is used exclusively by another host\n"); + return -EBUSY; + case QETH_IDX_ACT_ERR_AUTH: + case QETH_IDX_ACT_ERR_AUTH_USER: + dev_err(&channel->ccwdev->dev, + "Setting the device online failed because of insufficient authorization\n"); + return -EPERM; + default: + QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n", + CCW_DEVID(channel->ccwdev)); + return -EIO; + } +} + +static void qeth_idx_query_read_cb(struct qeth_card *card, + struct qeth_channel *channel, + struct qeth_cmd_buffer *iob) +{ + u16 peer_level; + int rc; + + QETH_DBF_TEXT(SETUP, 2, "idxrdcb"); + + rc = qeth_idx_check_activate_response(card, channel, iob); + if (rc) + goto out; + + memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); + if (peer_level != qeth_peer_func_level(card->info.func_level)) { + QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n", + CCW_DEVID(channel->ccwdev), + card->info.func_level, peer_level); + rc = -EINVAL; + goto out; + } + + memcpy(&card->token.issuer_rm_r, + QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data), + QETH_MPC_TOKEN_LENGTH); + memcpy(&card->info.mcl_level[0], + QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH); + +out: + qeth_notify_reply(iob->reply, rc); + qeth_release_buffer(channel, iob); +} + +static void qeth_idx_query_write_cb(struct qeth_card *card, + struct qeth_channel *channel, + struct qeth_cmd_buffer *iob) +{ + u16 peer_level; + int rc; + + QETH_DBF_TEXT(SETUP, 2, "idxwrcb"); + + rc = 
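
qeth_send_control_data() thus loses both of its ad-hoc wait loops: channel arbitration goes from a busy-wait cmpxchg loop to sleeping on card->wait_q until qeth_trylock_channel() succeeds, and reply completion is awaited with the timed, interruptible completion wait. Both call sites depend on the same return-value convention, which the error mapping encodes:

	/*
	 * wait_event_interruptible_timeout() and
	 * wait_for_completion_interruptible_timeout() return:
	 *   > 0           condition met / completed (remaining jiffies)
	 *   == 0          timed out                 -> mapped to -ETIME
	 *   -ERESTARTSYS  interrupted by a signal   -> mapped to -EINTR
	 */

Also worth noticing: the timeout budget is shared. The remaining jiffies returned by the first wait are passed straight on as the completion timeout, so a command cannot consume more than iob->timeout in total.
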
qeth_idx_check_activate_response(card, channel, iob); + if (rc) + goto out; + + memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); + if ((peer_level & ~0x0100) != + qeth_peer_func_level(card->info.func_level)) { + QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n", + CCW_DEVID(channel->ccwdev), + card->info.func_level, peer_level); + rc = -EINVAL; + } + +out: + qeth_notify_reply(iob->reply, rc); + qeth_release_buffer(channel, iob); +} + +static void qeth_idx_finalize_query_cmd(struct qeth_card *card, + struct qeth_cmd_buffer *iob, + unsigned int length) +{ + qeth_setup_ccw(iob->channel->ccw, CCW_CMD_READ, length, iob->data); +} + +static void qeth_idx_activate_cb(struct qeth_card *card, + struct qeth_channel *channel, + struct qeth_cmd_buffer *iob) +{ + qeth_notify_reply(iob->reply, 0); + qeth_release_buffer(channel, iob); +} + +static void qeth_idx_setup_activate_cmd(struct qeth_card *card, + struct qeth_cmd_buffer *iob) +{ + u16 addr = (card->info.cula << 8) + card->info.unit_addr2; + u8 port = ((u8)card->dev->dev_port) | 0x80; + struct ccw_dev_id dev_id; + + ccw_device_get_id(CARD_DDEV(card), &dev_id); + iob->finalize = qeth_idx_finalize_cmd; + iob->callback = qeth_idx_activate_cb; + + memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1); + memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data), + &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH); + memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data), + &card->info.func_level, 2); + memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &dev_id.devno, 2); + memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2); +} + +static int qeth_idx_activate_read_channel(struct qeth_card *card) +{ + struct qeth_channel *channel = &card->read; + struct qeth_cmd_buffer *iob; + int rc; + + QETH_DBF_TEXT(SETUP, 2, "idxread"); + + iob = qeth_get_buffer(channel); + if (!iob) + return -ENOMEM; + + memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE); + qeth_idx_setup_activate_cmd(card, iob); + + rc = qeth_send_control_data(card, IDX_ACTIVATE_SIZE, iob, NULL, NULL); + if (rc) + return rc; + + iob = qeth_get_buffer(channel); + if (!iob) + return -ENOMEM; + + iob->finalize = qeth_idx_finalize_query_cmd; + iob->callback = qeth_idx_query_read_cb; + rc = qeth_send_control_data(card, QETH_BUFSIZE, iob, NULL, NULL); + if (rc) + return rc; + + channel->state = CH_STATE_UP; + return 0; +} + +static int qeth_idx_activate_write_channel(struct qeth_card *card) +{ + struct qeth_channel *channel = &card->write; + struct qeth_cmd_buffer *iob; + int rc; + + QETH_DBF_TEXT(SETUP, 2, "idxwrite"); + + iob = qeth_get_buffer(channel); + if (!iob) + return -ENOMEM; + + memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE); + qeth_idx_setup_activate_cmd(card, iob); + + rc = qeth_send_control_data(card, IDX_ACTIVATE_SIZE, iob, NULL, NULL); + if (rc) + return rc; + + iob = qeth_get_buffer(channel); + if (!iob) + return -ENOMEM; + + iob->finalize = qeth_idx_finalize_query_cmd; + iob->callback = qeth_idx_query_write_cb; + rc = qeth_send_control_data(card, QETH_BUFSIZE, iob, NULL, NULL); + if (rc) + return rc; + + channel->state = CH_STATE_UP; + return 0; } static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply, @@ -2140,7 +2097,9 @@ static int qeth_cm_enable(struct qeth_card *card) QETH_DBF_TEXT(SETUP, 2, "cmenable"); iob = qeth_wait_for_buffer(&card->write); + iob->finalize = qeth_mpc_finalize_cmd; memcpy(iob->data, CM_ENABLE, CM_ENABLE_SIZE); + memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data), &card->token.issuer_rm_r, 
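
The IDX activation rewrite visible in these hunks turns the old hand-rolled ccw_device_start_timeout()/wait_event sequence (deleted further up) into two ordinary qeth_send_control_data() exchanges per channel, so the generic locking, timeout and error handling is reused. Condensed from the patch, the read-channel variant does:

	iob = qeth_get_buffer(channel);
	memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
	qeth_idx_setup_activate_cmd(card, iob);		/* WRITE + activate cb */
	rc = qeth_send_control_data(card, IDX_ACTIVATE_SIZE, iob, NULL, NULL);

	iob = qeth_get_buffer(channel);			/* step 2: fetch reply */
	iob->finalize = qeth_idx_finalize_query_cmd;	/* CCW_CMD_READ */
	iob->callback = qeth_idx_query_read_cb;
	rc = qeth_send_control_data(card, QETH_BUFSIZE, iob, NULL, NULL);

The first exchange completes as soon as the ACTIVATE CCW finishes; only the second actually reads and validates the peer's response.
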
QETH_MPC_TOKEN_LENGTH); memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data), @@ -2173,7 +2132,9 @@ static int qeth_cm_setup(struct qeth_card *card) QETH_DBF_TEXT(SETUP, 2, "cmsetup"); iob = qeth_wait_for_buffer(&card->write); + iob->finalize = qeth_mpc_finalize_cmd; memcpy(iob->data, CM_SETUP, CM_SETUP_SIZE); + memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data), &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH); memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data), @@ -2206,7 +2167,7 @@ static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu) /* adjust RX buffer size to new max MTU: */ card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE; if (dev->max_mtu && dev->max_mtu != max_mtu) - qeth_free_qdio_buffers(card); + qeth_free_qdio_queues(card); } else { if (dev->mtu) new_mtu = dev->mtu; @@ -2253,7 +2214,7 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply, memcpy(&card->token.ulp_filter_r, QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data), QETH_MPC_TOKEN_LENGTH); - if (card->info.type == QETH_CARD_TYPE_IQD) { + if (IS_IQD(card)) { memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2); mtu = qeth_get_mtu_outof_framesize(framesize); } else { @@ -2290,6 +2251,7 @@ static int qeth_ulp_enable(struct qeth_card *card) QETH_DBF_TEXT(SETUP, 2, "ulpenabl"); iob = qeth_wait_for_buffer(&card->write); + iob->finalize = qeth_mpc_finalize_cmd; memcpy(iob->data, ULP_ENABLE, ULP_ENABLE_SIZE); *(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port; @@ -2336,6 +2298,7 @@ static int qeth_ulp_setup(struct qeth_card *card) QETH_DBF_TEXT(SETUP, 2, "ulpsetup"); iob = qeth_wait_for_buffer(&card->write); + iob->finalize = qeth_mpc_finalize_cmd; memcpy(iob->data, ULP_SETUP, ULP_SETUP_SIZE); memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data), @@ -2377,12 +2340,12 @@ static void qeth_free_output_queue(struct qeth_qdio_out_q *q) if (!q) return; - qeth_clear_outq_buffers(q, 1); + qeth_drain_output_queue(q, true); qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); kfree(q); } -static struct qeth_qdio_out_q *qeth_alloc_qdio_out_buf(void) +static struct qeth_qdio_out_q *qeth_alloc_output_queue(void) { struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL); @@ -2396,7 +2359,7 @@ static struct qeth_qdio_out_q *qeth_alloc_qdio_out_buf(void) return q; } -static int qeth_alloc_qdio_buffers(struct qeth_card *card) +static int qeth_alloc_qdio_queues(struct qeth_card *card) { int i, j; @@ -2417,11 +2380,12 @@ static int qeth_alloc_qdio_buffers(struct qeth_card *card) /* outbound */ for (i = 0; i < card->qdio.no_out_queues; ++i) { - card->qdio.out_qs[i] = qeth_alloc_qdio_out_buf(); + card->qdio.out_qs[i] = qeth_alloc_output_queue(); if (!card->qdio.out_qs[i]) goto out_freeoutq; QETH_DBF_TEXT_(SETUP, 2, "outq %i", i); QETH_DBF_HEX(SETUP, 2, &card->qdio.out_qs[i], sizeof(void *)); + card->qdio.out_qs[i]->card = card; card->qdio.out_qs[i]->queue_no = i; /* give outbound qeth_qdio_buffers their qdio_buffers */ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { @@ -2458,7 +2422,7 @@ out_nomem: return -ENOMEM; } -static void qeth_free_qdio_buffers(struct qeth_card *card) +static void qeth_free_qdio_queues(struct qeth_card *card) { int i, j; @@ -2523,6 +2487,7 @@ static int qeth_dm_act(struct qeth_card *card) QETH_DBF_TEXT(SETUP, 2, "dmact"); iob = qeth_wait_for_buffer(&card->write); + iob->finalize = qeth_mpc_finalize_cmd; memcpy(iob->data, DM_ACT, DM_ACT_SIZE); memcpy(QETH_DM_ACT_DEST_ADDR(iob->data), @@ -2564,7 +2529,7 @@ static int qeth_mpc_initialize(struct qeth_card *card) 
QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); goto out_qdio; } - rc = qeth_alloc_qdio_buffers(card); + rc = qeth_alloc_qdio_queues(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); goto out_qdio; @@ -2572,7 +2537,7 @@ static int qeth_mpc_initialize(struct qeth_card *card) rc = qeth_qdio_establish(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); - qeth_free_qdio_buffers(card); + qeth_free_qdio_queues(card); goto out_qdio; } rc = qeth_qdio_activate(card); @@ -2588,7 +2553,7 @@ static int qeth_mpc_initialize(struct qeth_card *card) return 0; out_qdio: - qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD); + qeth_qdio_clear_card(card, !IS_IQD(card)); qdio_free(CARD_DDEV(card)); return rc; } @@ -2611,8 +2576,7 @@ void qeth_print_status_message(struct qeth_card *card) } /* fallthrough */ case QETH_CARD_TYPE_IQD: - if ((card->info.guestlan) || - (card->info.mcl_level[0] & 0x80)) { + if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) { card->info.mcl_level[0] = (char) _ebcasc[(__u8) card->info.mcl_level[0]]; card->info.mcl_level[1] = (char) _ebcasc[(__u8) @@ -2733,7 +2697,7 @@ static int qeth_init_input_buffer(struct qeth_card *card, int qeth_init_qdio_queues(struct qeth_card *card) { - int i, j; + unsigned int i; int rc; QETH_DBF_TEXT(SETUP, 2, "initqdqs"); @@ -2762,19 +2726,15 @@ int qeth_init_qdio_queues(struct qeth_card *card) /* outbound queue */ for (i = 0; i < card->qdio.no_out_queues; ++i) { - qdio_reset_buffers(card->qdio.out_qs[i]->qdio_bufs, - QDIO_MAX_BUFFERS_PER_Q); - for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { - qeth_clear_output_buffer(card->qdio.out_qs[i], - card->qdio.out_qs[i]->bufs[j]); - } - card->qdio.out_qs[i]->card = card; - card->qdio.out_qs[i]->next_buf_to_fill = 0; - card->qdio.out_qs[i]->do_pack = 0; - atomic_set(&card->qdio.out_qs[i]->used_buffers, 0); - atomic_set(&card->qdio.out_qs[i]->set_pci_flags_count, 0); - atomic_set(&card->qdio.out_qs[i]->state, - QETH_OUT_Q_UNLOCKED); + struct qeth_qdio_out_q *queue = card->qdio.out_qs[i]; + + qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); + queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card); + queue->next_buf_to_fill = 0; + queue->do_pack = 0; + atomic_set(&queue->used_buffers, 0); + atomic_set(&queue->set_pci_flags_count, 0); + atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); } return 0; } @@ -2805,12 +2765,26 @@ static void qeth_fill_ipacmd_header(struct qeth_card *card, cmd->hdr.prot_version = prot; } +static void qeth_ipa_finalize_cmd(struct qeth_card *card, + struct qeth_cmd_buffer *iob, + unsigned int length) +{ + qeth_mpc_finalize_cmd(card, iob, length); + + /* override with IPA-specific values: */ + __ipa_cmd(iob)->hdr.seqno = card->seqno.ipa; + iob->reply->seqno = card->seqno.ipa++; +} + void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, u16 cmd_length) { u16 total_length = IPA_PDU_HEADER_SIZE + cmd_length; u8 prot_type = qeth_mpc_select_prot_type(card); + iob->finalize = qeth_ipa_finalize_cmd; + iob->timeout = QETH_IPA_TIMEOUT; + memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE); memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2); memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1); @@ -2866,6 +2840,11 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, QETH_CARD_TEXT(card, 4, "sendipa"); + if (card->read_or_write_problem) { + qeth_release_buffer(iob->channel, iob); + return -EIO; + } + if (reply_cb == NULL) reply_cb = qeth_send_ipa_cmd_cb; memcpy(&length, QETH_IPA_PDU_LEN_TOTAL(iob->data), 2); @@ 
-3251,7 +3230,7 @@ static void qeth_handle_send_error(struct qeth_card *card, int sbalf15 = buffer->buffer->element[15].sflags; QETH_CARD_TEXT(card, 6, "hdsnderr"); - if (card->info.type == QETH_CARD_TYPE_IQD) { + if (IS_IQD(card)) { if (sbalf15 == 0) { qdio_err = 0; } else { @@ -3348,7 +3327,7 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, if (queue->bufstates) queue->bufstates[bidx].user = buf; - if (queue->card->info.type == QETH_CARD_TYPE_IQD) + if (IS_IQD(queue->card)) continue; if (!queue->do_pack) { @@ -3378,11 +3357,9 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, } QETH_TXQ_STAT_ADD(queue, bufs, count); - netif_trans_update(queue->card->dev); qdio_flags = QDIO_FLAG_SYNC_OUTPUT; if (atomic_read(&queue->set_pci_flags_count)) qdio_flags |= QDIO_FLAG_PCI_OUT; - atomic_add(count, &queue->used_buffers); rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags, queue->queue_no, index, count); if (rc) { @@ -3422,7 +3399,6 @@ static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue) * do_send_packet. So, we check if there is a * packing buffer to be flushed here. */ - netif_stop_queue(queue->card->dev); index = queue->next_buf_to_fill; q_was_packing = queue->do_pack; /* queue->do_pack may change */ @@ -3467,7 +3443,7 @@ int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq) goto out; } - qeth_free_qdio_buffers(card); + qeth_free_qdio_queues(card); card->options.cq = cq; rc = 0; } @@ -3493,7 +3469,7 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err, QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err); if (qdio_err) { - netif_stop_queue(card->dev); + netif_tx_stop_all_queues(card->dev); qeth_schedule_recovery(card); return; } @@ -3549,12 +3525,14 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev, struct qeth_card *card = (struct qeth_card *) card_ptr; struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue]; struct qeth_qdio_out_buffer *buffer; + struct net_device *dev = card->dev; + struct netdev_queue *txq; int i; QETH_CARD_TEXT(card, 6, "qdouhdl"); if (qdio_error & QDIO_ERROR_FATAL) { QETH_CARD_TEXT(card, 2, "achkcond"); - netif_stop_queue(card->dev); + netif_tx_stop_all_queues(dev); qeth_schedule_recovery(card); return; } @@ -3580,7 +3558,7 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev, /* prepare the queue slot for re-use: */ qeth_scrub_qdio_buffer(buffer->buffer, - QETH_MAX_BUFFER_ELEMENTS(card)); + queue->max_elements); if (qeth_init_qdio_out_buf(queue, bidx)) { QETH_CARD_TEXT(card, 2, "outofbuf"); qeth_schedule_recovery(card); @@ -3600,33 +3578,32 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev, } atomic_sub(count, &queue->used_buffers); /* check if we need to do something on this outbound queue */ - if (card->info.type != QETH_CARD_TYPE_IQD) + if (!IS_IQD(card)) qeth_check_outbound_queue(queue); - netif_wake_queue(queue->card->dev); -} - -/* We cannot use outbound queue 3 for unicast packets on HiperSockets */ -static inline int qeth_cut_iqd_prio(struct qeth_card *card, int queue_num) -{ - if ((card->info.type == QETH_CARD_TYPE_IQD) && (queue_num == 3)) - return 2; - return queue_num; + if (IS_IQD(card)) + __queue = qeth_iqd_translate_txq(dev, __queue); + txq = netdev_get_tx_queue(dev, __queue); + /* xmit may have observed the full-condition, but not yet stopped the + * txq. In which case the code below won't trigger. So before returning, + * xmit will re-check the txq's fill level and wake it up if needed. 
+ */ + if (netif_tx_queue_stopped(txq) && !qeth_out_queue_is_full(queue)) + netif_tx_wake_queue(txq); } /** * Note: Function assumes that we have 4 outbound queues. */ -int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, - int ipv) +int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb) { - __be16 *tci; + struct vlan_ethhdr *veth = vlan_eth_hdr(skb); u8 tos; switch (card->qdio.do_prio_queueing) { case QETH_PRIO_Q_ING_TOS: case QETH_PRIO_Q_ING_PREC: - switch (ipv) { + switch (qeth_get_ip_version(skb)) { case 4: tos = ipv4_get_dsfield(ip_hdr(skb)); break; @@ -3637,9 +3614,9 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, return card->qdio.default_out_queue; } if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC) - return qeth_cut_iqd_prio(card, ~tos >> 6 & 3); + return ~tos >> 6 & 3; if (tos & IPTOS_MINCOST) - return qeth_cut_iqd_prio(card, 3); + return 3; if (tos & IPTOS_RELIABILITY) return 2; if (tos & IPTOS_THROUGHPUT) @@ -3650,12 +3627,11 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, case QETH_PRIO_Q_ING_SKB: if (skb->priority > 5) return 0; - return qeth_cut_iqd_prio(card, ~skb->priority >> 1 & 3); + return ~skb->priority >> 1 & 3; case QETH_PRIO_Q_ING_VLAN: - tci = &((struct ethhdr *)skb->data)->h_proto; - if (be16_to_cpu(*tci) == ETH_P_8021Q) - return qeth_cut_iqd_prio(card, - ~be16_to_cpu(*(tci + 1)) >> (VLAN_PRIO_SHIFT + 1) & 3); + if (veth->h_vlan_proto == htons(ETH_P_8021Q)) + return ~ntohs(veth->h_vlan_TCI) >> + (VLAN_PRIO_SHIFT + 1) & 3; break; default: break; @@ -3729,8 +3705,8 @@ static int qeth_add_hw_header(struct qeth_qdio_out_q *queue, unsigned int hdr_len, unsigned int proto_len, unsigned int *elements) { - const unsigned int max_elements = QETH_MAX_BUFFER_ELEMENTS(queue->card); const unsigned int contiguous = proto_len ? proto_len : 1; + const unsigned int max_elements = queue->max_elements; unsigned int __elements; addr_t start, end; bool push_ok; @@ -3867,11 +3843,13 @@ static void __qeth_fill_buffer(struct sk_buff *skb, * from qeth_core_header_cache. * @offset: when mapping the skb, start at skb->data + offset * @hd_len: if > 0, build a dedicated header element of this size + * @flush: Prepare the buffer to be flushed, regardless of its fill level. */ static int qeth_fill_buffer(struct qeth_qdio_out_q *queue, struct qeth_qdio_out_buffer *buf, struct sk_buff *skb, struct qeth_hdr *hdr, - unsigned int offset, unsigned int hd_len) + unsigned int offset, unsigned int hd_len, + bool flush) { struct qdio_buffer *buffer = buf->buffer; bool is_first_elem = true; @@ -3900,8 +3878,8 @@ static int qeth_fill_buffer(struct qeth_qdio_out_q *queue, QETH_TXQ_STAT_INC(queue, skbs_pack); /* If the buffer still has free elements, keep using it. */ - if (buf->next_element_to_fill < - QETH_MAX_BUFFER_ELEMENTS(queue->card)) + if (!flush && + buf->next_element_to_fill < queue->max_elements) return 0; } @@ -3918,15 +3896,31 @@ static int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue, { int index = queue->next_buf_to_fill; struct qeth_qdio_out_buffer *buffer = queue->bufs[index]; + struct netdev_queue *txq; + bool stopped = false; - /* - * check if buffer is empty to make sure that we do not 'overtake' - * ourselves and try to fill a buffer that is already primed + /* Just a sanity check; the wake/stop logic should ensure that we always + * get a free buffer. */
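The hunks around this point are the core of the new TX flow control: instead of the old global netif_stop_queue()/netif_wake_queue() calls, each sender now stops only its own txq once it claims the last free buffer, and a re-check of the fill level closes the race with a concurrent TX completion. A minimal generic sketch of that stop/re-check/wake pattern follows; struct my_txq, MY_RING_SIZE and the my_*() helpers are illustrative stand-ins, not part of this patch.

#include <linux/atomic.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define MY_RING_SIZE 128	/* hypothetical ring depth, stands in for QDIO_MAX_BUFFERS_PER_Q */

struct my_txq {			/* hypothetical per-queue state */
	atomic_t used;
};

static void my_hw_submit(struct my_txq *q, struct sk_buff *skb);	/* hypothetical */

static netdev_tx_t my_xmit(struct my_txq *q, struct sk_buff *skb,
			   struct net_device *dev)
{
	struct netdev_queue *txq;
	bool stopped = false;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	/* Step 1: claim a slot; if that consumed the last free one, stop the
	 * txq _before_ the skb is handed to the hardware.
	 */
	if (atomic_inc_return(&q->used) >= MY_RING_SIZE) {
		netif_tx_stop_queue(txq);
		stopped = true;
	}

	my_hw_submit(q, skb);

	/* Step 2: a TX completion may have freed slots between the stop
	 * above and this point; re-check and wake so the txq cannot stall.
	 */
	if (stopped && atomic_read(&q->used) < MY_RING_SIZE)
		netif_tx_start_queue(txq);

	return NETDEV_TX_OK;
}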
if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) return -EBUSY; - qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len); + + txq = netdev_get_tx_queue(queue->card->dev, skb_get_queue_mapping(skb)); + + if (atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) { + /* If a TX completion happens right _here_ and fails to wake + * the txq, then our re-check below will catch the race. + */ + QETH_TXQ_STAT_INC(queue, stopped); + netif_tx_stop_queue(txq); + stopped = true; + } + + qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len, stopped); qeth_flush_buffers(queue, index, 1); + + if (stopped && !qeth_out_queue_is_full(queue)) + netif_tx_start_queue(txq); return 0; } @@ -3936,6 +3930,8 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, int elements_needed) { struct qeth_qdio_out_buffer *buffer; + struct netdev_queue *txq; + bool stopped = false; int start_index; int flush_count = 0; int do_pack = 0; @@ -3947,21 +3943,24 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED); start_index = queue->next_buf_to_fill; buffer = queue->bufs[queue->next_buf_to_fill]; - /* - * check if buffer is empty to make sure that we do not 'overtake' - * ourselves and try to fill a buffer that is already primed + + /* Just a sanity check; the wake/stop logic should ensure that we always + * get a free buffer. */ if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) { atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); return -EBUSY; } + + txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb)); + /* check if we need to switch packing state of this queue */ qeth_switch_to_packing_if_needed(queue); if (queue->do_pack) { do_pack = 1; /* does packet fit in current buffer? */ - if ((QETH_MAX_BUFFER_ELEMENTS(card) - - buffer->next_element_to_fill) < elements_needed) { + if (buffer->next_element_to_fill + elements_needed > + queue->max_elements) { /* ... no -> set state PRIMED */ atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); flush_count++; @@ -3969,8 +3968,8 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, (queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q; buffer = queue->bufs[queue->next_buf_to_fill]; - /* we did a step forward, so check buffer state - * again */ + + /* We stepped forward, so sanity-check again: */ if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) { qeth_flush_buffers(queue, start_index, @@ -3983,8 +3982,18 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, } } - flush_count += qeth_fill_buffer(queue, buffer, skb, hdr, offset, - hd_len); + if (buffer->next_element_to_fill == 0 && + atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) { + /* If a TX completion happens right _here_ and fails to wake + * the txq, then our re-check below will catch the race.
+ */ + QETH_TXQ_STAT_INC(queue, stopped); + netif_tx_stop_queue(txq); + stopped = true; + } + + flush_count += qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len, + stopped); if (flush_count) qeth_flush_buffers(queue, start_index, flush_count); else if (!atomic_read(&queue->set_pci_flags_count)) @@ -4015,6 +4024,8 @@ out: if (do_pack) QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count); + if (stopped && !qeth_out_queue_is_full(queue)) + netif_tx_start_queue(txq); return rc; } EXPORT_SYMBOL_GPL(qeth_do_send_packet); @@ -4101,9 +4112,6 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb, } else { if (!push_len) kmem_cache_free(qeth_core_header_cache, hdr); - if (rc == -EBUSY) - /* roll back to ETH header */ - skb_pull(skb, push_len); } return rc; } @@ -4321,9 +4329,8 @@ int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback) QETH_CARD_TEXT(card, 4, "setactlo"); - if ((card->info.type == QETH_CARD_TYPE_OSD || - card->info.type == QETH_CARD_TYPE_OSX) && - qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) { + if ((IS_OSD(card) || IS_OSX(card)) && + qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) { rc = qeth_setadpparms_set_access_ctrl(card, card->options.isolation, fallback); if (rc) { @@ -4348,7 +4355,6 @@ void qeth_tx_timeout(struct net_device *dev) card = dev->ml_priv; QETH_CARD_TEXT(card, 4, "txtimeo"); - QETH_CARD_STAT_INC(card, tx_errors); qeth_schedule_recovery(card); } EXPORT_SYMBOL_GPL(qeth_tx_timeout); @@ -4489,7 +4495,7 @@ static int qeth_snmp_command(struct qeth_card *card, char __user *udata) QETH_CARD_TEXT(card, 3, "snmpcmd"); - if (card->info.guestlan) + if (IS_VM_NIC(card)) return -EOPNOTSUPP; if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) && @@ -4732,14 +4738,6 @@ out: } EXPORT_SYMBOL_GPL(qeth_vm_request_mac); -static int qeth_get_qdio_q_format(struct qeth_card *card) -{ - if (card->info.type == QETH_CARD_TYPE_IQD) - return QDIO_IQDIO_QFMT; - else - return QDIO_QETH_QFMT; -} - static void qeth_determine_capabilities(struct qeth_card *card) { int rc; @@ -4878,7 +4876,8 @@ static int qeth_qdio_establish(struct qeth_card *card) memset(&init_data, 0, sizeof(struct qdio_initialize)); init_data.cdev = CARD_DDEV(card); - init_data.q_format = qeth_get_qdio_q_format(card); + init_data.q_format = IS_IQD(card) ? QDIO_IQDIO_QFMT : + QDIO_QETH_QFMT; init_data.qib_param_field_format = 0; init_data.qib_param_field = qib_param_field; init_data.no_input_qs = card->qdio.no_in_queues; @@ -4890,8 +4889,7 @@ static int qeth_qdio_establish(struct qeth_card *card) init_data.input_sbal_addr_array = in_sbal_ptrs; init_data.output_sbal_addr_array = out_sbal_ptrs; init_data.output_sbal_state_array = card->qdio.out_bufstates; - init_data.scan_threshold = - (card->info.type == QETH_CARD_TYPE_IQD) ? 1 : 32; + init_data.scan_threshold = IS_IQD(card) ? 
1 : 32; if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED, QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) { @@ -4937,7 +4935,7 @@ static void qeth_core_free_card(struct qeth_card *card) qeth_clean_channel(&card->write); qeth_clean_channel(&card->data); destroy_workqueue(card->event_wq); - qeth_free_qdio_buffers(card); + qeth_free_qdio_queues(card); unregister_service_level(&card->qeth_service_level); dev_set_drvdata(&card->gdev->dev, NULL); kfree(card); @@ -4986,12 +4984,14 @@ int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok) QETH_DBF_TEXT(SETUP, 2, "hrdsetup"); atomic_set(&card->force_alloc_skb, 0); - qeth_update_from_chp_desc(card); + rc = qeth_update_from_chp_desc(card); + if (rc) + return rc; retry: if (retries < 3) QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n", CARD_DEVID(card)); - rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD); + rc = qeth_qdio_clear_card(card, !IS_IQD(card)); ccw_device_set_offline(CARD_DDEV(card)); ccw_device_set_offline(CARD_WDEV(card)); ccw_device_set_offline(CARD_RDEV(card)); @@ -5019,8 +5019,9 @@ retriable: qeth_determine_capabilities(card); qeth_init_tokens(card); qeth_init_func_level(card); - rc = qeth_idx_activate_channel(card, &card->read, qeth_idx_read_cb); - if (rc == -ERESTARTSYS) { + + rc = qeth_idx_activate_read_channel(card); + if (rc == -EINTR) { QETH_DBF_TEXT(SETUP, 2, "break2"); return rc; } else if (rc) { @@ -5030,8 +5031,9 @@ retriable: else goto retry; } - rc = qeth_idx_activate_channel(card, &card->write, qeth_idx_write_cb); - if (rc == -ERESTARTSYS) { + + rc = qeth_idx_activate_write_channel(card); + if (rc == -EINTR) { QETH_DBF_TEXT(SETUP, 2, "break3"); return rc; } else if (rc) { @@ -5171,7 +5173,7 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card, return NULL; if (((skb_len >= card->options.rx_sg_cb) && - (!(card->info.type == QETH_CARD_TYPE_OSN)) && + !IS_OSN(card) && (!atomic_read(&card->force_alloc_skb))) || (card->options.cq == QETH_CQ_ENABLED)) use_rx_sg = 1; @@ -5562,13 +5564,17 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card) switch (card->info.type) { case QETH_CARD_TYPE_IQD: - dev = alloc_netdev(0, "hsi%d", NET_NAME_UNKNOWN, ether_setup); + dev = alloc_netdev_mqs(0, "hsi%d", NET_NAME_UNKNOWN, + ether_setup, QETH_MAX_QUEUES, 1); + break; + case QETH_CARD_TYPE_OSM: + dev = alloc_etherdev(0); break; case QETH_CARD_TYPE_OSN: dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN, ether_setup); break; default: - dev = alloc_etherdev(0); + dev = alloc_etherdev_mqs(0, QETH_MAX_QUEUES, 1); } if (!dev) @@ -5590,8 +5596,10 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card) dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->hw_features |= NETIF_F_SG; dev->vlan_features |= NETIF_F_SG; - if (IS_IQD(card)) + if (IS_IQD(card)) { + netif_set_real_num_tx_queues(dev, QETH_IQD_MIN_TXQ); dev->features |= NETIF_F_SG; + } } return dev; @@ -5641,14 +5649,16 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) } qeth_setup_card(card); - qeth_update_from_chp_desc(card); - card->dev = qeth_alloc_netdev(card); if (!card->dev) { rc = -ENOMEM; goto err_card; } + card->qdio.no_out_queues = card->dev->num_tx_queues; + rc = qeth_update_from_chp_desc(card); + if (rc) + goto err_chp_desc; qeth_determine_capabilities(card); enforced_disc = qeth_enforce_discipline(card); switch (enforced_disc) { @@ -5661,9 +5671,8 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) if (rc) goto err_load; - gdev->dev.type = (card->info.type 
!= QETH_CARD_TYPE_OSN) - ? card->discipline->devtype - : &qeth_osn_devtype; + gdev->dev.type = IS_OSN(card) ? &qeth_osn_devtype : + card->discipline->devtype; rc = card->discipline->setup(card->gdev); if (rc) goto err_disc; @@ -5675,6 +5684,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev) err_disc: qeth_core_free_discipline(card); err_load: +err_chp_desc: free_netdev(card->dev); err_card: qeth_core_free_card(card); @@ -5706,10 +5716,8 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev) enum qeth_discipline_id def_discipline; if (!card->discipline) { - if (card->info.type == QETH_CARD_TYPE_IQD) - def_discipline = QETH_DISCIPLINE_LAYER3; - else - def_discipline = QETH_DISCIPLINE_LAYER2; + def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 : + QETH_DISCIPLINE_LAYER2; rc = qeth_core_load_discipline(card, def_discipline); if (rc) goto err; @@ -5737,7 +5745,7 @@ static void qeth_core_shutdown(struct ccwgroup_device *gdev) if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap) qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); qeth_qdio_clear_card(card, 0); - qeth_clear_qdio_buffers(card); + qeth_drain_output_queues(card); qdio_free(CARD_DDEV(card)); } @@ -5837,13 +5845,10 @@ int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data); break; case SIOC_QETH_GET_CARD_TYPE: - if ((card->info.type == QETH_CARD_TYPE_OSD || - card->info.type == QETH_CARD_TYPE_OSM || - card->info.type == QETH_CARD_TYPE_OSX) && - !card->info.guestlan) + if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) && + !IS_VM_NIC(card)) return 1; - else - return 0; + return 0; case SIOCGMIIPHY: mii_data = if_mii(rq); mii_data->phy_id = 0; @@ -6193,7 +6198,6 @@ void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->rx_errors = card->stats.rx_errors; stats->rx_dropped = card->stats.rx_dropped; stats->multicast = card->stats.rx_multicast; - stats->tx_errors = card->stats.tx_errors; for (i = 0; i < card->qdio.no_out_queues; i++) { queue = card->qdio.out_qs[i]; @@ -6206,6 +6210,15 @@ void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) } EXPORT_SYMBOL_GPL(qeth_get_stats64); +u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb, + u8 cast_type, struct net_device *sb_dev) +{ + if (cast_type != RTN_UNICAST) + return QETH_IQD_MCAST_TXQ; + return QETH_IQD_MIN_UCAST_TXQ; +} +EXPORT_SYMBOL_GPL(qeth_iqd_select_queue); + int qeth_open(struct net_device *dev) { struct qeth_card *card = dev->ml_priv; @@ -6216,7 +6229,7 @@ int qeth_open(struct net_device *dev) return -EIO; card->data.state = CH_STATE_UP; - netif_start_queue(dev); + netif_tx_start_all_queues(dev); napi_enable(&card->napi); local_bh_disable(); diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h index f8c5d4a9be13..f5237b7c14c4 100644 --- a/drivers/s390/net/qeth_core_mpc.h +++ b/drivers/s390/net/qeth_core_mpc.h @@ -82,7 +82,7 @@ enum qeth_card_types { #define IS_OSM(card) ((card)->info.type == QETH_CARD_TYPE_OSM) #define IS_OSN(card) ((card)->info.type == QETH_CARD_TYPE_OSN) #define IS_OSX(card) ((card)->info.type == QETH_CARD_TYPE_OSX) -#define IS_VM_NIC(card) ((card)->info.guestlan) +#define IS_VM_NIC(card) ((card)->info.is_vm_nic) #define QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE 0x18 /* only the first two bytes are looked at in qeth_get_cardname_short */ diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c index 56deeb6f7bc0..9f392497d570 100644 --- 
a/drivers/s390/net/qeth_core_sys.c +++ b/drivers/s390/net/qeth_core_sys.c @@ -198,6 +198,9 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev, if (!card) return -EINVAL; + if (IS_IQD(card)) + return -EOPNOTSUPP; + mutex_lock(&card->conf_mutex); if (card->state != CARD_STATE_DOWN) { rc = -EPERM; @@ -239,10 +242,6 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev, card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING; card->qdio.default_out_queue = 2; } else if (sysfs_streq(buf, "no_prio_queueing:3")) { - if (card->info.type == QETH_CARD_TYPE_IQD) { - rc = -EPERM; - goto out; - } card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING; card->qdio.default_out_queue = 3; } else if (sysfs_streq(buf, "no_prio_queueing")) { @@ -480,8 +479,7 @@ static ssize_t qeth_dev_isolation_store(struct device *dev, return -EINVAL; mutex_lock(&card->conf_mutex); - if (card->info.type != QETH_CARD_TYPE_OSD && - card->info.type != QETH_CARD_TYPE_OSX) { + if (!IS_OSD(card) && !IS_OSX(card)) { rc = -EOPNOTSUPP; dev_err(&card->gdev->dev, "Adapter does not " "support QDIO data connection isolation\n"); diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c index 93a53fed4cf8..4166eb29f0bd 100644 --- a/drivers/s390/net/qeth_ethtool.c +++ b/drivers/s390/net/qeth_ethtool.c @@ -38,6 +38,7 @@ static const struct qeth_stats txq_stats[] = { QETH_TXQ_STAT("linearized+error skbs", skbs_linearized_fail), QETH_TXQ_STAT("TSO bytes", tso_bytes), QETH_TXQ_STAT("Packing mode switches", packing_mode_switch), + QETH_TXQ_STAT("Queue stopped", stopped), }; static const struct qeth_stats card_stats[] = { @@ -154,6 +155,21 @@ static void qeth_get_drvinfo(struct net_device *dev, CARD_RDEV_ID(card), CARD_WDEV_ID(card), CARD_DDEV_ID(card)); } +static void qeth_get_channels(struct net_device *dev, + struct ethtool_channels *channels) +{ + struct qeth_card *card = dev->ml_priv; + + channels->max_rx = dev->num_rx_queues; + channels->max_tx = card->qdio.no_out_queues; + channels->max_other = 0; + channels->max_combined = 0; + channels->rx_count = dev->real_num_rx_queues; + channels->tx_count = dev->real_num_tx_queues; + channels->other_count = 0; + channels->combined_count = 0; +} + /* Helper function to fill 'advertising' and 'supported' which are the same. */ /* Autoneg and full-duplex are supported and advertised unconditionally. 
*/ /* Always advertise and support all speeds up to specified, and only one */ @@ -359,6 +375,7 @@ const struct ethtool_ops qeth_ethtool_ops = { .get_ethtool_stats = qeth_get_ethtool_stats, .get_sset_count = qeth_get_sset_count, .get_drvinfo = qeth_get_drvinfo, + .get_channels = qeth_get_channels, .get_link_ksettings = qeth_get_link_ksettings, }; diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index c3067fd3bd9e..218801232ca2 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -149,29 +149,16 @@ static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac) return rc; } -static void qeth_l2_del_all_macs(struct qeth_card *card) +static void qeth_l2_drain_rx_mode_cache(struct qeth_card *card) { struct qeth_mac *mac; struct hlist_node *tmp; int i; - spin_lock_bh(&card->mclock); hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) { hash_del(&mac->hnode); kfree(mac); } - spin_unlock_bh(&card->mclock); -} - -static int qeth_l2_get_cast_type(struct qeth_card *card, struct sk_buff *skb) -{ - if (card->info.type == QETH_CARD_TYPE_OSN) - return RTN_UNICAST; - if (is_broadcast_ether_addr(skb->data)) - return RTN_BROADCAST; - if (is_multicast_ether_addr(skb->data)) - return RTN_MULTICAST; - return RTN_UNICAST; } static void qeth_l2_fill_header(struct qeth_qdio_out_q *queue, @@ -292,14 +279,16 @@ static void qeth_l2_stop_card(struct qeth_card *card) qeth_set_allowed_threads(card, 0, 1); + cancel_work_sync(&card->rx_mode_work); + qeth_l2_drain_rx_mode_cache(card); + if (card->state == CARD_STATE_SOFTSETUP) { - qeth_l2_del_all_macs(card); qeth_clear_ipacmd_list(card); card->state = CARD_STATE_HARDSETUP; } if (card->state == CARD_STATE_HARDSETUP) { qeth_qdio_clear_card(card, 0); - qeth_clear_qdio_buffers(card); + qeth_drain_output_queues(card); qeth_clear_working_pool_list(card); card->state = CARD_STATE_DOWN; } @@ -334,13 +323,11 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card, case QETH_HEADER_TYPE_LAYER2: skb->protocol = eth_type_trans(skb, skb->dev); qeth_rx_csum(card, skb, hdr->hdr.l2.flags[1]); - if (skb->protocol == htons(ETH_P_802_2)) - *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno; len = skb->len; napi_gro_receive(&card->napi, skb); break; case QETH_HEADER_TYPE_OSN: - if (card->info.type == QETH_CARD_TYPE_OSN) { + if (IS_OSN(card)) { skb_push(skb, sizeof(struct qeth_hdr)); skb_copy_to_linear_data(skb, hdr, sizeof(struct qeth_hdr)); @@ -391,8 +378,7 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card) } /* some devices don't support a custom MAC address: */ - if (card->info.type == QETH_CARD_TYPE_OSM || - card->info.type == QETH_CARD_TYPE_OSX) + if (IS_OSM(card) || IS_OSX(card)) return (rc) ? 
rc : -EADDRNOTAVAIL; eth_hw_addr_random(card->dev); @@ -515,9 +501,11 @@ static void qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha) hash_add(card->mac_htable, &mac->hnode, mac_hash); } -static void qeth_l2_set_rx_mode(struct net_device *dev) +static void qeth_l2_rx_mode_work(struct work_struct *work) { - struct qeth_card *card = dev->ml_priv; + struct qeth_card *card = container_of(work, struct qeth_card, + rx_mode_work); + struct net_device *dev = card->dev; struct netdev_hw_addr *ha; struct qeth_mac *mac; struct hlist_node *tmp; @@ -526,12 +514,12 @@ static void qeth_l2_set_rx_mode(struct net_device *dev) QETH_CARD_TEXT(card, 3, "setmulti"); - spin_lock_bh(&card->mclock); - + netif_addr_lock_bh(dev); netdev_for_each_mc_addr(ha, dev) qeth_l2_add_mac(card, ha); netdev_for_each_uc_addr(ha, dev) qeth_l2_add_mac(card, ha); + netif_addr_unlock_bh(dev); hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) { switch (mac->disp_flag) { @@ -554,8 +542,6 @@ static void qeth_l2_set_rx_mode(struct net_device *dev) } } - spin_unlock_bh(&card->mclock); - if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) qeth_setadp_promisc_mode(card); else @@ -586,7 +572,7 @@ static int qeth_l2_xmit_osn(struct qeth_card *card, struct sk_buff *skb, } elements += qeth_count_elements(skb, hd_len); - if (elements > QETH_MAX_BUFFER_ELEMENTS(card)) { + if (elements > queue->max_elements) { rc = -E2BIG; goto out; } @@ -603,37 +589,45 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct qeth_card *card = dev->ml_priv; - int cast_type = qeth_l2_get_cast_type(card, skb); - int ipv = qeth_get_ip_version(skb); + u16 txq = skb_get_queue_mapping(skb); struct qeth_qdio_out_q *queue; int tx_bytes = skb->len; int rc; - queue = qeth_get_tx_queue(card, skb, ipv, cast_type); - - netif_stop_queue(dev); + if (IS_IQD(card)) + txq = qeth_iqd_translate_txq(dev, txq); + queue = card->qdio.out_qs[txq]; if (IS_OSN(card)) rc = qeth_l2_xmit_osn(card, skb, queue); else - rc = qeth_xmit(card, skb, queue, ipv, cast_type, + rc = qeth_xmit(card, skb, queue, qeth_get_ip_version(skb), + qeth_get_ether_cast_type(skb), qeth_l2_fill_header); if (!rc) { QETH_TXQ_STAT_INC(queue, tx_packets); QETH_TXQ_STAT_ADD(queue, tx_bytes, tx_bytes); - netif_wake_queue(dev); return NETDEV_TX_OK; - } else if (rc == -EBUSY) { - return NETDEV_TX_BUSY; - } /* else fall through */ + } QETH_TXQ_STAT_INC(queue, tx_dropped); kfree_skb(skb); - netif_wake_queue(dev); return NETDEV_TX_OK; } +static u16 qeth_l2_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev) +{ + struct qeth_card *card = dev->ml_priv; + + if (IS_IQD(card)) + return qeth_iqd_select_queue(dev, skb, + qeth_get_ether_cast_type(skb), + sb_dev); + return qeth_get_priority_queue(card, skb); +} + static const struct device_type qeth_l2_devtype = { .name = "qeth_layer2", .groups = qeth_l2_attr_groups, @@ -653,6 +647,7 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev) } hash_init(card->mac_htable); + INIT_WORK(&card->rx_mode_work, qeth_l2_rx_mode_work); return 0; } @@ -673,12 +668,20 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev) unregister_netdev(card->dev); } +static void qeth_l2_set_rx_mode(struct net_device *dev) +{ + struct qeth_card *card = dev->ml_priv; + + schedule_work(&card->rx_mode_work); +} + static const struct net_device_ops qeth_l2_netdev_ops = { .ndo_open = qeth_open, .ndo_stop = qeth_stop, .ndo_get_stats64 = qeth_get_stats64, .ndo_start_xmit = 
qeth_l2_hard_start_xmit, .ndo_features_check = qeth_features_check, + .ndo_select_queue = qeth_l2_select_queue, .ndo_validate_addr = qeth_l2_validate_addr, .ndo_set_rx_mode = qeth_l2_set_rx_mode, .ndo_do_ioctl = qeth_do_ioctl, @@ -721,7 +724,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card, bool carrier_ok) card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; } - if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) { + if (IS_OSD(card) && !IS_VM_NIC(card)) { card->dev->features |= NETIF_F_SG; /* OSA 3S and earlier has no RX/TX support */ if (qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) { @@ -831,8 +834,7 @@ static int qeth_l2_set_online(struct ccwgroup_device *gdev) /* softsetup */ QETH_DBF_TEXT(SETUP, 2, "softsetp"); - if ((card->info.type == QETH_CARD_TYPE_OSD) || - (card->info.type == QETH_CARD_TYPE_OSX)) { + if (IS_OSD(card) || IS_OSX(card)) { rc = qeth_l2_start_ipassists(card); if (rc) goto out_remove; @@ -1042,13 +1044,13 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len, QETH_CARD_TEXT(card, 5, "osndctrd"); - wait_event(card->wait_q, - atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0); - qeth_prepare_control_data(card, len, iob); + wait_event(card->wait_q, qeth_trylock_channel(channel)); + iob->finalize(card, iob, len); + QETH_DBF_HEX(CTRL, 2, iob->data, min(len, QETH_DBF_CTRL_LEN)); QETH_CARD_TEXT(card, 6, "osnoirqp"); spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw, - (addr_t) iob, 0, 0, QETH_IPA_TIMEOUT); + (addr_t) iob, 0, 0, iob->timeout); spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); if (rc) { QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: " @@ -1456,9 +1458,8 @@ static struct qeth_cmd_buffer *qeth_sbp_build_cmd(struct qeth_card *card, enum qeth_ipa_sbp_cmd sbp_cmd, unsigned int cmd_length) { - enum qeth_ipa_cmds ipa_cmd = (card->info.type == QETH_CARD_TYPE_IQD) ? - IPA_CMD_SETBRIDGEPORT_IQD : - IPA_CMD_SETBRIDGEPORT_OSA; + enum qeth_ipa_cmds ipa_cmd = IS_IQD(card) ? IPA_CMD_SETBRIDGEPORT_IQD : + IPA_CMD_SETBRIDGEPORT_OSA; struct qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd; diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 53712cf26406..0271833da6a2 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -246,9 +246,9 @@ static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) */ if (addr->proto == QETH_PROT_IPV4) { addr->in_progress = 1; - spin_unlock_bh(&card->ip_lock); + mutex_unlock(&card->ip_lock); rc = qeth_l3_register_addr_entry(card, addr); - spin_lock_bh(&card->ip_lock); + mutex_lock(&card->ip_lock); addr->in_progress = 0; } else rc = qeth_l3_register_addr_entry(card, addr); @@ -268,6 +268,30 @@ static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) return rc; } +static int qeth_l3_modify_ip(struct qeth_card *card, struct qeth_ipaddr *addr, + bool add) +{ + int rc; + + mutex_lock(&card->ip_lock); + rc = add ? 
qeth_l3_add_ip(card, addr) : qeth_l3_delete_ip(card, addr); + mutex_unlock(&card->ip_lock); + + return rc; +} + +static void qeth_l3_drain_rx_mode_cache(struct qeth_card *card) +{ + struct qeth_ipaddr *addr; + struct hlist_node *tmp; + int i; + + hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) { + hash_del(&addr->hnode); + kfree(addr); + } +} + static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover) { struct qeth_ipaddr *addr; @@ -276,7 +300,7 @@ static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover) QETH_CARD_TEXT(card, 4, "clearip"); - spin_lock_bh(&card->ip_lock); + mutex_lock(&card->ip_lock); hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) { if (!recover) { @@ -287,19 +311,9 @@ static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover) addr->disp_flag = QETH_DISP_ADDR_ADD; } - spin_unlock_bh(&card->ip_lock); - - spin_lock_bh(&card->mclock); - - hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) { - hash_del(&addr->hnode); - kfree(addr); - } - - spin_unlock_bh(&card->mclock); - - + mutex_unlock(&card->ip_lock); } + static void qeth_l3_recover_ip(struct qeth_card *card) { struct qeth_ipaddr *addr; @@ -309,15 +323,15 @@ static void qeth_l3_recover_ip(struct qeth_card *card) QETH_CARD_TEXT(card, 4, "recovrip"); - spin_lock_bh(&card->ip_lock); + mutex_lock(&card->ip_lock); hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) { if (addr->disp_flag == QETH_DISP_ADDR_ADD) { if (addr->proto == QETH_PROT_IPV4) { addr->in_progress = 1; - spin_unlock_bh(&card->ip_lock); + mutex_unlock(&card->ip_lock); rc = qeth_l3_register_addr_entry(card, addr); - spin_lock_bh(&card->ip_lock); + mutex_lock(&card->ip_lock); addr->in_progress = 0; } else rc = qeth_l3_register_addr_entry(card, addr); @@ -333,8 +347,7 @@ static void qeth_l3_recover_ip(struct qeth_card *card) } } - spin_unlock_bh(&card->ip_lock); - + mutex_unlock(&card->ip_lock); } static int qeth_l3_setdelip_cb(struct qeth_card *card, struct qeth_reply *reply, @@ -461,7 +474,7 @@ static int qeth_l3_send_setrouting(struct qeth_card *card, static int qeth_l3_correct_routing_type(struct qeth_card *card, enum qeth_routing_types *type, enum qeth_prot_versions prot) { - if (card->info.type == QETH_CARD_TYPE_IQD) { + if (IS_IQD(card)) { switch (*type) { case NO_ROUTER: case PRIMARY_CONNECTOR: @@ -559,7 +572,7 @@ static void qeth_l3_clear_ipato_list(struct qeth_card *card) { struct qeth_ipato_entry *ipatoe, *tmp; - spin_lock_bh(&card->ip_lock); + mutex_lock(&card->ip_lock); list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) { list_del(&ipatoe->entry); @@ -567,7 +580,7 @@ static void qeth_l3_clear_ipato_list(struct qeth_card *card) } qeth_l3_update_ipato(card); - spin_unlock_bh(&card->ip_lock); + mutex_unlock(&card->ip_lock); } int qeth_l3_add_ipato_entry(struct qeth_card *card, @@ -578,7 +591,7 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card, QETH_CARD_TEXT(card, 2, "addipato"); - spin_lock_bh(&card->ip_lock); + mutex_lock(&card->ip_lock); list_for_each_entry(ipatoe, &card->ipato.entries, entry) { if (ipatoe->proto != new->proto) @@ -596,7 +609,7 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card, qeth_l3_update_ipato(card); } - spin_unlock_bh(&card->ip_lock); + mutex_unlock(&card->ip_lock); return rc; } @@ -610,7 +623,7 @@ int qeth_l3_del_ipato_entry(struct qeth_card *card, QETH_CARD_TEXT(card, 2, "delipato"); - spin_lock_bh(&card->ip_lock); + mutex_lock(&card->ip_lock); list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) { if 
(ipatoe->proto != proto) @@ -625,7 +638,7 @@ int qeth_l3_del_ipato_entry(struct qeth_card *card, } } - spin_unlock_bh(&card->ip_lock); + mutex_unlock(&card->ip_lock); return rc; } @@ -634,7 +647,6 @@ int qeth_l3_modify_rxip_vipa(struct qeth_card *card, bool add, const u8 *ip, enum qeth_prot_versions proto) { struct qeth_ipaddr addr; - int rc; qeth_l3_init_ipaddr(&addr, type, proto); if (proto == QETH_PROT_IPV4) @@ -642,16 +654,13 @@ int qeth_l3_modify_rxip_vipa(struct qeth_card *card, bool add, const u8 *ip, else memcpy(&addr.u.a6.addr, ip, 16); - spin_lock_bh(&card->ip_lock); - rc = add ? qeth_l3_add_ip(card, &addr) : qeth_l3_delete_ip(card, &addr); - spin_unlock_bh(&card->ip_lock); - return rc; + return qeth_l3_modify_ip(card, &addr, add); } int qeth_l3_modify_hsuid(struct qeth_card *card, bool add) { struct qeth_ipaddr addr; - int rc, i; + unsigned int i; qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV6); addr.u.a6.addr.s6_addr[0] = 0xfe; @@ -659,10 +668,7 @@ int qeth_l3_modify_hsuid(struct qeth_card *card, bool add) for (i = 0; i < 8; i++) addr.u.a6.addr.s6_addr[8+i] = card->options.hsuid[i]; - spin_lock_bh(&card->ip_lock); - rc = add ? qeth_l3_add_ip(card, &addr) : qeth_l3_delete_ip(card, &addr); - spin_unlock_bh(&card->ip_lock); - return rc; + return qeth_l3_modify_ip(card, &addr, add); } static int qeth_l3_register_addr_entry(struct qeth_card *card, @@ -848,7 +854,7 @@ static int qeth_l3_softsetup_ipv6(struct qeth_card *card) QETH_CARD_TEXT(card, 3, "softipv6"); - if (card->info.type == QETH_CARD_TYPE_IQD) + if (IS_IQD(card)) goto out; rc = qeth_send_simple_setassparms(card, IPA_IPV6, @@ -1374,8 +1380,7 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card, switch (hdr->hdr.l3.id) { case QETH_HEADER_TYPE_LAYER3: magic = *(__u16 *)skb->data; - if ((card->info.type == QETH_CARD_TYPE_IQD) && - (magic == ETH_P_AF_IUCV)) { + if (IS_IQD(card) && magic == ETH_P_AF_IUCV) { len = skb->len; dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr, "FAKELL", len); @@ -1413,6 +1418,9 @@ static void qeth_l3_stop_card(struct qeth_card *card) qeth_set_allowed_threads(card, 0, 1); + cancel_work_sync(&card->rx_mode_work); + qeth_l3_drain_rx_mode_cache(card); + if (card->options.sniffer && (card->info.promisc_mode == SET_PROMISC_MODE_ON)) qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE); @@ -1424,7 +1432,7 @@ static void qeth_l3_stop_card(struct qeth_card *card) } if (card->state == CARD_STATE_HARDSETUP) { qeth_qdio_clear_card(card, 0); - qeth_clear_qdio_buffers(card); + qeth_drain_output_queues(card); qeth_clear_working_pool_list(card); card->state = CARD_STATE_DOWN; } @@ -1451,7 +1459,7 @@ qeth_l3_handle_promisc_mode(struct qeth_card *card) (card->info.promisc_mode == SET_PROMISC_MODE_OFF))) return; - if (card->info.guestlan) { /* Guestlan trace */ + if (IS_VM_NIC(card)) { /* Guestlan trace */ if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) qeth_setadp_promisc_mode(card); } else if (card->options.sniffer && /* HiperSockets trace */ @@ -1466,9 +1474,10 @@ qeth_l3_handle_promisc_mode(struct qeth_card *card) } } -static void qeth_l3_set_rx_mode(struct net_device *dev) +static void qeth_l3_rx_mode_work(struct work_struct *work) { - struct qeth_card *card = dev->ml_priv; + struct qeth_card *card = container_of(work, struct qeth_card, + rx_mode_work); struct qeth_ipaddr *addr; struct hlist_node *tmp; int i, rc; @@ -1476,8 +1485,6 @@ static void qeth_l3_set_rx_mode(struct net_device *dev) QETH_CARD_TEXT(card, 3, "setmulti"); if (!card->options.sniffer) { - 
spin_lock_bh(&card->mclock); - qeth_l3_add_multicast_ipv4(card); qeth_l3_add_multicast_ipv6(card); @@ -1505,8 +1512,6 @@ static void qeth_l3_set_rx_mode(struct net_device *dev) } } - spin_unlock_bh(&card->mclock); - if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) return; } @@ -1551,7 +1556,7 @@ static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries) * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_SET_NO_ENTRIES; * thus we say EOPNOTSUPP for this ARP function */ - if (card->info.guestlan) + if (IS_VM_NIC(card)) return -EOPNOTSUPP; if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { return -EOPNOTSUPP; @@ -1783,7 +1788,7 @@ static int qeth_l3_arp_modify_entry(struct qeth_card *card, * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_ADD_ENTRY; * thus we say EOPNOTSUPP for this ARP function */ - if (card->info.guestlan) + if (IS_VM_NIC(card)) return -EOPNOTSUPP; if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { return -EOPNOTSUPP; @@ -1816,7 +1821,7 @@ static int qeth_l3_arp_flush_cache(struct qeth_card *card) * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_FLUSH_CACHE; * thus we say EOPNOTSUPP for this ARP function */ - if (card->info.guestlan || (card->info.type == QETH_CARD_TYPE_IQD)) + if (IS_VM_NIC(card) || IS_IQD(card)) return -EOPNOTSUPP; if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { return -EOPNOTSUPP; @@ -1913,13 +1918,7 @@ static int qeth_l3_get_cast_type(struct sk_buff *skb) RTN_MULTICAST : RTN_UNICAST; default: /* ... and MAC address */ - if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, - skb->dev->broadcast)) - return RTN_BROADCAST; - if (is_multicast_ether_addr(eth_hdr(skb)->h_dest)) - return RTN_MULTICAST; - /* default to unicast */ - return RTN_UNICAST; + return qeth_get_ether_cast_type(skb); } } @@ -1977,19 +1976,14 @@ static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue, hdr->hdr.l3.vlan_id = ntohs(veth->h_vlan_TCI); } + l3_hdr->flags = qeth_l3_cast_type_to_flag(cast_type); + /* OSA only: */ if (!ipv) { - hdr->hdr.l3.flags = QETH_HDR_PASSTHRU; - if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, - skb->dev->broadcast)) - hdr->hdr.l3.flags |= QETH_CAST_BROADCAST; - else - hdr->hdr.l3.flags |= (cast_type == RTN_MULTICAST) ? 
- QETH_CAST_MULTICAST : QETH_CAST_UNICAST; + l3_hdr->flags |= QETH_HDR_PASSTHRU; return; } - hdr->hdr.l3.flags = qeth_l3_cast_type_to_flag(cast_type); rcu_read_lock(); if (ipv == 4) { struct rtable *rt = skb_rtable(skb); @@ -2007,7 +2001,7 @@ static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue, l3_hdr->next_hop.ipv6_addr = ipv6_hdr(skb)->daddr; hdr->hdr.l3.flags |= QETH_HDR_IPV6; - if (card->info.type != QETH_CARD_TYPE_IQD) + if (!IS_IQD(card)) hdr->hdr.l3.flags |= QETH_HDR_PASSTHRU; } rcu_read_unlock(); @@ -2030,7 +2024,6 @@ static void qeth_l3_fixup_headers(struct sk_buff *skb) static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb, struct qeth_qdio_out_q *queue, int ipv, int cast_type) { - unsigned char eth_hdr[ETH_HLEN]; unsigned int hw_hdr_len; int rc; @@ -2040,45 +2033,44 @@ static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb, rc = skb_cow_head(skb, hw_hdr_len - ETH_HLEN); if (rc) return rc; - skb_copy_from_linear_data(skb, eth_hdr, ETH_HLEN); skb_pull(skb, ETH_HLEN); qeth_l3_fixup_headers(skb); - rc = qeth_xmit(card, skb, queue, ipv, cast_type, qeth_l3_fill_header); - if (rc == -EBUSY) { - /* roll back to ETH header */ - skb_push(skb, ETH_HLEN); - skb_copy_to_linear_data(skb, eth_hdr, ETH_HLEN); - } - return rc; + return qeth_xmit(card, skb, queue, ipv, cast_type, qeth_l3_fill_header); } static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { - int cast_type = qeth_l3_get_cast_type(skb); struct qeth_card *card = dev->ml_priv; + u16 txq = skb_get_queue_mapping(skb); int ipv = qeth_get_ip_version(skb); struct qeth_qdio_out_q *queue; int tx_bytes = skb->len; - int rc; - - queue = qeth_get_tx_queue(card, skb, ipv, cast_type); + int cast_type, rc; if (IS_IQD(card)) { + queue = card->qdio.out_qs[qeth_iqd_translate_txq(dev, txq)]; + if (card->options.sniffer) goto tx_drop; if ((card->options.cq != QETH_CQ_ENABLED && !ipv) || (card->options.cq == QETH_CQ_ENABLED && skb->protocol != htons(ETH_P_AF_IUCV))) goto tx_drop; + + if (txq == QETH_IQD_MCAST_TXQ) + cast_type = qeth_l3_get_cast_type(skb); + else + cast_type = RTN_UNICAST; + } else { + queue = card->qdio.out_qs[txq]; + cast_type = qeth_l3_get_cast_type(skb); } if (cast_type == RTN_BROADCAST && !card->info.broadcast_capable) goto tx_drop; - netif_stop_queue(dev); - if (ipv == 4 || IS_IQD(card)) rc = qeth_l3_xmit(card, skb, queue, ipv, cast_type); else @@ -2088,19 +2080,22 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, if (!rc) { QETH_TXQ_STAT_INC(queue, tx_packets); QETH_TXQ_STAT_ADD(queue, tx_bytes, tx_bytes); - netif_wake_queue(dev); return NETDEV_TX_OK; - } else if (rc == -EBUSY) { - return NETDEV_TX_BUSY; - } /* else fall through */ + } tx_drop: QETH_TXQ_STAT_INC(queue, tx_dropped); kfree_skb(skb); - netif_wake_queue(dev); return NETDEV_TX_OK; } +static void qeth_l3_set_rx_mode(struct net_device *dev) +{ + struct qeth_card *card = dev->ml_priv; + + schedule_work(&card->rx_mode_work); +} + /* * we need NOARP for IPv4 but we want neighbor solicitation for IPv6. 
Setting * NOARP on the netdevice is no option because it also turns off neighbor @@ -2134,11 +2129,27 @@ static netdev_features_t qeth_l3_osa_features_check(struct sk_buff *skb, return qeth_features_check(skb, dev, features); } +static u16 qeth_l3_iqd_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev) +{ + return qeth_iqd_select_queue(dev, skb, qeth_l3_get_cast_type(skb), + sb_dev); +} + +static u16 qeth_l3_osa_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev) +{ + struct qeth_card *card = dev->ml_priv; + + return qeth_get_priority_queue(card, skb); +} + static const struct net_device_ops qeth_l3_netdev_ops = { .ndo_open = qeth_open, .ndo_stop = qeth_stop, .ndo_get_stats64 = qeth_get_stats64, .ndo_start_xmit = qeth_l3_hard_start_xmit, + .ndo_select_queue = qeth_l3_iqd_select_queue, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = qeth_l3_set_rx_mode, .ndo_do_ioctl = qeth_do_ioctl, @@ -2155,6 +2166,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = { .ndo_get_stats64 = qeth_get_stats64, .ndo_start_xmit = qeth_l3_hard_start_xmit, .ndo_features_check = qeth_l3_osa_features_check, + .ndo_select_queue = qeth_l3_osa_select_queue, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = qeth_l3_set_rx_mode, .ndo_do_ioctl = qeth_do_ioctl, @@ -2171,8 +2183,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok) unsigned int headroom; int rc; - if (card->info.type == QETH_CARD_TYPE_OSD || - card->info.type == QETH_CARD_TYPE_OSX) { + if (IS_OSD(card) || IS_OSX(card)) { if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) || (card->info.link_type == QETH_LINK_TYPE_HSTR)) { pr_info("qeth_l3: ignoring TR device\n"); @@ -2186,7 +2197,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok) if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD)) card->dev->dev_id = card->info.unique_id & 0xffff; - if (!card->info.guestlan) { + if (!IS_VM_NIC(card)) { card->dev->features |= NETIF_F_SG; card->dev->hw_features |= NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_IP_CSUM; @@ -2210,7 +2221,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok) headroom = sizeof(struct qeth_hdr_tso); else headroom = sizeof(struct qeth_hdr) + VLAN_HLEN; - } else if (card->info.type == QETH_CARD_TYPE_IQD) { + } else if (IS_IQD(card)) { card->dev->flags |= IFF_NOARP; card->dev->netdev_ops = &qeth_l3_netdev_ops; headroom = sizeof(struct qeth_hdr) - ETH_HLEN; @@ -2253,14 +2264,22 @@ static int qeth_l3_probe_device(struct ccwgroup_device *gdev) int rc; hash_init(card->ip_htable); + mutex_init(&card->ip_lock); + card->cmd_wq = alloc_ordered_workqueue("%s_cmd", 0, + dev_name(&gdev->dev)); + if (!card->cmd_wq) + return -ENOMEM; if (gdev->dev.type == &qeth_generic_devtype) { rc = qeth_l3_create_device_attributes(&gdev->dev); - if (rc) + if (rc) { + destroy_workqueue(card->cmd_wq); return rc; + } } hash_init(card->ip_mc_htable); + INIT_WORK(&card->rx_mode_work, qeth_l3_rx_mode_work); return 0; } @@ -2280,6 +2299,9 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev) cancel_work_sync(&card->close_dev_work); if (qeth_netdev_is_registered(card->dev)) unregister_netdev(card->dev); + + flush_workqueue(card->cmd_wq); + destroy_workqueue(card->cmd_wq); qeth_l3_clear_ip_htable(card, 0); qeth_l3_clear_ipato_list(card); } @@ -2517,20 +2539,40 @@ static int qeth_l3_handle_ip_event(struct qeth_card *card, { switch (event) { case NETDEV_UP: - spin_lock_bh(&card->ip_lock); - 
qeth_l3_add_ip(card, addr); - spin_unlock_bh(&card->ip_lock); + qeth_l3_modify_ip(card, addr, true); return NOTIFY_OK; case NETDEV_DOWN: - spin_lock_bh(&card->ip_lock); - qeth_l3_delete_ip(card, addr); - spin_unlock_bh(&card->ip_lock); + qeth_l3_modify_ip(card, addr, false); return NOTIFY_OK; default: return NOTIFY_DONE; } } +struct qeth_l3_ip_event_work { + struct work_struct work; + struct qeth_card *card; + struct qeth_ipaddr addr; +}; + +#define to_ip_work(w) container_of((w), struct qeth_l3_ip_event_work, work) + +static void qeth_l3_add_ip_worker(struct work_struct *work) +{ + struct qeth_l3_ip_event_work *ip_work = to_ip_work(work); + + qeth_l3_modify_ip(ip_work->card, &ip_work->addr, true); + kfree(work); +} + +static void qeth_l3_delete_ip_worker(struct work_struct *work) +{ + struct qeth_l3_ip_event_work *ip_work = to_ip_work(work); + + qeth_l3_modify_ip(ip_work->card, &ip_work->addr, false); + kfree(work); +} + static struct qeth_card *qeth_l3_get_card_from_dev(struct net_device *dev) { if (is_vlan_dev(dev)) @@ -2575,9 +2617,12 @@ static int qeth_l3_ip6_event(struct notifier_block *this, { struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr; struct net_device *dev = ifa->idev->dev; - struct qeth_ipaddr addr; + struct qeth_l3_ip_event_work *ip_work; struct qeth_card *card; + if (event != NETDEV_UP && event != NETDEV_DOWN) + return NOTIFY_DONE; + card = qeth_l3_get_card_from_dev(dev); if (!card) return NOTIFY_DONE; @@ -2585,11 +2630,23 @@ static int qeth_l3_ip6_event(struct notifier_block *this, if (!qeth_is_supported(card, IPA_IPV6)) return NOTIFY_DONE; - qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV6); - addr.u.a6.addr = ifa->addr; - addr.u.a6.pfxlen = ifa->prefix_len; + ip_work = kmalloc(sizeof(*ip_work), GFP_ATOMIC); + if (!ip_work) + return NOTIFY_DONE; - return qeth_l3_handle_ip_event(card, &addr, event); + if (event == NETDEV_UP) + INIT_WORK(&ip_work->work, qeth_l3_add_ip_worker); + else + INIT_WORK(&ip_work->work, qeth_l3_delete_ip_worker); + + ip_work->card = card; + qeth_l3_init_ipaddr(&ip_work->addr, QETH_IP_TYPE_NORMAL, + QETH_PROT_IPV6); + ip_work->addr.u.a6.addr = ifa->addr; + ip_work->addr.u.a6.pfxlen = ifa->prefix_len; + + queue_work(card->cmd_wq, &ip_work->work); + return NOTIFY_OK; } static struct notifier_block qeth_l3_ip6_notifier = { diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c index cff518b0f904..2f73b33c9347 100644 --- a/drivers/s390/net/qeth_l3_sys.c +++ b/drivers/s390/net/qeth_l3_sys.c @@ -206,7 +206,7 @@ static ssize_t qeth_l3_dev_sniffer_store(struct device *dev, if (!card) return -EINVAL; - if (card->info.type != QETH_CARD_TYPE_IQD) + if (!IS_IQD(card)) return -EPERM; if (card->options.cq == QETH_CQ_ENABLED) return -EPERM; @@ -258,7 +258,7 @@ static ssize_t qeth_l3_dev_hsuid_show(struct device *dev, if (!card) return -EINVAL; - if (card->info.type != QETH_CARD_TYPE_IQD) + if (!IS_IQD(card)) return -EPERM; memcpy(tmp_hsuid, card->options.hsuid, sizeof(tmp_hsuid)); @@ -276,7 +276,7 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev, if (!card) return -EINVAL; - if (card->info.type != QETH_CARD_TYPE_IQD) + if (!IS_IQD(card)) return -EPERM; if (card->state != CARD_STATE_DOWN) return -EPERM; @@ -367,9 +367,9 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev, if (card->ipato.enabled != enable) { card->ipato.enabled = enable; - spin_lock_bh(&card->ip_lock); + mutex_lock(&card->ip_lock); qeth_l3_update_ipato(card); - spin_unlock_bh(&card->ip_lock); + mutex_unlock(&card->ip_lock); } 
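The inet6 notifier above runs in a context that must not sleep, while address registration now takes card->ip_lock as a mutex and waits for the card's reply; the patch therefore copies the event into a GFP_ATOMIC work item and defers it to the card's ordered cmd_wq. A generic sketch of this defer-to-workqueue pattern, with my_*() names as illustrative stand-ins rather than qeth code:

#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_event_work {
	struct work_struct work;
	unsigned long event;	/* copy of the notifier payload */
};

static void my_event_worker(struct work_struct *work)
{
	struct my_event_work *ew = container_of(work, struct my_event_work, work);

	/* sleepable context: may take mutexes and wait for device replies */
	kfree(ew);
}

/* runs in atomic notifier context, so: GFP_ATOMIC and no blocking */
static int my_notifier_cb(struct notifier_block *nb, unsigned long event,
			  void *ptr)
{
	struct my_event_work *ew = kmalloc(sizeof(*ew), GFP_ATOMIC);

	if (!ew)
		return NOTIFY_DONE;

	INIT_WORK(&ew->work, my_event_worker);
	ew->event = event;
	queue_work(system_wq, &ew->work);	/* qeth uses an ordered per-card wq */
	return NOTIFY_OK;
}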
out: mutex_unlock(&card->conf_mutex); @@ -412,9 +412,9 @@ static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev, if (card->ipato.invert4 != invert) { card->ipato.invert4 = invert; - spin_lock_bh(&card->ip_lock); + mutex_lock(&card->ip_lock); qeth_l3_update_ipato(card); - spin_unlock_bh(&card->ip_lock); + mutex_unlock(&card->ip_lock); } out: mutex_unlock(&card->conf_mutex); @@ -436,7 +436,7 @@ static ssize_t qeth_l3_dev_ipato_add_show(char *buf, struct qeth_card *card, entry_len = (proto == QETH_PROT_IPV4)? 12 : 40; /* add strlen for "/<mask>\n" */ entry_len += (proto == QETH_PROT_IPV4)? 5 : 6; - spin_lock_bh(&card->ip_lock); + mutex_lock(&card->ip_lock); list_for_each_entry(ipatoe, &card->ipato.entries, entry) { if (ipatoe->proto != proto) continue; @@ -449,7 +449,7 @@ static ssize_t qeth_l3_dev_ipato_add_show(char *buf, struct qeth_card *card, i += snprintf(buf + i, PAGE_SIZE - i, "%s/%i\n", addr_str, ipatoe->mask_bits); } - spin_unlock_bh(&card->ip_lock); + mutex_unlock(&card->ip_lock); i += snprintf(buf + i, PAGE_SIZE - i, "\n"); return i; @@ -598,9 +598,9 @@ static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev, if (card->ipato.invert6 != invert) { card->ipato.invert6 = invert; - spin_lock_bh(&card->ip_lock); + mutex_lock(&card->ip_lock); qeth_l3_update_ipato(card); - spin_unlock_bh(&card->ip_lock); + mutex_unlock(&card->ip_lock); } out: mutex_unlock(&card->conf_mutex); @@ -684,7 +684,7 @@ static ssize_t qeth_l3_dev_ip_add_show(struct device *dev, char *buf, entry_len = (proto == QETH_PROT_IPV4)? 12 : 40; entry_len += 2; /* \n + terminator */ - spin_lock_bh(&card->ip_lock); + mutex_lock(&card->ip_lock); hash_for_each(card->ip_htable, i, ipaddr, hnode) { if (ipaddr->proto != proto || ipaddr->type != type) continue; @@ -698,7 +698,7 @@ static ssize_t qeth_l3_dev_ip_add_show(struct device *dev, char *buf, str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "%s\n", addr_str); } - spin_unlock_bh(&card->ip_lock); + mutex_unlock(&card->ip_lock); str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "\n"); return str_len; diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index 74c328321889..6a3076881321 100644 --- a/drivers/s390/virtio/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c @@ -66,6 +66,7 @@ struct virtio_ccw_device { bool device_lost; unsigned int config_ready; void *airq_info; + u64 dma_mask; }; struct vq_info_block_legacy { @@ -108,7 +109,6 @@ struct virtio_rev_info { struct virtio_ccw_vq_info { struct virtqueue *vq; int num; - void *queue; union { struct vq_info_block s; struct vq_info_block_legacy l; @@ -182,7 +182,7 @@ static void drop_airq_indicator(struct virtqueue *vq, struct airq_info *info) write_unlock_irqrestore(&info->lock, flags); } -static void virtio_airq_handler(struct airq_struct *airq) +static void virtio_airq_handler(struct airq_struct *airq, bool floating) { struct airq_info *info = container_of(airq, struct airq_info, airq); unsigned long ai; @@ -423,7 +423,6 @@ static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw) struct virtio_ccw_device *vcdev = to_vc_device(vq->vdev); struct virtio_ccw_vq_info *info = vq->priv; unsigned long flags; - unsigned long size; int ret; unsigned int index = vq->index; @@ -461,8 +460,6 @@ static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw) ret, index); vring_del_virtqueue(vq); - size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN)); - free_pages_exact(info->queue, size); kfree(info->info_block); kfree(info); } @@ -494,8 
+491,9 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev, int err; struct virtqueue *vq = NULL; struct virtio_ccw_vq_info *info; - unsigned long size = 0; /* silence the compiler */ + u64 queue; unsigned long flags; + bool may_reduce; /* Allocate queue. */ info = kzalloc(sizeof(struct virtio_ccw_vq_info), GFP_KERNEL); @@ -516,37 +514,34 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev, err = info->num; goto out_err; } - size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN)); - info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO); - if (info->queue == NULL) { - dev_warn(&vcdev->cdev->dev, "no queue\n"); - err = -ENOMEM; - goto out_err; - } + may_reduce = vcdev->revision > 0; + vq = vring_create_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN, + vdev, true, may_reduce, ctx, + virtio_ccw_kvm_notify, callback, name); - vq = vring_new_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN, vdev, - true, ctx, info->queue, virtio_ccw_kvm_notify, - callback, name); if (!vq) { /* For now, we fail if we can't get the requested size. */ dev_warn(&vcdev->cdev->dev, "no vq\n"); err = -ENOMEM; goto out_err; } + /* it may have been reduced */ + info->num = virtqueue_get_vring_size(vq); /* Register it with the host. */ + queue = virtqueue_get_desc_addr(vq); if (vcdev->revision == 0) { - info->info_block->l.queue = (__u64)info->queue; + info->info_block->l.queue = queue; info->info_block->l.align = KVM_VIRTIO_CCW_RING_ALIGN; info->info_block->l.index = i; info->info_block->l.num = info->num; ccw->count = sizeof(info->info_block->l); } else { - info->info_block->s.desc = (__u64)info->queue; + info->info_block->s.desc = queue; info->info_block->s.index = i; info->info_block->s.num = info->num; - info->info_block->s.avail = (__u64)virtqueue_get_avail(vq); - info->info_block->s.used = (__u64)virtqueue_get_used(vq); + info->info_block->s.avail = (__u64)virtqueue_get_avail_addr(vq); + info->info_block->s.used = (__u64)virtqueue_get_used_addr(vq); ccw->count = sizeof(info->info_block->s); } ccw->cmd_code = CCW_CMD_SET_VQ; @@ -572,8 +567,6 @@ out_err: if (vq) vring_del_virtqueue(vq); if (info) { - if (info->queue) - free_pages_exact(info->queue, size); kfree(info->info_block); } kfree(info); @@ -780,12 +773,8 @@ out_free: static void ccw_transport_features(struct virtio_device *vdev) { /* - * Packed ring isn't enabled on virtio_ccw for now, - * because virtio_ccw uses some legacy accessors, - * e.g. virtqueue_get_avail() and virtqueue_get_used() - * which aren't available in packed ring currently. + * Currently nothing to do here. 
*/ - __virtio_clear_bit(vdev, VIRTIO_F_RING_PACKED); } static int virtio_ccw_finalize_features(struct virtio_device *vdev) @@ -1266,6 +1255,16 @@ static int virtio_ccw_online(struct ccw_device *cdev) ret = -ENOMEM; goto out_free; } + + vcdev->vdev.dev.parent = &cdev->dev; + cdev->dev.dma_mask = &vcdev->dma_mask; + /* we are fine with common virtio infrastructure using 64 bit DMA */ + ret = dma_set_mask_and_coherent(&cdev->dev, DMA_BIT_MASK(64)); + if (ret) { + dev_warn(&cdev->dev, "Failed to enable 64-bit DMA.\n"); + goto out_free; + } + vcdev->config_block = kzalloc(sizeof(*vcdev->config_block), GFP_DMA | GFP_KERNEL); if (!vcdev->config_block) { @@ -1280,7 +1279,6 @@ static int virtio_ccw_online(struct ccw_device *cdev) vcdev->is_thinint = virtio_ccw_use_airq; /* at least try */ - vcdev->vdev.dev.parent = &cdev->dev; vcdev->vdev.dev.release = virtio_ccw_release_dev; vcdev->vdev.config = &virtio_ccw_config_ops; vcdev->cdev = cdev; |
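
Note on the qeth_l3_ip6_event() change above: inet6 address notifiers can run in atomic context, so the driver now allocates a small request with GFP_ATOMIC, embeds a work_struct in it, and defers the actual (sleeping) address update to the card's cmd_wq; the worker recovers the request with container_of() and frees it. A minimal sketch of that pattern, with hypothetical names (ip_event_request, queue_ip_event) and the system workqueue standing in for the driver's own queue:

/* Illustrative only: embed a work item in a heap-allocated request,
 * recover the request in the worker via container_of(), and let the
 * worker free it.
 */
#include <linux/slab.h>
#include <linux/workqueue.h>

struct ip_event_request {
	struct work_struct work;
	bool add;			/* NETDEV_UP vs. NETDEV_DOWN */
};

static void ip_event_worker(struct work_struct *work)
{
	struct ip_event_request *req =
		container_of(work, struct ip_event_request, work);

	/* the sleeping register/unregister work would go here */
	kfree(req);			/* the worker owns the request */
}

static int queue_ip_event(bool add)
{
	struct ip_event_request *req;

	/* notifier chains may run in atomic context, hence GFP_ATOMIC */
	req = kmalloc(sizeof(*req), GFP_ATOMIC);
	if (!req)
		return -ENOMEM;
	req->add = add;
	INIT_WORK(&req->work, ip_event_worker);
	schedule_work(&req->work);
	return 0;
}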
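The qeth_l3_sys.c hunks follow from the same move: once address updates run in worker (sleepable) context, card->ip_lock can be taken as a mutex rather than a BH-disabling spinlock, so the sysfs show/store handlers may sleep while walking the IPATO and IP tables. A sketch of the resulting locking shape, with hypothetical names:

#include <linux/list.h>
#include <linux/mutex.h>

struct ipato_entry {
	struct list_head entry;
	int mask_bits;
};

static DEFINE_MUTEX(ip_lock);
static LIST_HEAD(ipato_entries);

static int ipato_count(void)
{
	struct ipato_entry *e;
	int n = 0;

	mutex_lock(&ip_lock);	/* may sleep, unlike spin_lock_bh() */
	list_for_each_entry(e, &ipato_entries, entry)
		n++;
	mutex_unlock(&ip_lock);
	return n;
}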
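In virtio_ccw_setup_vq(), the transport no longer allocates the ring itself: vring_create_virtqueue() does, and with may_reduce_num set it may return a smaller ring than requested, which is why the code re-reads the size with virtqueue_get_vring_size() and fetches the guest addresses via virtqueue_get_desc_addr()/virtqueue_get_avail_addr()/virtqueue_get_used_addr() before registering with the host. A sketch of a transport-side caller, with hypothetical example_* names and a placeholder alignment:

#include <linux/virtio.h>
#include <linux/virtio_ring.h>

#define EXAMPLE_RING_ALIGN	4096	/* hypothetical alignment */

static bool example_notify(struct virtqueue *vq)
{
	/* kick the host here */
	return true;
}

static struct virtqueue *example_setup_vq(struct virtio_device *vdev,
					  unsigned int index,
					  unsigned int num,
					  unsigned int *actual_num,
					  void (*callback)(struct virtqueue *))
{
	struct virtqueue *vq;

	vq = vring_create_virtqueue(index, num, EXAMPLE_RING_ALIGN, vdev,
				    true /* weak_barriers */,
				    true /* may_reduce_num */,
				    false /* ctx */,
				    example_notify, callback, "example");
	if (!vq)
		return NULL;

	/* the core may have shrunk the ring; report the real size back */
	*actual_num = virtqueue_get_vring_size(vq);
	return vq;
}

Because the generic accessors replace the old virtqueue_get_avail()/virtqueue_get_used() helpers, the transport no longer depends on split-ring layout, which is why ccw_transport_features() can stop clearing VIRTIO_F_RING_PACKED.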
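Finally, virtio_ccw_online() now sets the parent pointer and a 64-bit DMA mask before any DMA-capable allocations, pointing the ccw device's dma_mask at storage owned by the driver. A sketch of that generic pattern in a hypothetical setup function:

#include <linux/device.h>
#include <linux/dma-mapping.h>

static u64 example_dma_mask;

static int example_enable_dma(struct device *dev)
{
	int ret;

	dev->dma_mask = &example_dma_mask;	/* storage for the mask */
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret)
		dev_warn(dev, "64-bit DMA not available\n");
	return ret;
}

The mask must be wired up before the first dma_map_*() or dma_alloc_*() call on the device, which is why the assignment moved ahead of the config_block allocation in the hunk above.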