Diffstat (limited to 'drivers/s390')
-rw-r--r--  drivers/s390/block/dasd.c            |   16
-rw-r--r--  drivers/s390/block/dasd_devmap.c     |  294
-rw-r--r--  drivers/s390/block/dasd_eckd.c       |    6
-rw-r--r--  drivers/s390/block/dasd_int.h        |    2
-rw-r--r--  drivers/s390/block/dcssblk.c         |    2
-rw-r--r--  drivers/s390/block/scm_blk.c         |    7
-rw-r--r--  drivers/s390/char/Makefile           |   16
-rw-r--r--  drivers/s390/char/con3270.c          |    2
-rw-r--r--  drivers/s390/char/raw3270.c          |    2
-rw-r--r--  drivers/s390/char/sclp.c             |   32
-rw-r--r--  drivers/s390/char/sclp.h             |   40
-rw-r--r--  drivers/s390/char/sclp_early.c       |  201
-rw-r--r--  drivers/s390/char/sclp_early_core.c  |  208
-rw-r--r--  drivers/s390/char/zcore.c            |    3
-rw-r--r--  drivers/s390/cio/chp.c               |   13
-rw-r--r--  drivers/s390/cio/chp.h               |    2
-rw-r--r--  drivers/s390/cio/chsc.c              |   48
-rw-r--r--  drivers/s390/cio/chsc.h              |    2
-rw-r--r--  drivers/s390/cio/cmf.c               |   10
-rw-r--r--  drivers/s390/cio/css.c               |  209
-rw-r--r--  drivers/s390/cio/css.h               |   13
-rw-r--r--  drivers/s390/cio/ioasm.c             |    8
-rw-r--r--  drivers/s390/cio/qdio_main.c         |    5
-rw-r--r--  drivers/s390/cio/qdio_thinint.c      |   19
-rw-r--r--  drivers/s390/crypto/Makefile         |    4
-rw-r--r--  drivers/s390/crypto/ap_asm.h         |   10
-rw-r--r--  drivers/s390/crypto/ap_bus.c         |   67
-rw-r--r--  drivers/s390/crypto/ap_card.c        |   26
-rw-r--r--  drivers/s390/crypto/ap_queue.c       |   23
-rw-r--r--  drivers/s390/crypto/pkey_api.c       | 1148
-rw-r--r--  drivers/s390/crypto/zcrypt_api.c     |   20
-rw-r--r--  drivers/s390/crypto/zcrypt_api.h     |    2
-rw-r--r--  drivers/s390/net/qeth_core.h         |    5
-rw-r--r--  drivers/s390/net/qeth_core_main.c    |  135
-rw-r--r--  drivers/s390/net/qeth_core_mpc.h     |   17
-rw-r--r--  drivers/s390/net/qeth_l2_main.c      |  189
-rw-r--r--  drivers/s390/net/qeth_l3_main.c      |   15
-rw-r--r--  drivers/s390/net/qeth_l3_sys.c       |   33
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c         |    8
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c        |    1
-rw-r--r--  drivers/s390/virtio/virtio_ccw.c     |    2
41 files changed, 2114 insertions, 751 deletions
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 0e3fdfdbd098..6fb3fd5efc11 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1712,8 +1712,11 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
/* check for for attention message */
if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) {
device = dasd_device_from_cdev_locked(cdev);
- device->discipline->check_attention(device, irb->esw.esw1.lpum);
- dasd_put_device(device);
+ if (!IS_ERR(device)) {
+ device->discipline->check_attention(device,
+ irb->esw.esw1.lpum);
+ dasd_put_device(device);
+ }
}
if (!cqr)
@@ -3598,10 +3601,11 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
* empty
*/
/* sync blockdev and partitions */
- rc = fsync_bdev(device->block->bdev);
- if (rc != 0)
- goto interrupted;
-
+ if (device->block) {
+ rc = fsync_bdev(device->block->bdev);
+ if (rc != 0)
+ goto interrupted;
+ }
/* schedule device tasklet and wait for completion */
dasd_schedule_device_bh(device);
rc = wait_event_interruptible(shutdown_waitq,
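The first dasd.c hunk guards against dasd_device_from_cdev_locked() returning an error pointer rather than a valid device. As a reminder of the convention that fix relies on, here is a minimal sketch of the ERR_PTR()/IS_ERR() pattern; lookup_thing() is a hypothetical helper used only for illustration and is not part of this patch.

#include <linux/err.h>

/* Hypothetical lookup: returns a valid pointer or ERR_PTR(-ENODEV),
 * never NULL, so callers must test with IS_ERR() before dereferencing. */
static void *lookup_thing(int id)
{
	static char thing[16];

	if (id < 0)
		return ERR_PTR(-ENODEV);
	return thing;
}

static int use_thing(int id)
{
	void *p = lookup_thing(id);

	if (IS_ERR(p))
		return PTR_ERR(p);	/* error pointers are not NULL */
	/* ... use p ... */
	return 0;
}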
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index dd46e96a3034..1164b51d09f3 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -26,6 +26,7 @@
/* This is ugly... */
#define PRINTK_HEADER "dasd_devmap:"
#define DASD_BUS_ID_SIZE 20
+#define DASD_MAX_PARAMS 256
#include "dasd_int.h"
@@ -76,7 +77,7 @@ EXPORT_SYMBOL_GPL(dasd_nofcx);
* it is named 'dasd' to directly be filled by insmod with the comma separated
* strings when running as a module.
*/
-static char *dasd[256];
+static char *dasd[DASD_MAX_PARAMS];
module_param_array(dasd, charp, NULL, S_IRUGO);
/*
@@ -104,18 +105,19 @@ dasd_hash_busid(const char *bus_id)
}
#ifndef MODULE
-/*
- * The parameter parsing functions for builtin-drivers are called
- * before kmalloc works. Store the pointers to the parameters strings
- * into dasd[] for later processing.
- */
-static int __init
-dasd_call_setup(char *str)
+static int __init dasd_call_setup(char *opt)
{
- static int count = 0;
+ static int i __initdata;
+ char *tmp;
+
+ while (i < DASD_MAX_PARAMS) {
+ tmp = strsep(&opt, ",");
+ if (!tmp)
+ break;
+
+ dasd[i++] = tmp;
+ }
- if (count < 256)
- dasd[count++] = str;
return 1;
}
@@ -127,14 +129,13 @@ __setup ("dasd=", dasd_call_setup);
/*
* Read a device busid/devno from a string.
*/
-static int
-
-dasd_busid(char **str, int *id0, int *id1, int *devno)
+static int __init dasd_busid(char *str, int *id0, int *id1, int *devno)
{
- int val, old_style;
+ unsigned int val;
+ char *tok;
/* Interpret ipldev busid */
- if (strncmp(DASD_IPLDEV, *str, strlen(DASD_IPLDEV)) == 0) {
+ if (strncmp(DASD_IPLDEV, str, strlen(DASD_IPLDEV)) == 0) {
if (ipl_info.type != IPL_TYPE_CCW) {
pr_err("The IPL device is not a CCW device\n");
return -EINVAL;
@@ -142,63 +143,50 @@ dasd_busid(char **str, int *id0, int *id1, int *devno)
*id0 = 0;
*id1 = ipl_info.data.ccw.dev_id.ssid;
*devno = ipl_info.data.ccw.dev_id.devno;
- *str += strlen(DASD_IPLDEV);
return 0;
}
- /* check for leading '0x' */
- old_style = 0;
- if ((*str)[0] == '0' && (*str)[1] == 'x') {
- *str += 2;
- old_style = 1;
- }
- if (!isxdigit((*str)[0])) /* We require at least one hex digit */
- return -EINVAL;
- val = simple_strtoul(*str, str, 16);
- if (old_style || (*str)[0] != '.') {
+
+ /* Old style 0xXXXX or XXXX */
+ if (!kstrtouint(str, 16, &val)) {
*id0 = *id1 = 0;
if (val < 0 || val > 0xffff)
return -EINVAL;
*devno = val;
return 0;
}
+
/* New style x.y.z busid */
- if (val < 0 || val > 0xff)
+ tok = strsep(&str, ".");
+ if (kstrtouint(tok, 16, &val) || val > 0xff)
return -EINVAL;
*id0 = val;
- (*str)++;
- if (!isxdigit((*str)[0])) /* We require at least one hex digit */
- return -EINVAL;
- val = simple_strtoul(*str, str, 16);
- if (val < 0 || val > 0xff || (*str)++[0] != '.')
+
+ tok = strsep(&str, ".");
+ if (kstrtouint(tok, 16, &val) || val > 0xff)
return -EINVAL;
*id1 = val;
- if (!isxdigit((*str)[0])) /* We require at least one hex digit */
- return -EINVAL;
- val = simple_strtoul(*str, str, 16);
- if (val < 0 || val > 0xffff)
+
+ tok = strsep(&str, ".");
+ if (kstrtouint(tok, 16, &val) || val > 0xffff)
return -EINVAL;
*devno = val;
+
return 0;
}
/*
- * Read colon separated list of dasd features. Currently there is
- * only one: "ro" for read-only devices. The default feature set
- * is empty (value 0).
+ * Read colon separated list of dasd features.
*/
-static int
-dasd_feature_list(char *str, char **endp)
+static int __init dasd_feature_list(char *str)
{
int features, len, rc;
+ features = 0;
rc = 0;
- if (*str != '(') {
- *endp = str;
+
+ if (!str)
return DASD_FEATURE_DEFAULT;
- }
- str++;
- features = 0;
while (1) {
for (len = 0;
@@ -223,15 +211,8 @@ dasd_feature_list(char *str, char **endp)
break;
str++;
}
- if (*str != ')') {
- pr_warn("A closing parenthesis ')' is missing in the dasd= parameter\n");
- rc = -EINVAL;
- } else
- str++;
- *endp = str;
- if (rc != 0)
- return rc;
- return features;
+
+ return rc ? : features;
}
/*
@@ -240,48 +221,38 @@ dasd_feature_list(char *str, char **endp)
* action and return a pointer to the residual string. If the first element
* could not be matched to any keyword then return an error code.
*/
-static char *
-dasd_parse_keyword( char *parsestring ) {
-
- char *nextcomma, *residual_str;
- int length;
+static int __init dasd_parse_keyword(char *keyword)
+{
+ int length = strlen(keyword);
- nextcomma = strchr(parsestring,',');
- if (nextcomma) {
- length = nextcomma - parsestring;
- residual_str = nextcomma + 1;
- } else {
- length = strlen(parsestring);
- residual_str = parsestring + length;
- }
- if (strncmp("autodetect", parsestring, length) == 0) {
+ if (strncmp("autodetect", keyword, length) == 0) {
dasd_autodetect = 1;
pr_info("The autodetection mode has been activated\n");
- return residual_str;
+ return 0;
}
- if (strncmp("probeonly", parsestring, length) == 0) {
+ if (strncmp("probeonly", keyword, length) == 0) {
dasd_probeonly = 1;
pr_info("The probeonly mode has been activated\n");
- return residual_str;
+ return 0;
}
- if (strncmp("nopav", parsestring, length) == 0) {
+ if (strncmp("nopav", keyword, length) == 0) {
if (MACHINE_IS_VM)
pr_info("'nopav' is not supported on z/VM\n");
else {
dasd_nopav = 1;
pr_info("PAV support has be deactivated\n");
}
- return residual_str;
+ return 0;
}
- if (strncmp("nofcx", parsestring, length) == 0) {
+ if (strncmp("nofcx", keyword, length) == 0) {
dasd_nofcx = 1;
pr_info("High Performance FICON support has been "
"deactivated\n");
- return residual_str;
+ return 0;
}
- if (strncmp("fixedbuffers", parsestring, length) == 0) {
+ if (strncmp("fixedbuffers", keyword, length) == 0) {
if (dasd_page_cache)
- return residual_str;
+ return 0;
dasd_page_cache =
kmem_cache_create("dasd_page_cache", PAGE_SIZE,
PAGE_SIZE, SLAB_CACHE_DMA,
@@ -292,107 +263,126 @@ dasd_parse_keyword( char *parsestring ) {
else
DBF_EVENT(DBF_INFO, "%s",
"turning on fixed buffer mode");
- return residual_str;
- }
- return ERR_PTR(-EINVAL);
+ return 0;
+ }
+
+ return -EINVAL;
}
/*
- * Try to interprete the first element on the comma separated parse string
- * as a device number or a range of devices. If the interpretation is
- * successful, create the matching dasd_devmap entries and return a pointer
- * to the residual string.
- * If interpretation fails or in case of an error, return an error code.
+ * Split a string of a device range into its pieces and return the from, to, and
+ * feature parts separately.
+ * e.g.:
+ * 0.0.1234-0.0.5678(ro:erplog) -> from: 0.0.1234 to: 0.0.5678 features: ro:erplog
+ * 0.0.8765(raw) -> from: 0.0.8765 to: null features: raw
+ * 0x4321 -> from: 0x4321 to: null features: null
*/
-static char *
-dasd_parse_range( char *parsestring ) {
+static int __init dasd_evaluate_range_param(char *range, char **from_str,
+ char **to_str, char **features_str)
+{
+ int rc = 0;
+
+ /* Do we have a range or a single device? */
+ if (strchr(range, '-')) {
+ *from_str = strsep(&range, "-");
+ *to_str = strsep(&range, "(");
+ *features_str = strsep(&range, ")");
+ } else {
+ *from_str = strsep(&range, "(");
+ *features_str = strsep(&range, ")");
+ }
+ if (*features_str && !range) {
+ pr_warn("A closing parenthesis ')' is missing in the dasd= parameter\n");
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+/*
+ * Try to interpret the range string as a device number or a range of devices.
+ * If the interpretation is successful, create the matching dasd_devmap entries.
+ * If interpretation fails or in case of an error, return an error code.
+ */
+static int __init dasd_parse_range(const char *range)
+{
struct dasd_devmap *devmap;
int from, from_id0, from_id1;
int to, to_id0, to_id1;
- int features, rc;
- char bus_id[DASD_BUS_ID_SIZE+1], *str;
-
- str = parsestring;
- rc = dasd_busid(&str, &from_id0, &from_id1, &from);
- if (rc == 0) {
- to = from;
- to_id0 = from_id0;
- to_id1 = from_id1;
- if (*str == '-') {
- str++;
- rc = dasd_busid(&str, &to_id0, &to_id1, &to);
+ int features;
+ char bus_id[DASD_BUS_ID_SIZE + 1];
+ char *features_str = NULL;
+ char *from_str = NULL;
+ char *to_str = NULL;
+ size_t len = strlen(range) + 1;
+ char tmp[len];
+
+ strlcpy(tmp, range, len);
+
+ if (dasd_evaluate_range_param(tmp, &from_str, &to_str, &features_str))
+ goto out_err;
+
+ if (dasd_busid(from_str, &from_id0, &from_id1, &from))
+ goto out_err;
+
+ to = from;
+ to_id0 = from_id0;
+ to_id1 = from_id1;
+ if (to_str) {
+ if (dasd_busid(to_str, &to_id0, &to_id1, &to))
+ goto out_err;
+ if (from_id0 != to_id0 || from_id1 != to_id1 || from > to) {
+ pr_err("%s is not a valid device range\n", range);
+ goto out_err;
}
}
- if (rc == 0 &&
- (from_id0 != to_id0 || from_id1 != to_id1 || from > to))
- rc = -EINVAL;
- if (rc) {
- pr_err("%s is not a valid device range\n", parsestring);
- return ERR_PTR(rc);
- }
- features = dasd_feature_list(str, &str);
+
+ features = dasd_feature_list(features_str);
if (features < 0)
- return ERR_PTR(-EINVAL);
+ goto out_err;
/* each device in dasd= parameter should be set initially online */
features |= DASD_FEATURE_INITIAL_ONLINE;
while (from <= to) {
- sprintf(bus_id, "%01x.%01x.%04x",
- from_id0, from_id1, from++);
+ sprintf(bus_id, "%01x.%01x.%04x", from_id0, from_id1, from++);
devmap = dasd_add_busid(bus_id, features);
if (IS_ERR(devmap))
- return (char *)devmap;
+ return PTR_ERR(devmap);
}
- if (*str == ',')
- return str + 1;
- if (*str == '\0')
- return str;
- pr_warn("The dasd= parameter value %s has an invalid ending\n", str);
- return ERR_PTR(-EINVAL);
-}
-static char *
-dasd_parse_next_element( char *parsestring ) {
- char * residual_str;
- residual_str = dasd_parse_keyword(parsestring);
- if (!IS_ERR(residual_str))
- return residual_str;
- residual_str = dasd_parse_range(parsestring);
- return residual_str;
+ return 0;
+
+out_err:
+ return -EINVAL;
}
/*
* Parse parameters stored in dasd[]
* The 'dasd=...' parameter allows to specify a comma separated list of
- * keywords and device ranges. When the dasd driver is build into the kernel,
- * the complete list will be stored as one element of the dasd[] array.
- * When the dasd driver is build as a module, then the list is broken into
- * it's elements and each dasd[] entry contains one element.
+ * keywords and device ranges. The parameters in that list will be stored as
+ * separate elements in dasd[].
*/
-int
-dasd_parse(void)
+int __init dasd_parse(void)
{
int rc, i;
- char *parsestring;
+ char *cur;
rc = 0;
- for (i = 0; i < 256; i++) {
- if (dasd[i] == NULL)
+ for (i = 0; i < DASD_MAX_PARAMS; i++) {
+ cur = dasd[i];
+ if (!cur)
break;
- parsestring = dasd[i];
- /* loop over the comma separated list in the parsestring */
- while (*parsestring) {
- parsestring = dasd_parse_next_element(parsestring);
- if(IS_ERR(parsestring)) {
- rc = PTR_ERR(parsestring);
- break;
- }
- }
- if (rc) {
- DBF_EVENT(DBF_ALERT, "%s", "invalid range found");
+ if (*cur == '\0')
+ continue;
+
+ rc = dasd_parse_keyword(cur);
+ if (rc)
+ rc = dasd_parse_range(cur);
+
+ if (rc)
break;
- }
}
+
return rc;
}
@@ -1528,14 +1518,12 @@ dasd_path_threshold_store(struct device *dev, struct device_attribute *attr,
if (IS_ERR(device))
return -ENODEV;
- if ((kstrtoul(buf, 10, &val) != 0) ||
- (val > DASD_THRHLD_MAX) || val == 0) {
+ if (kstrtoul(buf, 10, &val) != 0 || val > DASD_THRHLD_MAX) {
dasd_put_device(device);
return -EINVAL;
}
spin_lock_irqsave(get_ccwdev_lock(to_ccwdev(dev)), flags);
- if (val)
- device->path_thrhld = val;
+ device->path_thrhld = val;
spin_unlock_irqrestore(get_ccwdev_lock(to_ccwdev(dev)), flags);
dasd_put_device(device);
return count;
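The rewritten dasd_busid() above switches from open-coded simple_strtoul() scanning to strsep() plus kstrtouint(). A condensed sketch of that parsing pattern for a "c.s.dddd" bus id follows; parse_busid_sketch() is illustrative only and not part of the patch.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>

/* Split "c.s.dddd" on '.' and convert each hex token; kstrtouint()
 * returns 0 on success, so any non-zero result is treated as an error.
 * Note that strsep() modifies the buffer it parses. */
static int parse_busid_sketch(char *str, unsigned int *cssid,
			      unsigned int *ssid, unsigned int *devno)
{
	char *tok;

	tok = strsep(&str, ".");
	if (!tok || kstrtouint(tok, 16, cssid) || *cssid > 0xff)
		return -EINVAL;
	tok = strsep(&str, ".");
	if (!tok || kstrtouint(tok, 16, ssid) || *ssid > 0xff)
		return -EINVAL;
	tok = strsep(&str, ".");
	if (!tok || kstrtouint(tok, 16, devno) || *devno > 0xffff)
		return -EINVAL;
	return 0;
}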
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index ade04216c970..0b38217f8147 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -2543,8 +2543,8 @@ dasd_eckd_build_format(struct dasd_device *base,
DASD_ECKD_CCW_WRITE_CKD_MT;
ccw->flags = CCW_FLAG_SLI;
ccw->count = 8;
- ccw->cda = (__u32)(addr_t) ect;
- ccw++;
+ ccw->cda = (__u32)(addr_t) ect;
+ ccw++;
}
}
}
@@ -4864,7 +4864,7 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
break;
case 3: /* tsa_intrg */
len += sprintf(page + len, PRINTK_HEADER
- " tsb->tsa.intrg.: not supportet yet\n");
+ " tsb->tsa.intrg.: not supported yet\n");
break;
}
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 24be210c10e5..518dba2732d5 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -805,7 +805,7 @@ struct dasd_device *dasd_device_from_devindex(int);
void dasd_add_link_to_gendisk(struct gendisk *, struct dasd_device *);
struct dasd_device *dasd_device_from_gendisk(struct gendisk *);
-int dasd_parse(void);
+int dasd_parse(void) __init;
int dasd_busid_known(const char *);
/* externals in dasd_gendisk.c */
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 9d66b4fb174b..415d10a67b7a 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -892,7 +892,7 @@ dcssblk_direct_access (struct block_device *bdev, sector_t secnum,
dev_info = bdev->bd_disk->private_data;
if (!dev_info)
return -ENODEV;
- dev_sz = dev_info->end - dev_info->start;
+ dev_sz = dev_info->end - dev_info->start + 1;
offset = secnum * 512;
*kaddr = (void *) dev_info->start + offset;
*pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset), PFN_DEV);
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 9f16ea6964ec..152de6817875 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -300,13 +300,6 @@ static void scm_blk_request(struct request_queue *rq)
struct request *req;
while ((req = blk_peek_request(rq))) {
- if (req->cmd_type != REQ_TYPE_FS) {
- blk_start_request(req);
- blk_dump_rq_flags(req, KMSG_COMPONENT " bad request");
- __blk_end_request_all(req, -EIO);
- continue;
- }
-
if (!scm_permit_request(bdev, req))
goto out;
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 41e28b23b26a..0c443e26835d 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -2,9 +2,23 @@
# S/390 character devices
#
+ifdef CONFIG_FUNCTION_TRACER
+# Do not trace early setup code
+CFLAGS_REMOVE_sclp_early_core.o = $(CC_FLAGS_FTRACE)
+endif
+
+GCOV_PROFILE_sclp_early_core.o := n
+KCOV_INSTRUMENT_sclp_early_core.o := n
+UBSAN_SANITIZE_sclp_early_core.o := n
+
+ifneq ($(CC_FLAGS_MARCH),-march=z900)
+CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_MARCH)
+CFLAGS_sclp_early_core.o += -march=z900
+endif
+
obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \
- sclp_early.o
+ sclp_early.o sclp_early_core.o
obj-$(CONFIG_TN3270) += raw3270.o
obj-$(CONFIG_TN3270_CONSOLE) += con3270.o
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index 285b4006f44b..8522cfce5b4e 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -31,7 +31,7 @@
static struct raw3270_fn con3270_fn;
-static bool auto_update = 1;
+static bool auto_update = true;
module_param(auto_update, bool, 0);
/*
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index a2da898ce90f..710f2292911d 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -82,7 +82,7 @@ static LIST_HEAD(raw3270_devices);
static int raw3270_registered;
/* Module parameters */
-static bool tubxcorrect = 0;
+static bool tubxcorrect;
module_param(tubxcorrect, bool, 0);
/*
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 272898225dbb..9c471ea1b99c 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -94,13 +94,6 @@ static struct timer_list sclp_request_timer;
/* Timer for queued requests. */
static struct timer_list sclp_queue_timer;
-/* Internal state: is the driver initialized? */
-static volatile enum sclp_init_state_t {
- sclp_init_state_uninitialized,
- sclp_init_state_initializing,
- sclp_init_state_initialized
-} sclp_init_state = sclp_init_state_uninitialized;
-
/* Internal state: is a request active at the sclp? */
static volatile enum sclp_running_state_t {
sclp_running_state_idle,
@@ -147,31 +140,6 @@ static void __sclp_make_read_req(void);
static int sclp_init_mask(int calculate);
static int sclp_init(void);
-/* Perform service call. Return 0 on success, non-zero otherwise. */
-int
-sclp_service_call(sclp_cmdw_t command, void *sccb)
-{
- int cc = 4; /* Initialize for program check handling */
-
- asm volatile(
- "0: .insn rre,0xb2200000,%1,%2\n" /* servc %1,%2 */
- "1: ipm %0\n"
- " srl %0,28\n"
- "2:\n"
- EX_TABLE(0b, 2b)
- EX_TABLE(1b, 2b)
- : "+&d" (cc) : "d" (command), "a" (__pa(sccb))
- : "cc", "memory");
- if (cc == 4)
- return -EINVAL;
- if (cc == 3)
- return -EIO;
- if (cc == 2)
- return -EBUSY;
- return 0;
-}
-
-
static void
__sclp_queue_read_req(void)
{
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index e1fc7eb043d6..53b5d1b9761a 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -204,19 +204,57 @@ void sclp_unregister(struct sclp_register *reg);
int sclp_remove_processed(struct sccb_header *sccb);
int sclp_deactivate(void);
int sclp_reactivate(void);
-int sclp_service_call(sclp_cmdw_t command, void *sccb);
int sclp_sync_request(sclp_cmdw_t command, void *sccb);
int sclp_sync_request_timeout(sclp_cmdw_t command, void *sccb, int timeout);
int sclp_sdias_init(void);
void sclp_sdias_exit(void);
+enum {
+ sclp_init_state_uninitialized,
+ sclp_init_state_initializing,
+ sclp_init_state_initialized
+};
+
+extern int sclp_init_state;
extern int sclp_console_pages;
extern int sclp_console_drop;
extern unsigned long sclp_console_full;
+extern char sclp_early_sccb[PAGE_SIZE];
+
+void sclp_early_wait_irq(void);
+int sclp_early_cmd(sclp_cmdw_t cmd, void *sccb);
+unsigned int sclp_early_con_check_linemode(struct init_sccb *sccb);
+int sclp_early_set_event_mask(struct init_sccb *sccb,
+ unsigned long receive_mask,
+ unsigned long send_mask);
+
/* useful inlines */
+/* Perform service call. Return 0 on success, non-zero otherwise. */
+static inline int sclp_service_call(sclp_cmdw_t command, void *sccb)
+{
+ int cc = 4; /* Initialize for program check handling */
+
+ asm volatile(
+ "0: .insn rre,0xb2200000,%1,%2\n" /* servc %1,%2 */
+ "1: ipm %0\n"
+ " srl %0,28\n"
+ "2:\n"
+ EX_TABLE(0b, 2b)
+ EX_TABLE(1b, 2b)
+ : "+&d" (cc) : "d" (command), "a" ((unsigned long)sccb)
+ : "cc", "memory");
+ if (cc == 4)
+ return -EINVAL;
+ if (cc == 3)
+ return -EIO;
+ if (cc == 2)
+ return -EBUSY;
+ return 0;
+}
+
/* VM uses EBCDIC 037, LPAR+native(SE+HMC) use EBCDIC 500 */
/* translate single character from ASCII to EBCDIC */
static inline unsigned char
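sclp_service_call() is now a static inline in this header and maps the servc condition codes to -EINVAL, -EIO and -EBUSY. A hedged sketch of how the early helpers removed from sclp_early.c used that return convention, retrying while the SCLP is busy; issue_sclp_cmd() is a hypothetical name, not part of the patch.

/* Illustrative only: retry the service call while the SCLP reports busy
 * (cc 2 -> -EBUSY), as the old sclp_cmd_sync_early()/sclp_cmd_early()
 * helpers did before this patch. */
static int issue_sclp_cmd(sclp_cmdw_t cmd, void *sccb)
{
	int rc;

	do {
		rc = sclp_service_call(cmd, sccb);
	} while (rc == -EBUSY);

	return rc;
}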
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index f8e46c22e641..519ec1787117 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -55,46 +55,23 @@ struct read_info_sccb {
u8 _pad_128[4096 - 128]; /* 128-4095 */
} __packed __aligned(PAGE_SIZE);
-static char sccb_early[PAGE_SIZE] __aligned(PAGE_SIZE) __initdata;
static struct sclp_ipl_info sclp_ipl_info;
struct sclp_info sclp;
EXPORT_SYMBOL(sclp);
-static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb)
+static int __init sclp_early_read_info(struct read_info_sccb *sccb)
{
- int rc;
-
- __ctl_set_bit(0, 9);
- rc = sclp_service_call(cmd, sccb);
- if (rc)
- goto out;
- __load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA |
- PSW_MASK_BA | PSW_MASK_EXT | PSW_MASK_WAIT);
- local_irq_disable();
-out:
- /* Contents of the sccb might have changed. */
- barrier();
- __ctl_clear_bit(0, 9);
- return rc;
-}
-
-static int __init sclp_read_info_early(struct read_info_sccb *sccb)
-{
- int rc, i;
+ int i;
sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
SCLP_CMDW_READ_SCP_INFO};
for (i = 0; i < ARRAY_SIZE(commands); i++) {
- do {
- memset(sccb, 0, sizeof(*sccb));
- sccb->header.length = sizeof(*sccb);
- sccb->header.function_code = 0x80;
- sccb->header.control_mask[2] = 0x80;
- rc = sclp_cmd_sync_early(commands[i], sccb);
- } while (rc == -EBUSY);
-
- if (rc)
+ memset(sccb, 0, sizeof(*sccb));
+ sccb->header.length = sizeof(*sccb);
+ sccb->header.function_code = 0x80;
+ sccb->header.control_mask[2] = 0x80;
+ if (sclp_early_cmd(commands[i], sccb))
break;
if (sccb->header.response_code == 0x10)
return 0;
@@ -104,12 +81,12 @@ static int __init sclp_read_info_early(struct read_info_sccb *sccb)
return -EIO;
}
-static void __init sclp_facilities_detect(struct read_info_sccb *sccb)
+static void __init sclp_early_facilities_detect(struct read_info_sccb *sccb)
{
struct sclp_core_entry *cpue;
u16 boot_cpu_address, cpu;
- if (sclp_read_info_early(sccb))
+ if (sclp_early_read_info(sccb))
return;
sclp.facilities = sccb->facilities;
@@ -172,141 +149,96 @@ static void __init sclp_facilities_detect(struct read_info_sccb *sccb)
}
/*
- * This function will be called after sclp_facilities_detect(), which gets
- * called from early.c code. The sclp_facilities_detect() function retrieves
+ * This function will be called after sclp_early_facilities_detect(), which gets
+ * called from early.c code. The sclp_early_facilities_detect() function retrieves
* and saves the IPL information.
*/
-void __init sclp_get_ipl_info(struct sclp_ipl_info *info)
+void __init sclp_early_get_ipl_info(struct sclp_ipl_info *info)
{
*info = sclp_ipl_info;
}
-static int __init sclp_cmd_early(sclp_cmdw_t cmd, void *sccb)
-{
- int rc;
-
- do {
- rc = sclp_cmd_sync_early(cmd, sccb);
- } while (rc == -EBUSY);
-
- if (rc)
- return -EIO;
- if (((struct sccb_header *) sccb)->response_code != 0x0020)
- return -EIO;
- return 0;
-}
-
-static void __init sccb_init_eq_size(struct sdias_sccb *sccb)
-{
- memset(sccb, 0, sizeof(*sccb));
-
- sccb->hdr.length = sizeof(*sccb);
- sccb->evbuf.hdr.length = sizeof(struct sdias_evbuf);
- sccb->evbuf.hdr.type = EVTYP_SDIAS;
- sccb->evbuf.event_qual = SDIAS_EQ_SIZE;
- sccb->evbuf.data_id = SDIAS_DI_FCP_DUMP;
- sccb->evbuf.event_id = 4712;
- sccb->evbuf.dbs = 1;
-}
+static struct sclp_core_info sclp_early_core_info __initdata;
+static int sclp_early_core_info_valid __initdata;
-static int __init sclp_set_event_mask(struct init_sccb *sccb,
- unsigned long receive_mask,
- unsigned long send_mask)
+static void __init sclp_early_init_core_info(struct read_cpu_info_sccb *sccb)
{
- memset(sccb, 0, sizeof(*sccb));
- sccb->header.length = sizeof(*sccb);
- sccb->mask_length = sizeof(sccb_mask_t);
- sccb->receive_mask = receive_mask;
- sccb->send_mask = send_mask;
- return sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_MASK, sccb);
-}
-
-static struct sclp_core_info sclp_core_info_early __initdata;
-static int sclp_core_info_early_valid __initdata;
-
-static void __init sclp_init_core_info_early(struct read_cpu_info_sccb *sccb)
-{
- int rc;
-
if (!SCLP_HAS_CPU_INFO)
return;
memset(sccb, 0, sizeof(*sccb));
sccb->header.length = sizeof(*sccb);
- do {
- rc = sclp_cmd_sync_early(SCLP_CMDW_READ_CPU_INFO, sccb);
- } while (rc == -EBUSY);
- if (rc)
+ if (sclp_early_cmd(SCLP_CMDW_READ_CPU_INFO, sccb))
return;
if (sccb->header.response_code != 0x0010)
return;
- sclp_fill_core_info(&sclp_core_info_early, sccb);
- sclp_core_info_early_valid = 1;
+ sclp_fill_core_info(&sclp_early_core_info, sccb);
+ sclp_early_core_info_valid = 1;
}
-int __init _sclp_get_core_info_early(struct sclp_core_info *info)
+int __init sclp_early_get_core_info(struct sclp_core_info *info)
{
- if (!sclp_core_info_early_valid)
+ if (!sclp_early_core_info_valid)
return -EIO;
- *info = sclp_core_info_early;
+ *info = sclp_early_core_info;
return 0;
}
-static long __init sclp_hsa_size_init(struct sdias_sccb *sccb)
+static long __init sclp_early_hsa_size_init(struct sdias_sccb *sccb)
{
- sccb_init_eq_size(sccb);
- if (sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_DATA, sccb))
+ memset(sccb, 0, sizeof(*sccb));
+ sccb->hdr.length = sizeof(*sccb);
+ sccb->evbuf.hdr.length = sizeof(struct sdias_evbuf);
+ sccb->evbuf.hdr.type = EVTYP_SDIAS;
+ sccb->evbuf.event_qual = SDIAS_EQ_SIZE;
+ sccb->evbuf.data_id = SDIAS_DI_FCP_DUMP;
+ sccb->evbuf.event_id = 4712;
+ sccb->evbuf.dbs = 1;
+ if (sclp_early_cmd(SCLP_CMDW_WRITE_EVENT_DATA, sccb))
+ return -EIO;
+ if (sccb->hdr.response_code != 0x20)
return -EIO;
if (sccb->evbuf.blk_cnt == 0)
return 0;
return (sccb->evbuf.blk_cnt - 1) * PAGE_SIZE;
}
-static long __init sclp_hsa_copy_wait(struct sccb_header *sccb)
+static long __init sclp_early_hsa_copy_wait(struct sdias_sccb *sccb)
{
memset(sccb, 0, PAGE_SIZE);
- sccb->length = PAGE_SIZE;
- if (sclp_cmd_early(SCLP_CMDW_READ_EVENT_DATA, sccb))
+ sccb->hdr.length = PAGE_SIZE;
+ if (sclp_early_cmd(SCLP_CMDW_READ_EVENT_DATA, sccb))
return -EIO;
- if (((struct sdias_sccb *) sccb)->evbuf.blk_cnt == 0)
+ if ((sccb->hdr.response_code != 0x20) && (sccb->hdr.response_code != 0x220))
+ return -EIO;
+ if (sccb->evbuf.blk_cnt == 0)
return 0;
- return (((struct sdias_sccb *) sccb)->evbuf.blk_cnt - 1) * PAGE_SIZE;
+ return (sccb->evbuf.blk_cnt - 1) * PAGE_SIZE;
}
-static void __init sclp_hsa_size_detect(void *sccb)
+static void __init sclp_early_hsa_size_detect(void *sccb)
{
- long size;
+ unsigned long flags;
+ long size = -EIO;
- /* First try synchronous interface (LPAR) */
- if (sclp_set_event_mask(sccb, 0, 0x40000010))
- return;
- size = sclp_hsa_size_init(sccb);
- if (size < 0)
- return;
- if (size != 0)
+ raw_local_irq_save(flags);
+ if (sclp_early_set_event_mask(sccb, EVTYP_SDIAS_MASK, EVTYP_SDIAS_MASK))
goto out;
- /* Then try asynchronous interface (z/VM) */
- if (sclp_set_event_mask(sccb, 0x00000010, 0x40000010))
- return;
- size = sclp_hsa_size_init(sccb);
- if (size < 0)
- return;
- size = sclp_hsa_copy_wait(sccb);
- if (size < 0)
- return;
+ size = sclp_early_hsa_size_init(sccb);
+ /* First check for synchronous response (LPAR) */
+ if (size)
+ goto out_mask;
+ if (!(S390_lowcore.ext_params & 1))
+ sclp_early_wait_irq();
+ size = sclp_early_hsa_copy_wait(sccb);
+out_mask:
+ sclp_early_set_event_mask(sccb, 0, 0);
out:
- sclp.hsa_size = size;
-}
-
-static unsigned int __init sclp_con_check_linemode(struct init_sccb *sccb)
-{
- if (!(sccb->sclp_send_mask & EVTYP_OPCMD_MASK))
- return 0;
- if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK)))
- return 0;
- return 1;
+ raw_local_irq_restore(flags);
+ if (size > 0)
+ sclp.hsa_size = size;
}
-static void __init sclp_console_detect(struct init_sccb *sccb)
+static void __init sclp_early_console_detect(struct init_sccb *sccb)
{
if (sccb->header.response_code != 0x20)
return;
@@ -314,21 +246,22 @@ static void __init sclp_console_detect(struct init_sccb *sccb)
if (sccb->sclp_send_mask & EVTYP_VT220MSG_MASK)
sclp.has_vt220 = 1;
- if (sclp_con_check_linemode(sccb))
+ if (sclp_early_con_check_linemode(sccb))
sclp.has_linemode = 1;
}
void __init sclp_early_detect(void)
{
- void *sccb = &sccb_early;
+ void *sccb = &sclp_early_sccb;
- sclp_facilities_detect(sccb);
- sclp_init_core_info_early(sccb);
- sclp_hsa_size_detect(sccb);
+ sclp_early_facilities_detect(sccb);
+ sclp_early_init_core_info(sccb);
+ sclp_early_hsa_size_detect(sccb);
- /* Turn off SCLP event notifications. Also save remote masks in the
+ /*
+ * Turn off SCLP event notifications. Also save remote masks in the
* sccb. These are sufficient to detect sclp console capabilities.
*/
- sclp_set_event_mask(sccb, 0, 0);
- sclp_console_detect(sccb);
+ sclp_early_set_event_mask(sccb, 0, 0);
+ sclp_early_console_detect(sccb);
}
diff --git a/drivers/s390/char/sclp_early_core.c b/drivers/s390/char/sclp_early_core.c
new file mode 100644
index 000000000000..5029cc87e80f
--- /dev/null
+++ b/drivers/s390/char/sclp_early_core.c
@@ -0,0 +1,208 @@
+/*
+ * Copyright IBM Corp. 2015
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <asm/processor.h>
+#include <asm/lowcore.h>
+#include <asm/ebcdic.h>
+#include <asm/irq.h>
+#include "sclp.h"
+#include "sclp_rw.h"
+
+char sclp_early_sccb[PAGE_SIZE] __aligned(PAGE_SIZE) __section(.data);
+int sclp_init_state __section(.data) = sclp_init_state_uninitialized;
+
+void sclp_early_wait_irq(void)
+{
+ unsigned long psw_mask, addr;
+ psw_t psw_ext_save, psw_wait;
+ union ctlreg0 cr0, cr0_new;
+
+ __ctl_store(cr0.val, 0, 0);
+ cr0_new.val = cr0.val & ~CR0_IRQ_SUBCLASS_MASK;
+ cr0_new.lap = 0;
+ cr0_new.sssm = 1;
+ __ctl_load(cr0_new.val, 0, 0);
+
+ psw_ext_save = S390_lowcore.external_new_psw;
+ psw_mask = __extract_psw();
+ S390_lowcore.external_new_psw.mask = psw_mask;
+ psw_wait.mask = psw_mask | PSW_MASK_EXT | PSW_MASK_WAIT;
+ S390_lowcore.ext_int_code = 0;
+
+ do {
+ asm volatile(
+ " larl %[addr],0f\n"
+ " stg %[addr],%[psw_wait_addr]\n"
+ " stg %[addr],%[psw_ext_addr]\n"
+ " lpswe %[psw_wait]\n"
+ "0:\n"
+ : [addr] "=&d" (addr),
+ [psw_wait_addr] "=Q" (psw_wait.addr),
+ [psw_ext_addr] "=Q" (S390_lowcore.external_new_psw.addr)
+ : [psw_wait] "Q" (psw_wait)
+ : "cc", "memory");
+ } while (S390_lowcore.ext_int_code != EXT_IRQ_SERVICE_SIG);
+
+ S390_lowcore.external_new_psw = psw_ext_save;
+ __ctl_load(cr0.val, 0, 0);
+}
+
+int sclp_early_cmd(sclp_cmdw_t cmd, void *sccb)
+{
+ unsigned long flags;
+ int rc;
+
+ raw_local_irq_save(flags);
+ rc = sclp_service_call(cmd, sccb);
+ if (rc)
+ goto out;
+ sclp_early_wait_irq();
+out:
+ raw_local_irq_restore(flags);
+ return rc;
+}
+
+struct write_sccb {
+ struct sccb_header header;
+ struct msg_buf msg;
+} __packed;
+
+/* Output multi-line text using SCLP Message interface. */
+static void sclp_early_print_lm(const char *str, unsigned int len)
+{
+ unsigned char *ptr, *end, ch;
+ unsigned int count, offset;
+ struct write_sccb *sccb;
+ struct msg_buf *msg;
+ struct mdb *mdb;
+ struct mto *mto;
+ struct go *go;
+
+ sccb = (struct write_sccb *) &sclp_early_sccb;
+ end = (unsigned char *) sccb + sizeof(sclp_early_sccb) - 1;
+ memset(sccb, 0, sizeof(*sccb));
+ ptr = (unsigned char *) &sccb->msg.mdb.mto;
+ offset = 0;
+ do {
+ for (count = sizeof(*mto); offset < len; count++) {
+ ch = str[offset++];
+ if ((ch == 0x0a) || (ptr + count > end))
+ break;
+ ptr[count] = _ascebc[ch];
+ }
+ mto = (struct mto *) ptr;
+ memset(mto, 0, sizeof(*mto));
+ mto->length = count;
+ mto->type = 4;
+ mto->line_type_flags = LNTPFLGS_ENDTEXT;
+ ptr += count;
+ } while ((offset < len) && (ptr + sizeof(*mto) <= end));
+ len = ptr - (unsigned char *) sccb;
+ sccb->header.length = len - offsetof(struct write_sccb, header);
+ msg = &sccb->msg;
+ msg->header.type = EVTYP_MSG;
+ msg->header.length = len - offsetof(struct write_sccb, msg.header);
+ mdb = &msg->mdb;
+ mdb->header.type = 1;
+ mdb->header.tag = 0xD4C4C240;
+ mdb->header.revision_code = 1;
+ mdb->header.length = len - offsetof(struct write_sccb, msg.mdb.header);
+ go = &mdb->go;
+ go->length = sizeof(*go);
+ go->type = 1;
+ sclp_early_cmd(SCLP_CMDW_WRITE_EVENT_DATA, sccb);
+}
+
+struct vt220_sccb {
+ struct sccb_header header;
+ struct {
+ struct evbuf_header header;
+ char data[];
+ } msg;
+} __packed;
+
+/* Output multi-line text using SCLP VT220 interface. */
+static void sclp_early_print_vt220(const char *str, unsigned int len)
+{
+ struct vt220_sccb *sccb;
+
+ sccb = (struct vt220_sccb *) &sclp_early_sccb;
+ if (sizeof(*sccb) + len >= sizeof(sclp_early_sccb))
+ len = sizeof(sclp_early_sccb) - sizeof(*sccb);
+ memset(sccb, 0, sizeof(*sccb));
+ memcpy(&sccb->msg.data, str, len);
+ sccb->header.length = sizeof(*sccb) + len;
+ sccb->msg.header.length = sizeof(sccb->msg) + len;
+ sccb->msg.header.type = EVTYP_VT220MSG;
+ sclp_early_cmd(SCLP_CMDW_WRITE_EVENT_DATA, sccb);
+}
+
+int sclp_early_set_event_mask(struct init_sccb *sccb,
+ unsigned long receive_mask,
+ unsigned long send_mask)
+{
+ memset(sccb, 0, sizeof(*sccb));
+ sccb->header.length = sizeof(*sccb);
+ sccb->mask_length = sizeof(sccb_mask_t);
+ sccb->receive_mask = receive_mask;
+ sccb->send_mask = send_mask;
+ if (sclp_early_cmd(SCLP_CMDW_WRITE_EVENT_MASK, sccb))
+ return -EIO;
+ if (sccb->header.response_code != 0x20)
+ return -EIO;
+ return 0;
+}
+
+unsigned int sclp_early_con_check_linemode(struct init_sccb *sccb)
+{
+ if (!(sccb->sclp_send_mask & EVTYP_OPCMD_MASK))
+ return 0;
+ if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK)))
+ return 0;
+ return 1;
+}
+
+static int sclp_early_setup(int disable, int *have_linemode, int *have_vt220)
+{
+ unsigned long receive_mask, send_mask;
+ struct init_sccb *sccb;
+ int rc;
+
+ *have_linemode = *have_vt220 = 0;
+ sccb = (struct init_sccb *) &sclp_early_sccb;
+ receive_mask = disable ? 0 : EVTYP_OPCMD_MASK;
+ send_mask = disable ? 0 : EVTYP_VT220MSG_MASK | EVTYP_MSG_MASK;
+ rc = sclp_early_set_event_mask(sccb, receive_mask, send_mask);
+ if (rc)
+ return rc;
+ *have_linemode = sclp_early_con_check_linemode(sccb);
+ *have_vt220 = sccb->send_mask & EVTYP_VT220MSG_MASK;
+ return rc;
+}
+
+/*
+ * Output one or more lines of text on the SCLP console (VT220 and /
+ * or line-mode).
+ */
+void __sclp_early_printk(const char *str, unsigned int len)
+{
+ int have_linemode, have_vt220;
+
+ if (sclp_init_state != sclp_init_state_uninitialized)
+ return;
+ if (sclp_early_setup(0, &have_linemode, &have_vt220) != 0)
+ return;
+ if (have_linemode)
+ sclp_early_print_lm(str, len);
+ if (have_vt220)
+ sclp_early_print_vt220(str, len);
+ sclp_early_setup(1, &have_linemode, &have_vt220);
+}
+
+void sclp_early_printk(const char *str)
+{
+ __sclp_early_printk(str, strlen(str));
+}
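sclp_early_printk() at the end of the new file is meant to be usable before the regular SCLP driver comes up (note the sclp_init_state check in __sclp_early_printk()). A minimal usage sketch, assuming a hypothetical caller in early boot code with the declaration visible; only sclp_early_printk() itself is real.

/* Hypothetical early-boot caller, illustration only. */
static void __init report_boot_stage(const char *stage)
{
	char msg[64];

	snprintf(msg, sizeof(msg), "boot stage: %s\n", stage);
	sclp_early_printk(msg);
}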
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index d3b51edb056e..aaed778f67c4 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -15,7 +15,6 @@
#include <linux/init.h>
#include <linux/slab.h>
-#include <linux/miscdevice.h>
#include <linux/debugfs.h>
#include <linux/memblock.h>
@@ -273,7 +272,7 @@ static int __init zcore_reipl_init(void)
rc = memcpy_hsa_kernel(ipl_block, ipib_info.ipib, PAGE_SIZE);
else
rc = memcpy_real(ipl_block, (void *) ipib_info.ipib, PAGE_SIZE);
- if (rc || csum_partial(ipl_block, ipl_block->hdr.len, 0) !=
+ if (rc || (__force u32)csum_partial(ipl_block, ipl_block->hdr.len, 0) !=
ipib_info.checksum) {
TRACE("Checksum does not match\n");
free_page((unsigned long) ipl_block);
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 876c7e6e3a99..7e0d4f724dda 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -444,6 +444,7 @@ int chp_update_desc(struct channel_path *chp)
*/
int chp_new(struct chp_id chpid)
{
+ struct channel_subsystem *css = css_by_id(chpid.cssid);
struct channel_path *chp;
int ret;
@@ -456,7 +457,7 @@ int chp_new(struct chp_id chpid)
/* fill in status, etc. */
chp->chpid = chpid;
chp->state = 1;
- chp->dev.parent = &channel_subsystems[chpid.cssid]->device;
+ chp->dev.parent = &css->device;
chp->dev.groups = chp_attr_groups;
chp->dev.release = chp_release;
mutex_init(&chp->lock);
@@ -479,17 +480,17 @@ int chp_new(struct chp_id chpid)
put_device(&chp->dev);
goto out;
}
- mutex_lock(&channel_subsystems[chpid.cssid]->mutex);
- if (channel_subsystems[chpid.cssid]->cm_enabled) {
+ mutex_lock(&css->mutex);
+ if (css->cm_enabled) {
ret = chp_add_cmg_attr(chp);
if (ret) {
device_unregister(&chp->dev);
- mutex_unlock(&channel_subsystems[chpid.cssid]->mutex);
+ mutex_unlock(&css->mutex);
goto out;
}
}
- channel_subsystems[chpid.cssid]->chps[chpid.id] = chp;
- mutex_unlock(&channel_subsystems[chpid.cssid]->mutex);
+ css->chps[chpid.id] = chp;
+ mutex_unlock(&css->mutex);
goto out;
out_free:
kfree(chp);
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h
index bb5a68226cda..0d8437b7ea72 100644
--- a/drivers/s390/cio/chp.h
+++ b/drivers/s390/cio/chp.h
@@ -54,7 +54,7 @@ struct channel_path {
/* Return channel_path struct for given chpid. */
static inline struct channel_path *chpid_to_chp(struct chp_id chpid)
{
- return channel_subsystems[chpid.cssid]->chps[chpid.id];
+ return css_by_id(chpid.cssid)->chps[chpid.id];
}
int chp_get_status(struct chp_id chpid);
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 11674698b36d..7b0b295b2313 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -1131,6 +1131,52 @@ int chsc_enable_facility(int operation_code)
return ret;
}
+int __init chsc_get_cssid(int idx)
+{
+ struct {
+ struct chsc_header request;
+ u8 atype;
+ u32 : 24;
+ u32 reserved1[6];
+ struct chsc_header response;
+ u32 reserved2[3];
+ struct {
+ u8 cssid;
+ u32 : 24;
+ } list[0];
+ } __packed *sdcal_area;
+ int ret;
+
+ spin_lock_irq(&chsc_page_lock);
+ memset(chsc_page, 0, PAGE_SIZE);
+ sdcal_area = chsc_page;
+ sdcal_area->request.length = 0x0020;
+ sdcal_area->request.code = 0x0034;
+ sdcal_area->atype = 4;
+
+ ret = chsc(sdcal_area);
+ if (ret) {
+ ret = (ret == 3) ? -ENODEV : -EBUSY;
+ goto exit;
+ }
+
+ ret = chsc_error_from_response(sdcal_area->response.code);
+ if (ret) {
+ CIO_CRW_EVENT(2, "chsc: sdcal failed (rc=%04x)\n",
+ sdcal_area->response.code);
+ goto exit;
+ }
+
+ if ((addr_t) &sdcal_area->list[idx] <
+ (addr_t) &sdcal_area->response + sdcal_area->response.length)
+ ret = sdcal_area->list[idx].cssid;
+ else
+ ret = -ENODEV;
+exit:
+ spin_unlock_irq(&chsc_page_lock);
+ return ret;
+}
+
struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;
@@ -1216,7 +1262,7 @@ int chsc_sstpi(void *page, void *result, size_t size)
struct chsc_header request;
unsigned int rsvd0[3];
struct chsc_header response;
- char data[size];
+ char data[];
} __attribute__ ((packed)) *rr;
int rc;
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 67c87b6e63ec..321a3f765810 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -242,6 +242,8 @@ int chsc_pnso_brinfo(struct subchannel_id schid,
struct chsc_brinfo_resume_token resume_token,
int cnc);
+int __init chsc_get_cssid(int idx);
+
#ifdef CONFIG_SCM_BUS
int scm_update_information(void);
int scm_process_availability_information(void);
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 6b6386e9a500..220491d27ef4 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -1085,15 +1085,9 @@ static ssize_t cmb_show_avg_utilization(struct device *dev,
data.function_pending_time +
data.device_disconnect_time;
- /* shift to avoid long long division */
- while (-1ul < (data.elapsed_time | utilization)) {
- utilization >>= 8;
- data.elapsed_time >>= 8;
- }
-
/* calculate value in 0.1 percent units */
- t = (unsigned long) data.elapsed_time / 1000;
- u = (unsigned long) utilization / t;
+ t = data.elapsed_time / 1000;
+ u = utilization / t;
return sprintf(buf, "%02ld.%01ld%%\n", u/ 10, u - (u/ 10) * 10);
}
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index bc099b61394d..e2aa944eb566 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -36,7 +36,8 @@
int css_init_done = 0;
int max_ssid;
-struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];
+#define MAX_CSS_IDX 0
+struct channel_subsystem *channel_subsystems[MAX_CSS_IDX + 1];
static struct bus_type css_bus_type;
int
@@ -702,7 +703,8 @@ css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
if (css_general_characteristics.mcss) {
css->global_pgid.pgid_high.ext_cssid.version = 0x80;
- css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
+ css->global_pgid.pgid_high.ext_cssid.cssid =
+ (css->cssid < 0) ? 0 : css->cssid;
} else {
css->global_pgid.pgid_high.cpu_addr = stap();
}
@@ -712,43 +714,44 @@ css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
css->global_pgid.tod_high = tod_high;
}
-static void
-channel_subsystem_release(struct device *dev)
+static void channel_subsystem_release(struct device *dev)
{
- struct channel_subsystem *css;
+ struct channel_subsystem *css = to_css(dev);
- css = to_css(dev);
mutex_destroy(&css->mutex);
- if (css->pseudo_subchannel) {
- /* Implies that it has been generated but never registered. */
- css_subchannel_release(&css->pseudo_subchannel->dev);
- css->pseudo_subchannel = NULL;
- }
kfree(css);
}
-static ssize_t
-css_cm_enable_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t real_cssid_show(struct device *dev, struct device_attribute *a,
+ char *buf)
+{
+ struct channel_subsystem *css = to_css(dev);
+
+ if (css->cssid < 0)
+ return -EINVAL;
+
+ return sprintf(buf, "%x\n", css->cssid);
+}
+static DEVICE_ATTR_RO(real_cssid);
+
+static ssize_t cm_enable_show(struct device *dev, struct device_attribute *a,
+ char *buf)
{
struct channel_subsystem *css = to_css(dev);
int ret;
- if (!css)
- return 0;
mutex_lock(&css->mutex);
ret = sprintf(buf, "%x\n", css->cm_enabled);
mutex_unlock(&css->mutex);
return ret;
}
-static ssize_t
-css_cm_enable_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t cm_enable_store(struct device *dev, struct device_attribute *a,
+ const char *buf, size_t count)
{
struct channel_subsystem *css = to_css(dev);
- int ret;
unsigned long val;
+ int ret;
ret = kstrtoul(buf, 16, &val);
if (ret)
@@ -767,51 +770,104 @@ css_cm_enable_store(struct device *dev, struct device_attribute *attr,
mutex_unlock(&css->mutex);
return ret < 0 ? ret : count;
}
+static DEVICE_ATTR_RW(cm_enable);
+
+static umode_t cm_enable_mode(struct kobject *kobj, struct attribute *attr,
+ int index)
+{
+ return css_chsc_characteristics.secm ? attr->mode : 0;
+}
+
+static struct attribute *cssdev_attrs[] = {
+ &dev_attr_real_cssid.attr,
+ NULL,
+};
+
+static struct attribute_group cssdev_attr_group = {
+ .attrs = cssdev_attrs,
+};
+
+static struct attribute *cssdev_cm_attrs[] = {
+ &dev_attr_cm_enable.attr,
+ NULL,
+};
+
+static struct attribute_group cssdev_cm_attr_group = {
+ .attrs = cssdev_cm_attrs,
+ .is_visible = cm_enable_mode,
+};
-static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);
+static const struct attribute_group *cssdev_attr_groups[] = {
+ &cssdev_attr_group,
+ &cssdev_cm_attr_group,
+ NULL,
+};
static int __init setup_css(int nr)
{
- u32 tod_high;
- int ret;
struct channel_subsystem *css;
+ int ret;
- css = channel_subsystems[nr];
- memset(css, 0, sizeof(struct channel_subsystem));
- css->pseudo_subchannel =
- kzalloc(sizeof(*css->pseudo_subchannel), GFP_KERNEL);
- if (!css->pseudo_subchannel)
+ css = kzalloc(sizeof(*css), GFP_KERNEL);
+ if (!css)
return -ENOMEM;
+
+ channel_subsystems[nr] = css;
+ dev_set_name(&css->device, "css%x", nr);
+ css->device.groups = cssdev_attr_groups;
+ css->device.release = channel_subsystem_release;
+
+ mutex_init(&css->mutex);
+ css->cssid = chsc_get_cssid(nr);
+ css_generate_pgid(css, (u32) (get_tod_clock() >> 32));
+
+ ret = device_register(&css->device);
+ if (ret) {
+ put_device(&css->device);
+ goto out_err;
+ }
+
+ css->pseudo_subchannel = kzalloc(sizeof(*css->pseudo_subchannel),
+ GFP_KERNEL);
+ if (!css->pseudo_subchannel) {
+ device_unregister(&css->device);
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
css->pseudo_subchannel->dev.parent = &css->device;
css->pseudo_subchannel->dev.release = css_subchannel_release;
- dev_set_name(&css->pseudo_subchannel->dev, "defunct");
mutex_init(&css->pseudo_subchannel->reg_mutex);
ret = css_sch_create_locks(css->pseudo_subchannel);
if (ret) {
kfree(css->pseudo_subchannel);
- return ret;
+ device_unregister(&css->device);
+ goto out_err;
}
- mutex_init(&css->mutex);
- css->valid = 1;
- css->cssid = nr;
- dev_set_name(&css->device, "css%x", nr);
- css->device.release = channel_subsystem_release;
- tod_high = (u32) (get_tod_clock() >> 32);
- css_generate_pgid(css, tod_high);
- return 0;
+
+ dev_set_name(&css->pseudo_subchannel->dev, "defunct");
+ ret = device_register(&css->pseudo_subchannel->dev);
+ if (ret) {
+ put_device(&css->pseudo_subchannel->dev);
+ device_unregister(&css->device);
+ goto out_err;
+ }
+
+ return ret;
+out_err:
+ channel_subsystems[nr] = NULL;
+ return ret;
}
static int css_reboot_event(struct notifier_block *this,
unsigned long event,
void *ptr)
{
- int ret, i;
+ struct channel_subsystem *css;
+ int ret;
ret = NOTIFY_DONE;
- for (i = 0; i <= __MAX_CSSID; i++) {
- struct channel_subsystem *css;
-
- css = channel_subsystems[i];
+ for_each_css(css) {
mutex_lock(&css->mutex);
if (css->cm_enabled)
if (chsc_secm(css, 0))
@@ -835,16 +891,14 @@ static struct notifier_block css_reboot_notifier = {
static int css_power_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
- int ret, i;
+ struct channel_subsystem *css;
+ int ret;
switch (event) {
case PM_HIBERNATION_PREPARE:
case PM_SUSPEND_PREPARE:
ret = NOTIFY_DONE;
- for (i = 0; i <= __MAX_CSSID; i++) {
- struct channel_subsystem *css;
-
- css = channel_subsystems[i];
+ for_each_css(css) {
mutex_lock(&css->mutex);
if (!css->cm_enabled) {
mutex_unlock(&css->mutex);
@@ -858,10 +912,7 @@ static int css_power_event(struct notifier_block *this, unsigned long event,
case PM_POST_HIBERNATION:
case PM_POST_SUSPEND:
ret = NOTIFY_DONE;
- for (i = 0; i <= __MAX_CSSID; i++) {
- struct channel_subsystem *css;
-
- css = channel_subsystems[i];
+ for_each_css(css) {
mutex_lock(&css->mutex);
if (!css->cm_enabled) {
mutex_unlock(&css->mutex);
@@ -916,36 +967,10 @@ static int __init css_bus_init(void)
goto out;
/* Setup css structure. */
- for (i = 0; i <= __MAX_CSSID; i++) {
- struct channel_subsystem *css;
-
- css = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
- if (!css) {
- ret = -ENOMEM;
- goto out_unregister;
- }
- channel_subsystems[i] = css;
+ for (i = 0; i <= MAX_CSS_IDX; i++) {
ret = setup_css(i);
- if (ret) {
- kfree(channel_subsystems[i]);
- goto out_unregister;
- }
- ret = device_register(&css->device);
- if (ret) {
- put_device(&css->device);
+ if (ret)
goto out_unregister;
- }
- if (css_chsc_characteristics.secm) {
- ret = device_create_file(&css->device,
- &dev_attr_cm_enable);
- if (ret)
- goto out_device;
- }
- ret = device_register(&css->pseudo_subchannel->dev);
- if (ret) {
- put_device(&css->pseudo_subchannel->dev);
- goto out_file;
- }
}
ret = register_reboot_notifier(&css_reboot_notifier);
if (ret)
@@ -961,23 +986,10 @@ static int __init css_bus_init(void)
isc_register(IO_SCH_ISC);
return 0;
-out_file:
- if (css_chsc_characteristics.secm)
- device_remove_file(&channel_subsystems[i]->device,
- &dev_attr_cm_enable);
-out_device:
- device_unregister(&channel_subsystems[i]->device);
out_unregister:
- while (i > 0) {
- struct channel_subsystem *css;
-
- i--;
- css = channel_subsystems[i];
+ while (i-- > 0) {
+ struct channel_subsystem *css = channel_subsystems[i];
device_unregister(&css->pseudo_subchannel->dev);
- css->pseudo_subchannel = NULL;
- if (css_chsc_characteristics.secm)
- device_remove_file(&css->device,
- &dev_attr_cm_enable);
device_unregister(&css->device);
}
bus_unregister(&css_bus_type);
@@ -993,14 +1005,9 @@ out:
static void __init css_bus_cleanup(void)
{
struct channel_subsystem *css;
- int i;
- for (i = 0; i <= __MAX_CSSID; i++) {
- css = channel_subsystems[i];
+ for_each_css(css) {
device_unregister(&css->pseudo_subchannel->dev);
- css->pseudo_subchannel = NULL;
- if (css_chsc_characteristics.secm)
- device_remove_file(&css->device, &dev_attr_cm_enable);
device_unregister(&css->device);
}
bus_unregister(&css_bus_type);
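One pattern worth noting in the reworked setup_css() above: once device_register() has been attempted, a failure is cleaned up with put_device() rather than kfree(), so that the release callback (here channel_subsystem_release()) frees the object. A minimal sketch of that driver-core rule with a hypothetical my_obj type, not part of the patch.

#include <linux/device.h>
#include <linux/slab.h>

struct my_obj {
	struct device dev;
};

static void my_obj_release(struct device *dev)
{
	kfree(container_of(dev, struct my_obj, dev));
}

static struct my_obj *my_obj_create(const char *name)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	obj->dev.release = my_obj_release;
	dev_set_name(&obj->dev, "%s", name);
	if (device_register(&obj->dev)) {
		put_device(&obj->dev);	/* not kfree(): release() runs here */
		return NULL;
	}
	return obj;
}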
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 2c9107e20251..c9f3fb39ebeb 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -113,8 +113,7 @@ extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
void css_update_ssd_info(struct subchannel *sch);
struct channel_subsystem {
- u8 cssid;
- int valid;
+ int cssid;
struct channel_path *chps[__MAX_CHPID + 1];
struct device device;
struct pgid global_pgid;
@@ -130,6 +129,16 @@ struct channel_subsystem {
extern struct channel_subsystem *channel_subsystems[];
+/* Dummy helper which needs to change once we support more than one css. */
+static inline struct channel_subsystem *css_by_id(u8 cssid)
+{
+ return channel_subsystems[0];
+}
+
+/* Dummy iterator which needs to change once we support more than one css. */
+#define for_each_css(css) \
+ for ((css) = channel_subsystems[0]; (css); (css) = NULL)
+
/* Helper functions to build lists for the slow path. */
void css_schedule_eval(struct subchannel_id schid);
void css_schedule_eval_all(void);
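css_by_id() and for_each_css() above currently only ever see channel subsystem 0, but callers are written against the generic interface so a multi-css implementation can slot in later. A small illustrative walker follows; dump_css_ids() is hypothetical and assumes this header (and linux/printk.h) is included.

/* Illustrative only: iterate all registered channel subsystems. */
static void dump_css_ids(void)
{
	struct channel_subsystem *css;

	for_each_css(css)
		pr_info("%s: real cssid %d\n",
			dev_name(&css->device), css->cssid);
}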
diff --git a/drivers/s390/cio/ioasm.c b/drivers/s390/cio/ioasm.c
index 8225da619014..4182f60124da 100644
--- a/drivers/s390/cio/ioasm.c
+++ b/drivers/s390/cio/ioasm.c
@@ -165,13 +165,15 @@ int tpi(struct tpi_info *addr)
int chsc(void *chsc_area)
{
typedef struct { char _[4096]; } addr_type;
- int cc;
+ int cc = -EIO;
asm volatile(
" .insn rre,0xb25f0000,%2,0\n"
- " ipm %0\n"
+ "0: ipm %0\n"
" srl %0,28\n"
- : "=d" (cc), "=m" (*(addr_type *) chsc_area)
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : "+d" (cc), "=m" (*(addr_type *) chsc_area)
: "d" (chsc_area), "m" (*(addr_type *) chsc_area)
: "cc");
trace_s390_cio_chsc(chsc_area, cc);
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 71bf9bded485..a4ad39ba3873 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -457,7 +457,7 @@ static inline void inbound_primed(struct qdio_q *q, int count)
{
int new;
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %02x", count);
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim:%1d %02x", q->nr, count);
/* for QEBSM the ACK was already set by EQBS */
if (is_qebsm(q)) {
@@ -544,7 +544,8 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
case SLSB_P_INPUT_ACK:
if (q->irq_ptr->perf_stat_enabled)
q->q_stats.nr_sbal_nop++;
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop:%1d %#02x",
+ q->nr, q->first_to_check);
break;
default:
WARN_ON_ONCE(1);
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 5d06253c2a7a..8ad98a902a91 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -147,11 +147,11 @@ static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
struct qdio_q *q;
int i;
- for_each_input_queue(irq, q, i) {
- if (!references_shared_dsci(irq) &&
- has_multiple_inq_on_dsci(irq))
- xchg(q->irq_ptr->dsci, 0);
+ if (!references_shared_dsci(irq) &&
+ has_multiple_inq_on_dsci(irq))
+ xchg(irq->dsci, 0);
+ for_each_input_queue(irq, q, i) {
if (q->u.in.queue_start_poll) {
/* skip if polling is enabled or already in work */
if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
@@ -161,11 +161,11 @@ static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
}
/* avoid dsci clear here, done after processing */
- q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
- q->irq_ptr->int_parm);
+ q->u.in.queue_start_poll(irq->cdev, q->nr,
+ irq->int_parm);
} else {
- if (!shared_ind(q->irq_ptr))
- xchg(q->irq_ptr->dsci, 0);
+ if (!shared_ind(irq))
+ xchg(irq->dsci, 0);
/*
* Call inbound processing but not directly
@@ -178,8 +178,7 @@ static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
/**
* tiqdio_thinint_handler - thin interrupt handler for qdio
- * @alsi: pointer to adapter local summary indicator
- * @data: NULL
+ * @airq: pointer to adapter interrupt descriptor
*/
static void tiqdio_thinint_handler(struct airq_struct *airq)
{
diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile
index 0a7fb83f35e5..be36f1010d75 100644
--- a/drivers/s390/crypto/Makefile
+++ b/drivers/s390/crypto/Makefile
@@ -10,3 +10,7 @@ zcrypt-objs += zcrypt_msgtype6.o zcrypt_msgtype50.o
obj-$(CONFIG_ZCRYPT) += zcrypt.o
# adapter drivers depend on ap.o and zcrypt.o
obj-$(CONFIG_ZCRYPT) += zcrypt_pcixcc.o zcrypt_cex2a.o zcrypt_cex4.o
+
+# pkey kernel module
+pkey-objs := pkey_api.o
+obj-$(CONFIG_PKEY) += pkey.o
diff --git a/drivers/s390/crypto/ap_asm.h b/drivers/s390/crypto/ap_asm.h
index 7a630047c372..287b4ad0999e 100644
--- a/drivers/s390/crypto/ap_asm.h
+++ b/drivers/s390/crypto/ap_asm.h
@@ -129,7 +129,6 @@ static inline struct ap_queue_status ap_nqap(ap_qid_t qid,
unsigned long long psmid,
void *msg, size_t length)
{
- struct msgblock { char _[length]; };
register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
register struct ap_queue_status reg1 asm ("1");
register unsigned long reg2 asm ("2") = (unsigned long) msg;
@@ -141,8 +140,8 @@ static inline struct ap_queue_status ap_nqap(ap_qid_t qid,
"0: .long 0xb2ad0042\n" /* NQAP */
" brc 2,0b"
: "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
- : "d" (reg4), "d" (reg5), "m" (*(struct msgblock *) msg)
- : "cc");
+ : "d" (reg4), "d" (reg5)
+ : "cc", "memory");
return reg1;
}
@@ -168,7 +167,6 @@ static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
unsigned long long *psmid,
void *msg, size_t length)
{
- struct msgblock { char _[length]; };
register unsigned long reg0 asm("0") = qid | 0x80000000UL;
register struct ap_queue_status reg1 asm ("1");
register unsigned long reg2 asm("2") = 0UL;
@@ -182,8 +180,8 @@ static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
"0: .long 0xb2ae0064\n" /* DQAP */
" brc 6,0b\n"
: "+d" (reg0), "=d" (reg1), "+d" (reg2),
- "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7),
- "=m" (*(struct msgblock *) msg) : : "cc");
+ "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7)
+ : : "cc", "memory");
*psmid = (((unsigned long long) reg6) << 32) + reg7;
return reg1;
}
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 5fa699192864..9be4596d8a08 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -27,7 +27,7 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel_stat.h>
-#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/err.h>
@@ -54,16 +54,7 @@
#include "ap_debug.h"
/*
- * Module description.
- */
-MODULE_AUTHOR("IBM Corporation");
-MODULE_DESCRIPTION("Adjunct Processor Bus driver, " \
- "Copyright IBM Corp. 2006, 2012");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_CRYPTO("z90crypt");
-
-/*
- * Module parameter
+ * Module parameters; note though this file itself isn't modular.
*/
int ap_domain_index = -1; /* Adjunct Processor Domain Index */
static DEFINE_SPINLOCK(ap_domain_lock);
@@ -86,7 +77,6 @@ static bool initialised;
/*
* AP bus related debug feature things.
*/
-static struct dentry *ap_dbf_root;
debug_info_t *ap_dbf_info;
/*
@@ -1117,16 +1107,6 @@ static void ap_config_timeout(unsigned long ptr)
queue_work(system_long_wq, &ap_scan_work);
}
-static void ap_reset_domain(void)
-{
- int i;
-
- if (ap_domain_index == -1 || !ap_test_config_domain(ap_domain_index))
- return;
- for (i = 0; i < AP_DEVICES; i++)
- ap_rapq(AP_MKQID(i, ap_domain_index));
-}
-
static void ap_reset_all(void)
{
int i, j;
@@ -1148,7 +1128,6 @@ static struct reset_call ap_reset_call = {
int __init ap_debug_init(void)
{
- ap_dbf_root = debugfs_create_dir("ap", NULL);
ap_dbf_info = debug_register("ap", 1, 1,
DBF_MAX_SPRINTF_ARGS * sizeof(long));
debug_register_view(ap_dbf_info, &debug_sprintf_view);
@@ -1159,7 +1138,6 @@ int __init ap_debug_init(void)
void ap_debug_exit(void)
{
- debugfs_remove(ap_dbf_root);
debug_unregister(ap_dbf_info);
}
@@ -1270,43 +1248,4 @@ out_free:
kfree(ap_configuration);
return rc;
}
-
-/**
- * ap_modules_exit(): The module termination code
- *
- * Terminates the module.
- */
-void ap_module_exit(void)
-{
- int i;
-
- initialised = false;
- ap_reset_domain();
- ap_poll_thread_stop();
- del_timer_sync(&ap_config_timer);
- hrtimer_cancel(&ap_poll_timer);
- tasklet_kill(&ap_tasklet);
-
- /* first remove queue devices */
- bus_for_each_dev(&ap_bus_type, NULL, NULL,
- __ap_queue_devices_unregister);
- /* now remove the card devices */
- bus_for_each_dev(&ap_bus_type, NULL, NULL,
- __ap_card_devices_unregister);
-
- /* remove bus attributes */
- for (i = 0; ap_bus_attrs[i]; i++)
- bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
- unregister_pm_notifier(&ap_power_notifier);
- root_device_unregister(ap_root_device);
- bus_unregister(&ap_bus_type);
- kfree(ap_configuration);
- unregister_reset_call(&ap_reset_call);
- if (ap_using_interrupts())
- unregister_adapter_interrupt(&ap_airq);
-
- ap_debug_exit();
-}
-
-module_init(ap_module_init);
-module_exit(ap_module_exit);
+device_initcall(ap_module_init);
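
With the module metadata, the exit path and the debugfs directory gone, the AP bus can only be built in, so module_init()/module_exit() give way to device_initcall(). A minimal sketch of that built-in-only pattern, using illustrative names rather than the ap_bus.c ones:

#include <linux/init.h>
#include <linux/kernel.h>

/* Built-in only: no exit routine, no MODULE_* metadata needed. */
static int __init my_bus_init(void)
{
	pr_info("my_bus: initialized (built-in, no unload path)\n");
	return 0;
}
device_initcall(my_bus_init);
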
diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c
index 0110d44172a3..cfa161ccc74e 100644
--- a/drivers/s390/crypto/ap_card.c
+++ b/drivers/s390/crypto/ap_card.c
@@ -58,9 +58,9 @@ static ssize_t ap_functions_show(struct device *dev,
static DEVICE_ATTR(ap_functions, 0444, ap_functions_show, NULL);
-static ssize_t ap_request_count_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t ap_req_count_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct ap_card *ac = to_ap_card(dev);
unsigned int req_cnt;
@@ -72,7 +72,23 @@ static ssize_t ap_request_count_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
}
-static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);
+static ssize_t ap_req_count_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ap_card *ac = to_ap_card(dev);
+ struct ap_queue *aq;
+
+ spin_lock_bh(&ap_list_lock);
+ for_each_ap_queue(aq, ac)
+ aq->total_request_count = 0;
+ spin_unlock_bh(&ap_list_lock);
+ atomic_set(&ac->total_request_count, 0);
+
+ return count;
+}
+
+static DEVICE_ATTR(request_count, 0644, ap_req_count_show, ap_req_count_store);
static ssize_t ap_requestq_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -137,7 +153,7 @@ static const struct attribute_group *ap_card_dev_attr_groups[] = {
NULL
};
-struct device_type ap_card_type = {
+static struct device_type ap_card_type = {
.name = "ap_card",
.groups = ap_card_dev_attr_groups,
};
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index b58a917dc510..480c58a63769 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -459,9 +459,9 @@ EXPORT_SYMBOL(ap_queue_resume);
/*
* AP queue related attributes.
*/
-static ssize_t ap_request_count_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t ap_req_count_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct ap_queue *aq = to_ap_queue(dev);
unsigned int req_cnt;
@@ -472,7 +472,20 @@ static ssize_t ap_request_count_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
}
-static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);
+static ssize_t ap_req_count_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+
+ spin_lock_bh(&aq->lock);
+ aq->total_request_count = 0;
+ spin_unlock_bh(&aq->lock);
+
+ return count;
+}
+
+static DEVICE_ATTR(request_count, 0644, ap_req_count_show, ap_req_count_store);
static ssize_t ap_requestq_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -564,7 +577,7 @@ static const struct attribute_group *ap_queue_dev_attr_groups[] = {
NULL
};
-struct device_type ap_queue_type = {
+static struct device_type ap_queue_type = {
.name = "ap_queue",
.groups = ap_queue_dev_attr_groups,
};
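
Both the card and the queue request_count attributes become writable (mode 0644), and their store handlers reset the counters regardless of the value written. A small user-space sketch of triggering such a reset; the sysfs path below is an assumed example and not taken from the driver.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* assumed path for illustration; adjust to the card/queue of interest */
	const char *path = "/sys/bus/ap/devices/card00/request_count";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* any write resets the counters in the store handlers above */
	if (write(fd, "0\n", 2) != 2)
		perror("write");
	close(fd);
	return 0;
}
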
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
new file mode 100644
index 000000000000..40f1136f5568
--- /dev/null
+++ b/drivers/s390/crypto/pkey_api.c
@@ -0,0 +1,1148 @@
+/*
+ * pkey device driver
+ *
+ * Copyright IBM Corp. 2017
+ * Author(s): Harald Freudenberger
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ */
+
+#define KMSG_COMPONENT "pkey"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/kallsyms.h>
+#include <linux/debugfs.h>
+#include <asm/zcrypt.h>
+#include <asm/cpacf.h>
+#include <asm/pkey.h>
+
+#include "zcrypt_api.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("s390 protected key interface");
+
+/* Size of parameter block used for all cca requests/replies */
+#define PARMBSIZE 512
+
+/* Size of vardata block used for some of the cca requests/replies */
+#define VARDATASIZE 4096
+
+/*
+ * debug feature data and functions
+ */
+
+static debug_info_t *debug_info;
+
+#define DEBUG_DBG(...) debug_sprintf_event(debug_info, 6, ##__VA_ARGS__)
+#define DEBUG_INFO(...) debug_sprintf_event(debug_info, 5, ##__VA_ARGS__)
+#define DEBUG_WARN(...) debug_sprintf_event(debug_info, 4, ##__VA_ARGS__)
+#define DEBUG_ERR(...) debug_sprintf_event(debug_info, 3, ##__VA_ARGS__)
+
+static void __init pkey_debug_init(void)
+{
+ debug_info = debug_register("pkey", 1, 1, 4 * sizeof(long));
+ debug_register_view(debug_info, &debug_sprintf_view);
+ debug_set_level(debug_info, 3);
+}
+
+static void __exit pkey_debug_exit(void)
+{
+ debug_unregister(debug_info);
+}
+
+/* inside view of a secure key token (only type 0x01 version 0x04) */
+struct secaeskeytoken {
+ u8 type; /* 0x01 for internal key token */
+ u8 res0[3];
+ u8 version; /* should be 0x04 */
+ u8 res1[1];
+ u8 flag; /* key flags */
+ u8 res2[1];
+ u64 mkvp; /* master key verification pattern */
+ u8 key[32]; /* key value (encrypted) */
+ u8 cv[8]; /* control vector */
+ u16 bitsize; /* key bit size */
+ u16 keysize; /* key byte size */
+ u8 tvv[4]; /* token validation value */
+} __packed;
+
+/*
+ * Simple check if the token is a valid CCA secure AES key
+ * token. If keybitsize is given, the bitsize of the key is
+ * also checked. Returns 0 on success or errno value on failure.
+ */
+static int check_secaeskeytoken(u8 *token, int keybitsize)
+{
+ struct secaeskeytoken *t = (struct secaeskeytoken *) token;
+
+ if (t->type != 0x01) {
+ DEBUG_ERR(
+ "check_secaeskeytoken secure token check failed, type mismatch 0x%02x != 0x01\n",
+ (int) t->type);
+ return -EINVAL;
+ }
+ if (t->version != 0x04) {
+ DEBUG_ERR(
+ "check_secaeskeytoken secure token check failed, version mismatch 0x%02x != 0x04\n",
+ (int) t->version);
+ return -EINVAL;
+ }
+ if (keybitsize > 0 && t->bitsize != keybitsize) {
+ DEBUG_ERR(
+ "check_secaeskeytoken secure token check failed, bitsize mismatch %d != %d\n",
+ (int) t->bitsize, keybitsize);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Allocate consecutive memory for request CPRB, request param
+ * block, reply CPRB and reply param block and fill in values
+ * for the common fields. Returns 0 on success or errno value
+ * on failure.
+ */
+static int alloc_and_prep_cprbmem(size_t paramblen,
+ u8 **pcprbmem,
+ struct CPRBX **preqCPRB,
+ struct CPRBX **prepCPRB)
+{
+ u8 *cprbmem;
+ size_t cprbplusparamblen = sizeof(struct CPRBX) + paramblen;
+ struct CPRBX *preqcblk, *prepcblk;
+
+ /*
+ * allocate consecutive memory for request CPRB, request param
+ * block, reply CPRB and reply param block
+ */
+ cprbmem = kmalloc(2 * cprbplusparamblen, GFP_KERNEL);
+ if (!cprbmem)
+ return -ENOMEM;
+ memset(cprbmem, 0, 2 * cprbplusparamblen);
+
+ preqcblk = (struct CPRBX *) cprbmem;
+ prepcblk = (struct CPRBX *) (cprbmem + cprbplusparamblen);
+
+ /* fill request cprb struct */
+ preqcblk->cprb_len = sizeof(struct CPRBX);
+ preqcblk->cprb_ver_id = 0x02;
+ memcpy(preqcblk->func_id, "T2", 2);
+ preqcblk->rpl_msgbl = cprbplusparamblen;
+ if (paramblen) {
+ preqcblk->req_parmb =
+ ((u8 *) preqcblk) + sizeof(struct CPRBX);
+ preqcblk->rpl_parmb =
+ ((u8 *) prepcblk) + sizeof(struct CPRBX);
+ }
+
+ *pcprbmem = cprbmem;
+ *preqCPRB = preqcblk;
+ *prepCPRB = prepcblk;
+
+ return 0;
+}
+
+/*
+ * Free the cprb memory allocated with the function above.
+ * If the scrub value is not zero, the memory is filled
+ * with zeros before freeing (useful if there was some
+ * clear key material in there).
+ */
+static void free_cprbmem(void *mem, size_t paramblen, int scrub)
+{
+ if (scrub)
+ memzero_explicit(mem, 2 * (sizeof(struct CPRBX) + paramblen));
+ kfree(mem);
+}
+
+/*
+ * Helper function to prepare the xcrb struct
+ */
+static inline void prep_xcrb(struct ica_xcRB *pxcrb,
+ u16 cardnr,
+ struct CPRBX *preqcblk,
+ struct CPRBX *prepcblk)
+{
+ memset(pxcrb, 0, sizeof(*pxcrb));
+ pxcrb->agent_ID = 0x4341; /* 'CA' */
+ pxcrb->user_defined = (cardnr == 0xFFFF ? AUTOSELECT : cardnr);
+ pxcrb->request_control_blk_length =
+ preqcblk->cprb_len + preqcblk->req_parml;
+ pxcrb->request_control_blk_addr = (void *) preqcblk;
+ pxcrb->reply_control_blk_length = preqcblk->rpl_msgbl;
+ pxcrb->reply_control_blk_addr = (void *) prepcblk;
+}
+
+/*
+ * Helper function which calls zcrypt_send_cprb with
+ * memory management segment adjusted to kernel space
+ * so that the copy_from_user called within this
+ * function does in fact copy from kernel space.

+ */
+static inline int _zcrypt_send_cprb(struct ica_xcRB *xcrb)
+{
+ int rc;
+ mm_segment_t old_fs = get_fs();
+
+ set_fs(KERNEL_DS);
+ rc = zcrypt_send_cprb(xcrb);
+ set_fs(old_fs);
+
+ return rc;
+}
+
+/*
+ * Generate (random) AES secure key.
+ */
+int pkey_genseckey(u16 cardnr, u16 domain,
+ u32 keytype, struct pkey_seckey *seckey)
+{
+ int i, rc, keysize;
+ int seckeysize;
+ u8 *mem;
+ struct CPRBX *preqcblk, *prepcblk;
+ struct ica_xcRB xcrb;
+ struct kgreqparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ struct lv1 {
+ u16 len;
+ char key_form[8];
+ char key_length[8];
+ char key_type1[8];
+ char key_type2[8];
+ } lv1;
+ struct lv2 {
+ u16 len;
+ struct keyid {
+ u16 len;
+ u16 attr;
+ u8 data[SECKEYBLOBSIZE];
+ } keyid[6];
+ } lv2;
+ } *preqparm;
+ struct kgrepparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ struct lv3 {
+ u16 len;
+ u16 keyblocklen;
+ struct {
+ u16 toklen;
+ u16 tokattr;
+ u8 tok[0];
+ /* ... some more data ... */
+ } keyblock;
+ } lv3;
+ } *prepparm;
+
+ /* get already prepared memory for 2 cprbs with param block each */
+ rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
+ if (rc)
+ return rc;
+
+ /* fill request cprb struct */
+ preqcblk->domain = domain;
+
+ /* fill request cprb param block with KG request */
+ preqparm = (struct kgreqparm *) preqcblk->req_parmb;
+ memcpy(preqparm->subfunc_code, "KG", 2);
+ preqparm->rule_array_len = sizeof(preqparm->rule_array_len);
+ preqparm->lv1.len = sizeof(struct lv1);
+ memcpy(preqparm->lv1.key_form, "OP ", 8);
+ switch (keytype) {
+ case PKEY_KEYTYPE_AES_128:
+ keysize = 16;
+ memcpy(preqparm->lv1.key_length, "KEYLN16 ", 8);
+ break;
+ case PKEY_KEYTYPE_AES_192:
+ keysize = 24;
+ memcpy(preqparm->lv1.key_length, "KEYLN24 ", 8);
+ break;
+ case PKEY_KEYTYPE_AES_256:
+ keysize = 32;
+ memcpy(preqparm->lv1.key_length, "KEYLN32 ", 8);
+ break;
+ default:
+ DEBUG_ERR(
+ "pkey_genseckey unknown/unsupported keytype %d\n",
+ keytype);
+ rc = -EINVAL;
+ goto out;
+ }
+ memcpy(preqparm->lv1.key_type1, "AESDATA ", 8);
+ preqparm->lv2.len = sizeof(struct lv2);
+ for (i = 0; i < 6; i++) {
+ preqparm->lv2.keyid[i].len = sizeof(struct keyid);
+ preqparm->lv2.keyid[i].attr = (i == 2 ? 0x30 : 0x10);
+ }
+ preqcblk->req_parml = sizeof(struct kgreqparm);
+
+ /* fill xcrb struct */
+ prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
+
+ /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
+ rc = _zcrypt_send_cprb(&xcrb);
+ if (rc) {
+ DEBUG_ERR(
+ "pkey_genseckey zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n",
+ (int) cardnr, (int) domain, rc);
+ goto out;
+ }
+
+ /* check response returncode and reasoncode */
+ if (prepcblk->ccp_rtcode != 0) {
+ DEBUG_ERR(
+ "pkey_genseckey secure key generate failure, card response %d/%d\n",
+ (int) prepcblk->ccp_rtcode,
+ (int) prepcblk->ccp_rscode);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* process response cprb param block */
+ prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+ prepparm = (struct kgrepparm *) prepcblk->rpl_parmb;
+
+ /* check length of the returned secure key token */
+ seckeysize = prepparm->lv3.keyblock.toklen
+ - sizeof(prepparm->lv3.keyblock.toklen)
+ - sizeof(prepparm->lv3.keyblock.tokattr);
+ if (seckeysize != SECKEYBLOBSIZE) {
+ DEBUG_ERR(
+ "pkey_genseckey secure token size mismatch %d != %d bytes\n",
+ seckeysize, SECKEYBLOBSIZE);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* check secure key token */
+ rc = check_secaeskeytoken(prepparm->lv3.keyblock.tok, 8*keysize);
+ if (rc) {
+ rc = -EIO;
+ goto out;
+ }
+
+ /* copy the generated secure key token */
+ memcpy(seckey->seckey, prepparm->lv3.keyblock.tok, SECKEYBLOBSIZE);
+
+out:
+ free_cprbmem(mem, PARMBSIZE, 0);
+ return rc;
+}
+EXPORT_SYMBOL(pkey_genseckey);
+
+/*
+ * Generate an AES secure key with given key value.
+ */
+int pkey_clr2seckey(u16 cardnr, u16 domain, u32 keytype,
+ const struct pkey_clrkey *clrkey,
+ struct pkey_seckey *seckey)
+{
+ int rc, keysize, seckeysize;
+ u8 *mem;
+ struct CPRBX *preqcblk, *prepcblk;
+ struct ica_xcRB xcrb;
+ struct cmreqparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ char rule_array[8];
+ struct lv1 {
+ u16 len;
+ u8 clrkey[0];
+ } lv1;
+ struct lv2 {
+ u16 len;
+ struct keyid {
+ u16 len;
+ u16 attr;
+ u8 data[SECKEYBLOBSIZE];
+ } keyid;
+ } lv2;
+ } *preqparm;
+ struct lv2 *plv2;
+ struct cmrepparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ struct lv3 {
+ u16 len;
+ u16 keyblocklen;
+ struct {
+ u16 toklen;
+ u16 tokattr;
+ u8 tok[0];
+ /* ... some more data ... */
+ } keyblock;
+ } lv3;
+ } *prepparm;
+
+ /* get already prepared memory for 2 cprbs with param block each */
+ rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
+ if (rc)
+ return rc;
+
+ /* fill request cprb struct */
+ preqcblk->domain = domain;
+
+ /* fill request cprb param block with CM request */
+ preqparm = (struct cmreqparm *) preqcblk->req_parmb;
+ memcpy(preqparm->subfunc_code, "CM", 2);
+ memcpy(preqparm->rule_array, "AES ", 8);
+ preqparm->rule_array_len =
+ sizeof(preqparm->rule_array_len) + sizeof(preqparm->rule_array);
+ switch (keytype) {
+ case PKEY_KEYTYPE_AES_128:
+ keysize = 16;
+ break;
+ case PKEY_KEYTYPE_AES_192:
+ keysize = 24;
+ break;
+ case PKEY_KEYTYPE_AES_256:
+ keysize = 32;
+ break;
+ default:
+ DEBUG_ERR(
+ "pkey_clr2seckey unknown/unsupported keytype %d\n",
+ keytype);
+ rc = -EINVAL;
+ goto out;
+ }
+ preqparm->lv1.len = sizeof(struct lv1) + keysize;
+ memcpy(preqparm->lv1.clrkey, clrkey->clrkey, keysize);
+ plv2 = (struct lv2 *) (((u8 *) &preqparm->lv2) + keysize);
+ plv2->len = sizeof(struct lv2);
+ plv2->keyid.len = sizeof(struct keyid);
+ plv2->keyid.attr = 0x30;
+ preqcblk->req_parml = sizeof(struct cmreqparm) + keysize;
+
+ /* fill xcrb struct */
+ prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
+
+ /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
+ rc = _zcrypt_send_cprb(&xcrb);
+ if (rc) {
+ DEBUG_ERR(
+ "pkey_clr2seckey zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n",
+ (int) cardnr, (int) domain, rc);
+ goto out;
+ }
+
+ /* check response returncode and reasoncode */
+ if (prepcblk->ccp_rtcode != 0) {
+ DEBUG_ERR(
+ "pkey_clr2seckey clear key import failure, card response %d/%d\n",
+ (int) prepcblk->ccp_rtcode,
+ (int) prepcblk->ccp_rscode);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* process response cprb param block */
+ prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+ prepparm = (struct cmrepparm *) prepcblk->rpl_parmb;
+
+ /* check length of the returned secure key token */
+ seckeysize = prepparm->lv3.keyblock.toklen
+ - sizeof(prepparm->lv3.keyblock.toklen)
+ - sizeof(prepparm->lv3.keyblock.tokattr);
+ if (seckeysize != SECKEYBLOBSIZE) {
+ DEBUG_ERR(
+ "pkey_clr2seckey secure token size mismatch %d != %d bytes\n",
+ seckeysize, SECKEYBLOBSIZE);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* check secure key token */
+ rc = check_secaeskeytoken(prepparm->lv3.keyblock.tok, 8*keysize);
+ if (rc) {
+ rc = -EIO;
+ goto out;
+ }
+
+ /* copy the generated secure key token */
+ memcpy(seckey->seckey, prepparm->lv3.keyblock.tok, SECKEYBLOBSIZE);
+
+out:
+ free_cprbmem(mem, PARMBSIZE, 1);
+ return rc;
+}
+EXPORT_SYMBOL(pkey_clr2seckey);
+
+/*
+ * Derive a protected key from the secure key blob.
+ */
+int pkey_sec2protkey(u16 cardnr, u16 domain,
+ const struct pkey_seckey *seckey,
+ struct pkey_protkey *protkey)
+{
+ int rc;
+ u8 *mem;
+ struct CPRBX *preqcblk, *prepcblk;
+ struct ica_xcRB xcrb;
+ struct uskreqparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ struct lv1 {
+ u16 len;
+ u16 attr_len;
+ u16 attr_flags;
+ } lv1;
+ struct lv2 {
+ u16 len;
+ u16 attr_len;
+ u16 attr_flags;
+ u8 token[0]; /* cca secure key token */
+ } lv2 __packed;
+ } *preqparm;
+ struct uskrepparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ struct lv3 {
+ u16 len;
+ u16 attr_len;
+ u16 attr_flags;
+ struct cpacfkeyblock {
+ u8 version; /* version of this struct */
+ u8 flags[2];
+ u8 algo;
+ u8 form;
+ u8 pad1[3];
+ u16 keylen;
+ u8 key[64]; /* the key (keylen bytes) */
+ u16 keyattrlen;
+ u8 keyattr[32];
+ u8 pad2[1];
+ u8 vptype;
+ u8 vp[32]; /* verification pattern */
+ } keyblock;
+ } lv3 __packed;
+ } *prepparm;
+
+ /* get already prepared memory for 2 cprbs with param block each */
+ rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
+ if (rc)
+ return rc;
+
+ /* fill request cprb struct */
+ preqcblk->domain = domain;
+
+ /* fill request cprb param block with USK request */
+ preqparm = (struct uskreqparm *) preqcblk->req_parmb;
+ memcpy(preqparm->subfunc_code, "US", 2);
+ preqparm->rule_array_len = sizeof(preqparm->rule_array_len);
+ preqparm->lv1.len = sizeof(struct lv1);
+ preqparm->lv1.attr_len = sizeof(struct lv1) - sizeof(preqparm->lv1.len);
+ preqparm->lv1.attr_flags = 0x0001;
+ preqparm->lv2.len = sizeof(struct lv2) + SECKEYBLOBSIZE;
+ preqparm->lv2.attr_len = sizeof(struct lv2)
+ - sizeof(preqparm->lv2.len) + SECKEYBLOBSIZE;
+ preqparm->lv2.attr_flags = 0x0000;
+ memcpy(preqparm->lv2.token, seckey->seckey, SECKEYBLOBSIZE);
+ preqcblk->req_parml = sizeof(struct uskreqparm) + SECKEYBLOBSIZE;
+
+ /* fill xcrb struct */
+ prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
+
+ /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
+ rc = _zcrypt_send_cprb(&xcrb);
+ if (rc) {
+ DEBUG_ERR(
+ "pkey_sec2protkey zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n",
+ (int) cardnr, (int) domain, rc);
+ goto out;
+ }
+
+ /* check response returncode and reasoncode */
+ if (prepcblk->ccp_rtcode != 0) {
+ DEBUG_ERR(
+ "pkey_sec2protkey unwrap secure key failure, card response %d/%d\n",
+ (int) prepcblk->ccp_rtcode,
+ (int) prepcblk->ccp_rscode);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* process response cprb param block */
+ prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+ prepparm = (struct uskrepparm *) prepcblk->rpl_parmb;
+
+ /* check the returned keyblock */
+ if (prepparm->lv3.keyblock.version != 0x01) {
+ DEBUG_ERR(
+ "pkey_sec2protkey reply param keyblock version mismatch 0x%02x != 0x01\n",
+ (int) prepparm->lv3.keyblock.version);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* copy the translated protected key */
+ switch (prepparm->lv3.keyblock.keylen) {
+ case 16+32:
+ protkey->type = PKEY_KEYTYPE_AES_128;
+ break;
+ case 24+32:
+ protkey->type = PKEY_KEYTYPE_AES_192;
+ break;
+ case 32+32:
+ protkey->type = PKEY_KEYTYPE_AES_256;
+ break;
+ default:
+ DEBUG_ERR("pkey_sec2protkey unknown/unsupported keytype %d\n",
+ prepparm->lv3.keyblock.keylen);
+ rc = -EIO;
+ goto out;
+ }
+ protkey->len = prepparm->lv3.keyblock.keylen;
+ memcpy(protkey->protkey, prepparm->lv3.keyblock.key, protkey->len);
+
+out:
+ free_cprbmem(mem, PARMBSIZE, 0);
+ return rc;
+}
+EXPORT_SYMBOL(pkey_sec2protkey);
+
+/*
+ * Create a protected key from a clear key value.
+ */
+int pkey_clr2protkey(u32 keytype,
+ const struct pkey_clrkey *clrkey,
+ struct pkey_protkey *protkey)
+{
+ long fc;
+ int keysize;
+ u8 paramblock[64];
+
+ switch (keytype) {
+ case PKEY_KEYTYPE_AES_128:
+ keysize = 16;
+ fc = CPACF_PCKMO_ENC_AES_128_KEY;
+ break;
+ case PKEY_KEYTYPE_AES_192:
+ keysize = 24;
+ fc = CPACF_PCKMO_ENC_AES_192_KEY;
+ break;
+ case PKEY_KEYTYPE_AES_256:
+ keysize = 32;
+ fc = CPACF_PCKMO_ENC_AES_256_KEY;
+ break;
+ default:
+ DEBUG_ERR("pkey_clr2protkey unknown/unsupported keytype %d\n",
+ keytype);
+ return -EINVAL;
+ }
+
+ /* prepare param block */
+ memset(paramblock, 0, sizeof(paramblock));
+ memcpy(paramblock, clrkey->clrkey, keysize);
+
+ /* call the pckmo instruction */
+ cpacf_pckmo(fc, paramblock);
+
+ /* copy created protected key */
+ protkey->type = keytype;
+ protkey->len = keysize + 32;
+ memcpy(protkey->protkey, paramblock, keysize + 32);
+
+ return 0;
+}
+EXPORT_SYMBOL(pkey_clr2protkey);
+
+/*
+ * query cryptographic facility from adapter
+ */
+static int query_crypto_facility(u16 cardnr, u16 domain,
+ const char *keyword,
+ u8 *rarray, size_t *rarraylen,
+ u8 *varray, size_t *varraylen)
+{
+ int rc;
+ u16 len;
+ u8 *mem, *ptr;
+ struct CPRBX *preqcblk, *prepcblk;
+ struct ica_xcRB xcrb;
+ struct fqreqparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ char rule_array[8];
+ struct lv1 {
+ u16 len;
+ u8 data[VARDATASIZE];
+ } lv1;
+ u16 dummylen;
+ } *preqparm;
+ size_t parmbsize = sizeof(struct fqreqparm);
+ struct fqrepparm {
+ u8 subfunc_code[2];
+ u8 lvdata[0];
+ } *prepparm;
+
+ /* get already prepared memory for 2 cprbs with param block each */
+ rc = alloc_and_prep_cprbmem(parmbsize, &mem, &preqcblk, &prepcblk);
+ if (rc)
+ return rc;
+
+ /* fill request cprb struct */
+ preqcblk->domain = domain;
+
+ /* fill request cprb param block with FQ request */
+ preqparm = (struct fqreqparm *) preqcblk->req_parmb;
+ memcpy(preqparm->subfunc_code, "FQ", 2);
+ strncpy(preqparm->rule_array, keyword, sizeof(preqparm->rule_array));
+ preqparm->rule_array_len =
+ sizeof(preqparm->rule_array_len) + sizeof(preqparm->rule_array);
+ preqparm->lv1.len = sizeof(preqparm->lv1);
+ preqparm->dummylen = sizeof(preqparm->dummylen);
+ preqcblk->req_parml = parmbsize;
+
+ /* fill xcrb struct */
+ prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
+
+ /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
+ rc = _zcrypt_send_cprb(&xcrb);
+ if (rc) {
+ DEBUG_ERR(
+ "query_crypto_facility zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n",
+ (int) cardnr, (int) domain, rc);
+ goto out;
+ }
+
+ /* check response returncode and reasoncode */
+ if (prepcblk->ccp_rtcode != 0) {
+ DEBUG_ERR(
+ "query_crypto_facility unwrap secure key failure, card response %d/%d\n",
+ (int) prepcblk->ccp_rtcode,
+ (int) prepcblk->ccp_rscode);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* process response cprb param block */
+ prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
+ prepparm = (struct fqrepparm *) prepcblk->rpl_parmb;
+ ptr = prepparm->lvdata;
+
+ /* check and possibly copy reply rule array */
+ len = *((u16 *) ptr);
+ if (len > sizeof(u16)) {
+ ptr += sizeof(u16);
+ len -= sizeof(u16);
+ if (rarray && rarraylen && *rarraylen > 0) {
+ *rarraylen = (len > *rarraylen ? *rarraylen : len);
+ memcpy(rarray, ptr, *rarraylen);
+ }
+ ptr += len;
+ }
+ /* check and possibly copy reply var array */
+ len = *((u16 *) ptr);
+ if (len > sizeof(u16)) {
+ ptr += sizeof(u16);
+ len -= sizeof(u16);
+ if (varray && varraylen && *varraylen > 0) {
+ *varraylen = (len > *varraylen ? *varraylen : len);
+ memcpy(varray, ptr, *varraylen);
+ }
+ ptr += len;
+ }
+
+out:
+ free_cprbmem(mem, parmbsize, 0);
+ return rc;
+}
+
+/*
+ * Fetch just the mkvp value via query_crypto_facility from adapter.
+ */
+static int fetch_mkvp(u16 cardnr, u16 domain, u64 *mkvp)
+{
+ int rc, found = 0;
+ size_t rlen, vlen;
+ u8 *rarray, *varray, *pg;
+
+ pg = (u8 *) __get_free_page(GFP_KERNEL);
+ if (!pg)
+ return -ENOMEM;
+ rarray = pg;
+ varray = pg + PAGE_SIZE/2;
+ rlen = vlen = PAGE_SIZE/2;
+
+ rc = query_crypto_facility(cardnr, domain, "STATICSA",
+ rarray, &rlen, varray, &vlen);
+ if (rc == 0 && rlen > 8*8 && vlen > 184+8) {
+ if (rarray[64] == '2') {
+ /* current master key state is valid */
+ *mkvp = *((u64 *)(varray + 184));
+ found = 1;
+ }
+ }
+
+ free_page((unsigned long) pg);
+
+ return found ? 0 : -ENOENT;
+}
+
+/* struct to hold cached mkvp info for each card/domain */
+struct mkvp_info {
+ struct list_head list;
+ u16 cardnr;
+ u16 domain;
+ u64 mkvp;
+};
+
+/* a list with mkvp_info entries */
+static LIST_HEAD(mkvp_list);
+static DEFINE_SPINLOCK(mkvp_list_lock);
+
+static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 *mkvp)
+{
+ int rc = -ENOENT;
+ struct mkvp_info *ptr;
+
+ spin_lock_bh(&mkvp_list_lock);
+ list_for_each_entry(ptr, &mkvp_list, list) {
+ if (ptr->cardnr == cardnr &&
+ ptr->domain == domain) {
+ *mkvp = ptr->mkvp;
+ rc = 0;
+ break;
+ }
+ }
+ spin_unlock_bh(&mkvp_list_lock);
+
+ return rc;
+}
+
+static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp)
+{
+ int found = 0;
+ struct mkvp_info *ptr;
+
+ spin_lock_bh(&mkvp_list_lock);
+ list_for_each_entry(ptr, &mkvp_list, list) {
+ if (ptr->cardnr == cardnr &&
+ ptr->domain == domain) {
+ ptr->mkvp = mkvp;
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ ptr = kmalloc(sizeof(*ptr), GFP_ATOMIC);
+ if (!ptr) {
+ spin_unlock_bh(&mkvp_list_lock);
+ return;
+ }
+ ptr->cardnr = cardnr;
+ ptr->domain = domain;
+ ptr->mkvp = mkvp;
+ list_add(&ptr->list, &mkvp_list);
+ }
+ spin_unlock_bh(&mkvp_list_lock);
+}
+
+static void mkvp_cache_scrub(u16 cardnr, u16 domain)
+{
+ struct mkvp_info *ptr;
+
+ spin_lock_bh(&mkvp_list_lock);
+ list_for_each_entry(ptr, &mkvp_list, list) {
+ if (ptr->cardnr == cardnr &&
+ ptr->domain == domain) {
+ list_del(&ptr->list);
+ kfree(ptr);
+ break;
+ }
+ }
+ spin_unlock_bh(&mkvp_list_lock);
+}
+
+static void __exit mkvp_cache_free(void)
+{
+ struct mkvp_info *ptr, *pnext;
+
+ spin_lock_bh(&mkvp_list_lock);
+ list_for_each_entry_safe(ptr, pnext, &mkvp_list, list) {
+ list_del(&ptr->list);
+ kfree(ptr);
+ }
+ spin_unlock_bh(&mkvp_list_lock);
+}
+
+/*
+ * Search for a matching crypto card based on the Master Key
+ * Verification Pattern provided inside a secure key.
+ */
+int pkey_findcard(const struct pkey_seckey *seckey,
+ u16 *pcardnr, u16 *pdomain, int verify)
+{
+ struct secaeskeytoken *t = (struct secaeskeytoken *) seckey;
+ struct zcrypt_device_matrix *device_matrix;
+ u16 card, dom;
+ u64 mkvp;
+ int i, rc;
+
+ /* mkvp must not be zero */
+ if (t->mkvp == 0)
+ return -EINVAL;
+
+ /* fetch status of all crypto cards */
+ device_matrix = kmalloc(sizeof(struct zcrypt_device_matrix),
+ GFP_KERNEL);
+ if (!device_matrix)
+ return -ENOMEM;
+ zcrypt_device_status_mask(device_matrix);
+
+ /* walk through all crypto cards */
+ for (i = 0; i < MAX_ZDEV_ENTRIES; i++) {
+ card = AP_QID_CARD(device_matrix->device[i].qid);
+ dom = AP_QID_QUEUE(device_matrix->device[i].qid);
+ if (device_matrix->device[i].online &&
+ device_matrix->device[i].functions & 0x04) {
+ /* an enabled CCA Coprocessor card */
+ /* try cached mkvp */
+ if (mkvp_cache_fetch(card, dom, &mkvp) == 0 &&
+ t->mkvp == mkvp) {
+ if (!verify)
+ break;
+ /* verify: fetch mkvp from adapter */
+ if (fetch_mkvp(card, dom, &mkvp) == 0) {
+ mkvp_cache_update(card, dom, mkvp);
+ if (t->mkvp == mkvp)
+ break;
+ }
+ }
+ } else {
+ /* Card is offline and/or not a CCA card. */
+ /* del mkvp entry from cache if it exists */
+ mkvp_cache_scrub(card, dom);
+ }
+ }
+ if (i >= MAX_ZDEV_ENTRIES) {
+ /* nothing found, so this time without cache */
+ for (i = 0; i < MAX_ZDEV_ENTRIES; i++) {
+ if (!(device_matrix->device[i].online &&
+ device_matrix->device[i].functions & 0x04))
+ continue;
+ card = AP_QID_CARD(device_matrix->device[i].qid);
+ dom = AP_QID_QUEUE(device_matrix->device[i].qid);
+ /* fresh fetch mkvp from adapter */
+ if (fetch_mkvp(card, dom, &mkvp) == 0) {
+ mkvp_cache_update(card, dom, mkvp);
+ if (t->mkvp == mkvp)
+ break;
+ }
+ }
+ }
+ if (i < MAX_ZDEV_ENTRIES) {
+ if (pcardnr)
+ *pcardnr = card;
+ if (pdomain)
+ *pdomain = dom;
+ rc = 0;
+ } else
+ rc = -ENODEV;
+
+ kfree(device_matrix);
+ return rc;
+}
+EXPORT_SYMBOL(pkey_findcard);
+
+/*
+ * Find card and transform secure key into protected key.
+ */
+int pkey_skey2pkey(const struct pkey_seckey *seckey,
+ struct pkey_protkey *protkey)
+{
+ u16 cardnr, domain;
+ int rc, verify;
+
+ /*
+ * The pkey_sec2protkey call may fail when a card has been
+ * addressed where the master key was changed after last fetch
+ * of the mkvp into the cache. So first try without verify then
+ * with verify enabled (thus refreshing the mkvp for each card).
+ */
+ for (verify = 0; verify < 2; verify++) {
+ rc = pkey_findcard(seckey, &cardnr, &domain, verify);
+ if (rc)
+ continue;
+ rc = pkey_sec2protkey(cardnr, domain, seckey, protkey);
+ if (rc == 0)
+ break;
+ }
+
+ if (rc)
+ DEBUG_DBG("pkey_skey2pkey failed rc=%d\n", rc);
+
+ return rc;
+}
+EXPORT_SYMBOL(pkey_skey2pkey);
+
+/*
+ * File io functions
+ */
+
+static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ int rc;
+
+ switch (cmd) {
+ case PKEY_GENSECK: {
+ struct pkey_genseck __user *ugs = (void __user *) arg;
+ struct pkey_genseck kgs;
+
+ if (copy_from_user(&kgs, ugs, sizeof(kgs)))
+ return -EFAULT;
+ rc = pkey_genseckey(kgs.cardnr, kgs.domain,
+ kgs.keytype, &kgs.seckey);
+ DEBUG_DBG("pkey_ioctl pkey_genseckey()=%d\n", rc);
+ if (rc)
+ break;
+ if (copy_to_user(ugs, &kgs, sizeof(kgs)))
+ return -EFAULT;
+ break;
+ }
+ case PKEY_CLR2SECK: {
+ struct pkey_clr2seck __user *ucs = (void __user *) arg;
+ struct pkey_clr2seck kcs;
+
+ if (copy_from_user(&kcs, ucs, sizeof(kcs)))
+ return -EFAULT;
+ rc = pkey_clr2seckey(kcs.cardnr, kcs.domain, kcs.keytype,
+ &kcs.clrkey, &kcs.seckey);
+ DEBUG_DBG("pkey_ioctl pkey_clr2seckey()=%d\n", rc);
+ if (rc)
+ break;
+ if (copy_to_user(ucs, &kcs, sizeof(kcs)))
+ return -EFAULT;
+ memzero_explicit(&kcs, sizeof(kcs));
+ break;
+ }
+ case PKEY_SEC2PROTK: {
+ struct pkey_sec2protk __user *usp = (void __user *) arg;
+ struct pkey_sec2protk ksp;
+
+ if (copy_from_user(&ksp, usp, sizeof(ksp)))
+ return -EFAULT;
+ rc = pkey_sec2protkey(ksp.cardnr, ksp.domain,
+ &ksp.seckey, &ksp.protkey);
+ DEBUG_DBG("pkey_ioctl pkey_sec2protkey()=%d\n", rc);
+ if (rc)
+ break;
+ if (copy_to_user(usp, &ksp, sizeof(ksp)))
+ return -EFAULT;
+ break;
+ }
+ case PKEY_CLR2PROTK: {
+ struct pkey_clr2protk __user *ucp = (void __user *) arg;
+ struct pkey_clr2protk kcp;
+
+ if (copy_from_user(&kcp, ucp, sizeof(kcp)))
+ return -EFAULT;
+ rc = pkey_clr2protkey(kcp.keytype,
+ &kcp.clrkey, &kcp.protkey);
+ DEBUG_DBG("pkey_ioctl pkey_clr2protkey()=%d\n", rc);
+ if (rc)
+ break;
+ if (copy_to_user(ucp, &kcp, sizeof(kcp)))
+ return -EFAULT;
+ memzero_explicit(&kcp, sizeof(kcp));
+ break;
+ }
+ case PKEY_FINDCARD: {
+ struct pkey_findcard __user *ufc = (void __user *) arg;
+ struct pkey_findcard kfc;
+
+ if (copy_from_user(&kfc, ufc, sizeof(kfc)))
+ return -EFAULT;
+ rc = pkey_findcard(&kfc.seckey,
+ &kfc.cardnr, &kfc.domain, 1);
+ DEBUG_DBG("pkey_ioctl pkey_findcard()=%d\n", rc);
+ if (rc)
+ break;
+ if (copy_to_user(ufc, &kfc, sizeof(kfc)))
+ return -EFAULT;
+ break;
+ }
+ case PKEY_SKEY2PKEY: {
+ struct pkey_skey2pkey __user *usp = (void __user *) arg;
+ struct pkey_skey2pkey ksp;
+
+ if (copy_from_user(&ksp, usp, sizeof(ksp)))
+ return -EFAULT;
+ rc = pkey_skey2pkey(&ksp.seckey, &ksp.protkey);
+ DEBUG_DBG("pkey_ioctl pkey_skey2pkey()=%d\n", rc);
+ if (rc)
+ break;
+ if (copy_to_user(usp, &ksp, sizeof(ksp)))
+ return -EFAULT;
+ break;
+ }
+ default:
+ /* unknown/unsupported ioctl cmd */
+ return -ENOTTY;
+ }
+
+ return rc;
+}
+
+/*
+ * Sysfs and file io operations
+ */
+static const struct file_operations pkey_fops = {
+ .owner = THIS_MODULE,
+ .open = nonseekable_open,
+ .llseek = no_llseek,
+ .unlocked_ioctl = pkey_unlocked_ioctl,
+};
+
+static struct miscdevice pkey_dev = {
+ .name = "pkey",
+ .minor = MISC_DYNAMIC_MINOR,
+ .mode = 0666,
+ .fops = &pkey_fops,
+};
+
+/*
+ * Module init
+ */
+int __init pkey_init(void)
+{
+ cpacf_mask_t pckmo_functions;
+
+ /* check for pckmo instructions available */
+ if (!cpacf_query(CPACF_PCKMO, &pckmo_functions))
+ return -EOPNOTSUPP;
+ if (!cpacf_test_func(&pckmo_functions, CPACF_PCKMO_ENC_AES_128_KEY) ||
+ !cpacf_test_func(&pckmo_functions, CPACF_PCKMO_ENC_AES_192_KEY) ||
+ !cpacf_test_func(&pckmo_functions, CPACF_PCKMO_ENC_AES_256_KEY))
+ return -EOPNOTSUPP;
+
+ pkey_debug_init();
+
+ return misc_register(&pkey_dev);
+}
+
+/*
+ * Module exit
+ */
+static void __exit pkey_exit(void)
+{
+ misc_deregister(&pkey_dev);
+ mkvp_cache_free();
+ pkey_debug_exit();
+}
+
+module_init(pkey_init);
+module_exit(pkey_exit);
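
The new pkey misc device is driven through ioctls on /dev/pkey. Below is a hedged user-space sketch that generates a random AES-256 secure key and derives a protected key from it, using only the ioctl names and structure fields visible in pkey_unlocked_ioctl() above. It assumes the asm/pkey.h uapi header is installed and that 0xFFFF requests card/domain autoselection; substitute a concrete card and domain for your setup if it does not.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/pkey.h>

int main(void)
{
	struct pkey_genseck kgs;
	struct pkey_sec2protk ksp;
	int fd = open("/dev/pkey", O_RDWR);

	if (fd < 0) {
		perror("open /dev/pkey");
		return 1;
	}

	/* generate a random AES-256 secure key */
	memset(&kgs, 0, sizeof(kgs));
	kgs.cardnr = 0xFFFF;	/* assumed autoselect, see prep_xcrb() above */
	kgs.domain = 0xFFFF;	/* assumed autoselect; else use a real domain */
	kgs.keytype = PKEY_KEYTYPE_AES_256;
	if (ioctl(fd, PKEY_GENSECK, &kgs) < 0) {
		perror("PKEY_GENSECK");
		close(fd);
		return 1;
	}

	/* turn the secure key into a protected key */
	memset(&ksp, 0, sizeof(ksp));
	ksp.cardnr = 0xFFFF;
	ksp.domain = 0xFFFF;
	ksp.seckey = kgs.seckey;
	if (ioctl(fd, PKEY_SEC2PROTK, &ksp) < 0) {
		perror("PKEY_SEC2PROTK");
		close(fd);
		return 1;
	}

	printf("protected key type %u, length %u bytes\n",
	       ksp.protkey.type, ksp.protkey.len);
	close(fd);
	return 0;
}
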
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 51eece9af577..93015f85d4a6 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -81,7 +81,6 @@ EXPORT_SYMBOL(zcrypt_rescan_req);
static LIST_HEAD(zcrypt_ops_list);
/* Zcrypt related debug feature stuff. */
-static struct dentry *zcrypt_dbf_root;
debug_info_t *zcrypt_dbf_info;
/**
@@ -201,7 +200,7 @@ static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
unsigned weight, unsigned pref_weight)
{
if (!pref_zc)
- return 0;
+ return false;
weight += atomic_read(&zc->load);
pref_weight += atomic_read(&pref_zc->load);
if (weight == pref_weight)
@@ -215,7 +214,7 @@ static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
unsigned weight, unsigned pref_weight)
{
if (!pref_zq)
- return 0;
+ return false;
weight += atomic_read(&zq->load);
pref_weight += atomic_read(&pref_zq->load);
if (weight == pref_weight)
@@ -375,7 +374,7 @@ out:
return rc;
}
-static long zcrypt_send_cprb(struct ica_xcRB *xcRB)
+long zcrypt_send_cprb(struct ica_xcRB *xcRB)
{
struct zcrypt_card *zc, *pref_zc;
struct zcrypt_queue *zq, *pref_zq;
@@ -445,6 +444,7 @@ out:
AP_QID_CARD(qid), AP_QID_QUEUE(qid));
return rc;
}
+EXPORT_SYMBOL(zcrypt_send_cprb);
static bool is_desired_ep11_card(unsigned int dev_id,
unsigned short target_num,
@@ -620,7 +620,7 @@ out:
return rc;
}
-static void zcrypt_device_status_mask(struct zcrypt_device_matrix *matrix)
+void zcrypt_device_status_mask(struct zcrypt_device_matrix *matrix)
{
struct zcrypt_card *zc;
struct zcrypt_queue *zq;
@@ -668,6 +668,7 @@ static void zcrypt_qdepth_mask(char qdepth[AP_DEVICES])
memset(qdepth, 0, sizeof(char) * AP_DEVICES);
spin_lock(&zcrypt_list_lock);
+ local_bh_disable();
for_each_zcrypt_card(zc) {
for_each_zcrypt_queue(zq, zc) {
if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
@@ -679,6 +680,7 @@ static void zcrypt_qdepth_mask(char qdepth[AP_DEVICES])
spin_unlock(&zq->queue->lock);
}
}
+ local_bh_enable();
spin_unlock(&zcrypt_list_lock);
}
@@ -689,6 +691,7 @@ static void zcrypt_perdev_reqcnt(int reqcnt[AP_DEVICES])
memset(reqcnt, 0, sizeof(int) * AP_DEVICES);
spin_lock(&zcrypt_list_lock);
+ local_bh_disable();
for_each_zcrypt_card(zc) {
for_each_zcrypt_queue(zq, zc) {
if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
@@ -699,6 +702,7 @@ static void zcrypt_perdev_reqcnt(int reqcnt[AP_DEVICES])
spin_unlock(&zq->queue->lock);
}
}
+ local_bh_enable();
spin_unlock(&zcrypt_list_lock);
}
@@ -710,6 +714,7 @@ static int zcrypt_pendingq_count(void)
pendingq_count = 0;
spin_lock(&zcrypt_list_lock);
+ local_bh_disable();
for_each_zcrypt_card(zc) {
for_each_zcrypt_queue(zq, zc) {
if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
@@ -719,6 +724,7 @@ static int zcrypt_pendingq_count(void)
spin_unlock(&zq->queue->lock);
}
}
+ local_bh_enable();
spin_unlock(&zcrypt_list_lock);
return pendingq_count;
}
@@ -731,6 +737,7 @@ static int zcrypt_requestq_count(void)
requestq_count = 0;
spin_lock(&zcrypt_list_lock);
+ local_bh_disable();
for_each_zcrypt_card(zc) {
for_each_zcrypt_queue(zq, zc) {
if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
@@ -740,6 +747,7 @@ static int zcrypt_requestq_count(void)
spin_unlock(&zq->queue->lock);
}
}
+ local_bh_enable();
spin_unlock(&zcrypt_list_lock);
return requestq_count;
}
@@ -1419,7 +1427,6 @@ void zcrypt_rng_device_remove(void)
int __init zcrypt_debug_init(void)
{
- zcrypt_dbf_root = debugfs_create_dir("zcrypt", NULL);
zcrypt_dbf_info = debug_register("zcrypt", 1, 1,
DBF_MAX_SPRINTF_ARGS * sizeof(long));
debug_register_view(zcrypt_dbf_info, &debug_sprintf_view);
@@ -1430,7 +1437,6 @@ int __init zcrypt_debug_init(void)
void zcrypt_debug_exit(void)
{
- debugfs_remove(zcrypt_dbf_root);
debug_unregister(zcrypt_dbf_info);
}
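
The counting helpers now disable bottom halves while walking the queues: each zq->queue->lock is also taken from tasklet context, so acquiring it with bottom halves enabled could let a softirq on the same CPU spin on a lock the helper already holds. A generic sketch of that locking pattern, with illustrative names rather than the zcrypt ones:

#include <linux/spinlock.h>
#include <linux/bottom_half.h>
#include <linux/list.h>

struct obj {
	struct list_head node;
	spinlock_t lock;	/* also taken from tasklet context */
	int pending;
};

static LIST_HEAD(obj_list);
static DEFINE_SPINLOCK(list_lock);

static int count_pending(void)
{
	struct obj *o;
	int n = 0;

	spin_lock(&list_lock);
	local_bh_disable();	/* keep tasklets off this CPU while we hold o->lock */
	list_for_each_entry(o, &obj_list, node) {
		spin_lock(&o->lock);
		n += o->pending;
		spin_unlock(&o->lock);
	}
	local_bh_enable();
	spin_unlock(&list_lock);
	return n;
}
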
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index 274a59051534..6c94efd23eac 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -190,5 +190,7 @@ void zcrypt_msgtype_unregister(struct zcrypt_ops *);
struct zcrypt_ops *zcrypt_msgtype(unsigned char *, int);
int zcrypt_api_init(void);
void zcrypt_api_exit(void);
+long zcrypt_send_cprb(struct ica_xcRB *xcRB);
+void zcrypt_device_status_mask(struct zcrypt_device_matrix *devstatus);
#endif /* _ZCRYPT_API_H_ */
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 6d4b68c483f3..e7addea8741b 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -281,8 +281,6 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
#define QETH_HIGH_WATERMARK_PACK 5
#define QETH_WATERMARK_PACK_FUZZ 1
-#define QETH_IP_HEADER_SIZE 40
-
/* large receive scatter gather copy break */
#define QETH_RX_SG_CB (PAGE_SIZE >> 1)
#define QETH_RX_PULL_LEN 256
@@ -674,8 +672,6 @@ struct qeth_card_info {
int broadcast_capable;
int unique_id;
struct qeth_card_blkt blkt;
- __u32 csum_mask;
- __u32 tx_csum_mask;
enum qeth_ipa_promisc_modes promisc_mode;
__u32 diagass_support;
__u32 hwtrap;
@@ -917,7 +913,6 @@ void qeth_clear_thread_running_bit(struct qeth_card *, unsigned long);
int qeth_core_hardsetup_card(struct qeth_card *);
void qeth_print_status_message(struct qeth_card *);
int qeth_init_qdio_queues(struct qeth_card *);
-int qeth_send_startlan(struct qeth_card *);
int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
int (*reply_cb)
(struct qeth_card *, struct qeth_reply *, unsigned long),
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index e33558313834..315d8a2db7c0 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -2944,7 +2944,7 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
}
EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
-int qeth_send_startlan(struct qeth_card *card)
+static int qeth_send_startlan(struct qeth_card *card)
{
int rc;
struct qeth_cmd_buffer *iob;
@@ -2957,7 +2957,6 @@ int qeth_send_startlan(struct qeth_card *card)
rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
return rc;
}
-EXPORT_SYMBOL_GPL(qeth_send_startlan);
static int qeth_default_setadapterparms_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
@@ -5087,6 +5086,20 @@ retriable:
goto out;
}
+ rc = qeth_send_startlan(card);
+ if (rc) {
+ QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+ if (rc == IPA_RC_LAN_OFFLINE) {
+ dev_warn(&card->gdev->dev,
+ "The LAN is offline\n");
+ card->lan_online = 0;
+ } else {
+ rc = -ENODEV;
+ goto out;
+ }
+ } else
+ card->lan_online = 1;
+
card->options.ipa4.supported_funcs = 0;
card->options.ipa6.supported_funcs = 0;
card->options.adp.supported_funcs = 0;
@@ -5098,14 +5111,14 @@ retriable:
if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
rc = qeth_query_setadapterparms(card);
if (rc < 0) {
- QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+ QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
goto out;
}
}
if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
rc = qeth_query_setdiagass(card);
if (rc < 0) {
- QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
+ QETH_DBF_TEXT_(SETUP, 2, "8err%d", rc);
goto out;
}
}
@@ -5289,18 +5302,6 @@ int qeth_setassparms_cb(struct qeth_card *card,
if (cmd->hdr.prot_version == QETH_PROT_IPV6)
card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
}
- if (cmd->data.setassparms.hdr.assist_no == IPA_INBOUND_CHECKSUM &&
- cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
- card->info.csum_mask = cmd->data.setassparms.data.flags_32bit;
- QETH_CARD_TEXT_(card, 3, "csum:%d", card->info.csum_mask);
- }
- if (cmd->data.setassparms.hdr.assist_no == IPA_OUTBOUND_CHECKSUM &&
- cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
- card->info.tx_csum_mask =
- cmd->data.setassparms.data.flags_32bit;
- QETH_CARD_TEXT_(card, 3, "tcsu:%d", card->info.tx_csum_mask);
- }
-
return 0;
}
EXPORT_SYMBOL_GPL(qeth_setassparms_cb);
@@ -6060,23 +6061,96 @@ int qeth_core_ethtool_get_settings(struct net_device *netdev,
}
EXPORT_SYMBOL_GPL(qeth_core_ethtool_get_settings);
+/* Callback to handle checksum offload command reply from OSA card.
+ * Verify that required features have been enabled on the card.
+ * Return error in hdr->return_code as this value is checked by caller.
+ *
+ * Always returns zero to indicate no further messages from the OSA card.
+ */
+static int qeth_ipa_checksum_run_cmd_cb(struct qeth_card *card,
+ struct qeth_reply *reply,
+ unsigned long data)
+{
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+ struct qeth_checksum_cmd *chksum_cb =
+ (struct qeth_checksum_cmd *)reply->param;
+
+ QETH_CARD_TEXT(card, 4, "chkdoccb");
+ if (cmd->hdr.return_code)
+ return 0;
+
+ memset(chksum_cb, 0, sizeof(*chksum_cb));
+ if (cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
+ chksum_cb->supported =
+ cmd->data.setassparms.data.chksum.supported;
+ QETH_CARD_TEXT_(card, 3, "strt:%x", chksum_cb->supported);
+ }
+ if (cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_ENABLE) {
+ chksum_cb->supported =
+ cmd->data.setassparms.data.chksum.supported;
+ chksum_cb->enabled =
+ cmd->data.setassparms.data.chksum.enabled;
+ QETH_CARD_TEXT_(card, 3, "supp:%x", chksum_cb->supported);
+ QETH_CARD_TEXT_(card, 3, "enab:%x", chksum_cb->enabled);
+ }
+ return 0;
+}
+
+/* Send command to OSA card and check results. */
+static int qeth_ipa_checksum_run_cmd(struct qeth_card *card,
+ enum qeth_ipa_funcs ipa_func,
+ __u16 cmd_code, long data,
+ struct qeth_checksum_cmd *chksum_cb)
+{
+ struct qeth_cmd_buffer *iob;
+ int rc = -ENOMEM;
+
+ QETH_CARD_TEXT(card, 4, "chkdocmd");
+ iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
+ sizeof(__u32), QETH_PROT_IPV4);
+ if (iob)
+ rc = qeth_send_setassparms(card, iob, sizeof(__u32), data,
+ qeth_ipa_checksum_run_cmd_cb,
+ chksum_cb);
+ return rc;
+}
+
static int qeth_send_checksum_on(struct qeth_card *card, int cstype)
{
- long rxtx_arg;
+ const __u32 required_features = QETH_IPA_CHECKSUM_IP_HDR |
+ QETH_IPA_CHECKSUM_UDP |
+ QETH_IPA_CHECKSUM_TCP;
+ struct qeth_checksum_cmd chksum_cb;
int rc;
- rc = qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_START, 0);
+ rc = qeth_ipa_checksum_run_cmd(card, cstype, IPA_CMD_ASS_START, 0,
+ &chksum_cb);
+ if (!rc) {
+ if ((required_features & chksum_cb.supported) !=
+ required_features)
+ rc = -EIO;
+ else if (!(QETH_IPA_CHECKSUM_LP2LP & chksum_cb.supported) &&
+ cstype == IPA_INBOUND_CHECKSUM)
+ dev_warn(&card->gdev->dev,
+ "Hardware checksumming is performed only if %s and its peer use different OSA Express 3 ports\n",
+ QETH_CARD_IFNAME(card));
+ }
if (rc) {
+ qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_STOP, 0);
dev_warn(&card->gdev->dev,
"Starting HW checksumming for %s failed, using SW checksumming\n",
QETH_CARD_IFNAME(card));
return rc;
}
- rxtx_arg = (cstype == IPA_OUTBOUND_CHECKSUM) ? card->info.tx_csum_mask
- : card->info.csum_mask;
- rc = qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_ENABLE,
- rxtx_arg);
+ rc = qeth_ipa_checksum_run_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
+ chksum_cb.supported, &chksum_cb);
+ if (!rc) {
+ if ((required_features & chksum_cb.enabled) !=
+ required_features)
+ rc = -EIO;
+ }
if (rc) {
+ qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_STOP, 0);
dev_warn(&card->gdev->dev,
"Enabling HW checksumming for %s failed, using SW checksumming\n",
QETH_CARD_IFNAME(card));
@@ -6090,19 +6164,10 @@ static int qeth_send_checksum_on(struct qeth_card *card, int cstype)
static int qeth_set_ipa_csum(struct qeth_card *card, int on, int cstype)
{
- int rc;
-
- if (on) {
- rc = qeth_send_checksum_on(card, cstype);
- if (rc)
- return -EIO;
- } else {
- rc = qeth_send_simple_setassparms(card, cstype,
- IPA_CMD_ASS_STOP, 0);
- if (rc)
- return -EIO;
- }
- return 0;
+ int rc = (on) ? qeth_send_checksum_on(card, cstype)
+ : qeth_send_simple_setassparms(card, cstype,
+ IPA_CMD_ASS_STOP, 0);
+ return rc ? -EIO : 0;
}
static int qeth_set_ipa_tso(struct qeth_card *card, int on)
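
The rewritten enable path no longer trusts cached checksum masks; it requires the IP header, UDP and TCP bits to be present both in the supported mask returned for IPA_CMD_ASS_START and in the enabled mask returned for IPA_CMD_ASS_ENABLE, and stops the assist again on any mismatch. The mask check itself reduces to the small sketch below; the constants are illustrative stand-ins mirroring the new enum.

#include <stdbool.h>
#include <stdint.h>

#define CSUM_IP_HDR	0x0002u
#define CSUM_UDP	0x0008u
#define CSUM_TCP	0x0010u

/* true only if every required checksum bit is set in the card's reply mask */
static bool csum_features_ok(uint32_t reply_mask)
{
	const uint32_t required = CSUM_IP_HDR | CSUM_UDP | CSUM_TCP;

	return (reply_mask & required) == required;
}
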
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 6cccc9a49ede..bc69d0a338ad 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -352,11 +352,28 @@ struct qeth_arp_query_info {
char *udata;
};
+/* IPA set assist segmentation bit definitions for receive and
+ * transmit checksum offloading.
+ */
+enum qeth_ipa_checksum_bits {
+ QETH_IPA_CHECKSUM_IP_HDR = 0x0002,
+ QETH_IPA_CHECKSUM_UDP = 0x0008,
+ QETH_IPA_CHECKSUM_TCP = 0x0010,
+ QETH_IPA_CHECKSUM_LP2LP = 0x0020
+};
+
+/* IPA Assist checksum offload reply layout. */
+struct qeth_checksum_cmd {
+ __u32 supported;
+ __u32 enabled;
+} __packed;
+
/* SETASSPARMS IPA Command: */
struct qeth_ipacmd_setassparms {
struct qeth_ipacmd_setassparms_hdr hdr;
union {
__u32 flags_32bit;
+ struct qeth_checksum_cmd chksum;
struct qeth_arp_cache_entry add_arp_entry;
struct qeth_arp_query_data query_arp;
__u8 ip[16];
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 9c921c2833f1..bea483307618 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -27,9 +27,6 @@
static int qeth_l2_set_offline(struct ccwgroup_device *);
static int qeth_l2_stop(struct net_device *);
-static int qeth_l2_send_delmac(struct qeth_card *, __u8 *);
-static int qeth_l2_send_setdelmac(struct qeth_card *, __u8 *,
- enum qeth_ipa_cmds);
static void qeth_l2_set_rx_mode(struct net_device *);
static int qeth_l2_recover(void *);
static void qeth_bridgeport_query_support(struct qeth_card *card);
@@ -165,13 +162,70 @@ static int qeth_setdel_makerc(struct qeth_card *card, int retcode)
return rc;
}
+static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
+ enum qeth_ipa_cmds ipacmd)
+{
+ struct qeth_ipa_cmd *cmd;
+ struct qeth_cmd_buffer *iob;
+
+ QETH_CARD_TEXT(card, 2, "L2sdmac");
+ iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
+ if (!iob)
+ return -ENOMEM;
+ cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+ cmd->data.setdelmac.mac_length = OSA_ADDR_LEN;
+ memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN);
+ return qeth_setdel_makerc(card, qeth_send_ipa_cmd(card, iob,
+ NULL, NULL));
+}
+
+static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
+{
+ int rc;
+
+ QETH_CARD_TEXT(card, 2, "L2Setmac");
+ rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC);
+ if (rc == 0) {
+ card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
+ memcpy(card->dev->dev_addr, mac, OSA_ADDR_LEN);
+ dev_info(&card->gdev->dev,
+ "MAC address %pM successfully registered on device %s\n",
+ card->dev->dev_addr, card->dev->name);
+ } else {
+ card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
+ switch (rc) {
+ case -EEXIST:
+ dev_warn(&card->gdev->dev,
+ "MAC address %pM already exists\n", mac);
+ break;
+ case -EPERM:
+ dev_warn(&card->gdev->dev,
+ "MAC address %pM is not authorized\n", mac);
+ break;
+ }
+ }
+ return rc;
+}
+
+static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac)
+{
+ int rc;
+
+ QETH_CARD_TEXT(card, 2, "L2Delmac");
+ if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
+ return 0;
+ rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC);
+ if (rc == 0)
+ card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
+ return rc;
+}
+
static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac)
{
int rc;
QETH_CARD_TEXT(card, 2, "L2Sgmac");
- rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
- IPA_CMD_SETGMAC));
+ rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC);
if (rc == -EEXIST)
QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s\n",
mac, QETH_CARD_IFNAME(card));
@@ -186,8 +240,7 @@ static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac)
int rc;
QETH_CARD_TEXT(card, 2, "L2Dgmac");
- rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
- IPA_CMD_DELGMAC));
+ rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC);
if (rc)
QETH_DBF_MESSAGE(2,
"Could not delete group MAC %pM on %s: %d\n",
@@ -195,28 +248,27 @@ static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac)
return rc;
}
-static inline u32 qeth_l2_mac_hash(const u8 *addr)
+static int qeth_l2_write_mac(struct qeth_card *card, struct qeth_mac *mac)
{
- return get_unaligned((u32 *)(&addr[2]));
+ if (mac->is_uc) {
+ return qeth_l2_send_setdelmac(card, mac->mac_addr,
+ IPA_CMD_SETVMAC);
+ } else {
+ return qeth_l2_send_setgroupmac(card, mac->mac_addr);
+ }
}
-static int qeth_l2_write_mac(struct qeth_card *card, struct qeth_mac *mac)
+static int qeth_l2_remove_mac(struct qeth_card *card, struct qeth_mac *mac)
{
-
- int rc;
-
if (mac->is_uc) {
- rc = qeth_setdel_makerc(card,
- qeth_l2_send_setdelmac(card, mac->mac_addr,
- IPA_CMD_SETVMAC));
+ return qeth_l2_send_setdelmac(card, mac->mac_addr,
+ IPA_CMD_DELVMAC);
} else {
- rc = qeth_setdel_makerc(card,
- qeth_l2_send_setgroupmac(card, mac->mac_addr));
+ return qeth_l2_send_delgroupmac(card, mac->mac_addr);
}
- return rc;
}
-static void qeth_l2_del_all_macs(struct qeth_card *card, int del)
+static void qeth_l2_del_all_macs(struct qeth_card *card)
{
struct qeth_mac *mac;
struct hlist_node *tmp;
@@ -224,19 +276,17 @@ static void qeth_l2_del_all_macs(struct qeth_card *card, int del)
spin_lock_bh(&card->mclock);
hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) {
- if (del) {
- if (mac->is_uc)
- qeth_l2_send_setdelmac(card, mac->mac_addr,
- IPA_CMD_DELVMAC);
- else
- qeth_l2_send_delgroupmac(card, mac->mac_addr);
- }
hash_del(&mac->hnode);
kfree(mac);
}
spin_unlock_bh(&card->mclock);
}
+static inline u32 qeth_l2_mac_hash(const u8 *addr)
+{
+ return get_unaligned((u32 *)(&addr[2]));
+}
+
static inline int qeth_l2_get_cast_type(struct qeth_card *card,
struct sk_buff *skb)
{
@@ -425,7 +475,7 @@ static void qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
card->state = CARD_STATE_SOFTSETUP;
}
if (card->state == CARD_STATE_SOFTSETUP) {
- qeth_l2_del_all_macs(card, 0);
+ qeth_l2_del_all_macs(card);
qeth_clear_ipacmd_list(card);
card->state = CARD_STATE_HARDSETUP;
}
@@ -577,65 +627,6 @@ out:
return work_done;
}
-static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
- enum qeth_ipa_cmds ipacmd)
-{
- struct qeth_ipa_cmd *cmd;
- struct qeth_cmd_buffer *iob;
-
- QETH_CARD_TEXT(card, 2, "L2sdmac");
- iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
- if (!iob)
- return -ENOMEM;
- cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
- cmd->data.setdelmac.mac_length = OSA_ADDR_LEN;
- memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN);
- return qeth_send_ipa_cmd(card, iob, NULL, NULL);
-}
-
-static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
-{
- int rc;
-
- QETH_CARD_TEXT(card, 2, "L2Setmac");
- rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
- IPA_CMD_SETVMAC));
- if (rc == 0) {
- card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
- memcpy(card->dev->dev_addr, mac, OSA_ADDR_LEN);
- dev_info(&card->gdev->dev,
- "MAC address %pM successfully registered on device %s\n",
- card->dev->dev_addr, card->dev->name);
- } else {
- card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
- switch (rc) {
- case -EEXIST:
- dev_warn(&card->gdev->dev,
- "MAC address %pM already exists\n", mac);
- break;
- case -EPERM:
- dev_warn(&card->gdev->dev,
- "MAC address %pM is not authorized\n", mac);
- break;
- }
- }
- return rc;
-}
-
-static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac)
-{
- int rc;
-
- QETH_CARD_TEXT(card, 2, "L2Delmac");
- if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
- return 0;
- rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
- IPA_CMD_DELVMAC));
- if (rc == 0)
- card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
- return rc;
-}
-
static int qeth_l2_request_initial_mac(struct qeth_card *card)
{
int rc = 0;
@@ -794,14 +785,7 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) {
if (mac->disp_flag == QETH_DISP_ADDR_DELETE) {
- if (!mac->is_uc)
- rc = qeth_l2_send_delgroupmac(card,
- mac->mac_addr);
- else {
- rc = qeth_l2_send_setdelmac(card, mac->mac_addr,
- IPA_CMD_DELVMAC);
- }
-
+ qeth_l2_remove_mac(card, mac);
hash_del(&mac->hnode);
kfree(mac);
@@ -1193,21 +1177,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
/* softsetup */
QETH_DBF_TEXT(SETUP, 2, "softsetp");
- rc = qeth_send_startlan(card);
- if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
- if (rc == 0xe080) {
- dev_warn(&card->gdev->dev,
- "The LAN is offline\n");
- card->lan_online = 0;
- goto contin;
- }
- rc = -ENODEV;
- goto out_remove;
- } else
- card->lan_online = 1;
-
-contin:
if ((card->info.type == QETH_CARD_TYPE_OSD) ||
(card->info.type == QETH_CARD_TYPE_OSX)) {
rc = qeth_l2_start_ipassists(card);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index ac37d050e765..06d0addcc058 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -3227,21 +3227,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
/* softsetup */
QETH_DBF_TEXT(SETUP, 2, "softsetp");
- rc = qeth_send_startlan(card);
- if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
- if (rc == 0xe080) {
- dev_warn(&card->gdev->dev,
- "The LAN is offline\n");
- card->lan_online = 0;
- goto contin;
- }
- rc = -ENODEV;
- goto out_remove;
- } else
- card->lan_online = 1;
-
-contin:
rc = qeth_l3_setadapter_parms(card);
if (rc)
QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index 0e00a5ce0f00..05e9471e3d3f 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -250,9 +250,6 @@ static ssize_t qeth_l3_dev_hsuid_show(struct device *dev,
if (card->info.type != QETH_CARD_TYPE_IQD)
return -EPERM;
- if (card->state == CARD_STATE_DOWN)
- return -EPERM;
-
memcpy(tmp_hsuid, card->options.hsuid, sizeof(tmp_hsuid));
EBCASC(tmp_hsuid, 8);
return sprintf(buf, "%s\n", tmp_hsuid);
@@ -692,15 +689,15 @@ static ssize_t qeth_l3_dev_vipa_add_show(char *buf, struct qeth_card *card,
enum qeth_prot_versions proto)
{
struct qeth_ipaddr *ipaddr;
- struct hlist_node *tmp;
char addr_str[40];
+ int str_len = 0;
int entry_len; /* length of 1 entry string, differs between v4 and v6 */
- int i = 0;
+ int i;
entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
entry_len += 2; /* \n + terminator */
spin_lock_bh(&card->ip_lock);
- hash_for_each_safe(card->ip_htable, i, tmp, ipaddr, hnode) {
+ hash_for_each(card->ip_htable, i, ipaddr, hnode) {
if (ipaddr->proto != proto)
continue;
if (ipaddr->type != QETH_IP_TYPE_VIPA)
@@ -708,16 +705,17 @@ static ssize_t qeth_l3_dev_vipa_add_show(char *buf, struct qeth_card *card,
/* String must not be longer than PAGE_SIZE. So we check if
* string length gets near PAGE_SIZE. Then we can savely display
* the next IPv6 address (worst case, compared to IPv4) */
- if ((PAGE_SIZE - i) <= entry_len)
+ if ((PAGE_SIZE - str_len) <= entry_len)
break;
qeth_l3_ipaddr_to_string(proto, (const u8 *)&ipaddr->u,
addr_str);
- i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str);
+ str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "%s\n",
+ addr_str);
}
spin_unlock_bh(&card->ip_lock);
- i += snprintf(buf + i, PAGE_SIZE - i, "\n");
+ str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "\n");
- return i;
+ return str_len;
}
static ssize_t qeth_l3_dev_vipa_add4_show(struct device *dev,
@@ -854,15 +852,15 @@ static ssize_t qeth_l3_dev_rxip_add_show(char *buf, struct qeth_card *card,
enum qeth_prot_versions proto)
{
struct qeth_ipaddr *ipaddr;
- struct hlist_node *tmp;
char addr_str[40];
+ int str_len = 0;
int entry_len; /* length of 1 entry string, differs between v4 and v6 */
- int i = 0;
+ int i;
entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
entry_len += 2; /* \n + terminator */
spin_lock_bh(&card->ip_lock);
- hash_for_each_safe(card->ip_htable, i, tmp, ipaddr, hnode) {
+ hash_for_each(card->ip_htable, i, ipaddr, hnode) {
if (ipaddr->proto != proto)
continue;
if (ipaddr->type != QETH_IP_TYPE_RXIP)
@@ -870,16 +868,17 @@ static ssize_t qeth_l3_dev_rxip_add_show(char *buf, struct qeth_card *card,
/* String must not be longer than PAGE_SIZE. So we check if
* string length gets near PAGE_SIZE. Then we can savely display
* the next IPv6 address (worst case, compared to IPv4) */
- if ((PAGE_SIZE - i) <= entry_len)
+ if ((PAGE_SIZE - str_len) <= entry_len)
break;
qeth_l3_ipaddr_to_string(proto, (const u8 *)&ipaddr->u,
addr_str);
- i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str);
+ str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "%s\n",
+ addr_str);
}
spin_unlock_bh(&card->ip_lock);
- i += snprintf(buf + i, PAGE_SIZE - i, "\n");
+ str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "\n");
- return i;
+ return str_len;
}
static ssize_t qeth_l3_dev_rxip_add4_show(struct device *dev,
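
Both show routines in this file previously reused the hash_for_each_safe() bucket index as the sysfs buffer offset; the fix keeps the bucket index (i) and the string length (str_len) separate and switches to plain hash_for_each(), since nothing is deleted while iterating under ip_lock. A generic, self-contained sketch of that accumulation pattern follows; all demo_* names are made up for illustration.

#include <linux/hashtable.h>
#include <linux/kernel.h>
#include <linux/mm.h>

struct demo_entry {
	struct hlist_node hnode;
	char name[40];
};

static DEFINE_HASHTABLE(demo_table, 4);

static ssize_t demo_list_show(char *buf)
{
	struct demo_entry *e;
	int str_len = 0;	/* offset into the PAGE_SIZE sysfs buffer */
	int i;			/* hash bucket index, owned by hash_for_each() */

	hash_for_each(demo_table, i, e, hnode) {
		/* stop while the worst-case entry plus '\n' still fits */
		if (PAGE_SIZE - str_len <= sizeof(e->name) + 2)
			break;
		str_len += snprintf(buf + str_len, PAGE_SIZE - str_len,
				    "%s\n", e->name);
	}
	str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "\n");
	return str_len;
}
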
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 75f820ca17b7..27ff38f839fc 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -1583,7 +1583,7 @@ out:
int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
{
struct zfcp_qdio *qdio = wka_port->adapter->qdio;
- struct zfcp_fsf_req *req = NULL;
+ struct zfcp_fsf_req *req;
int retval = -EIO;
spin_lock_irq(&qdio->req_q_lock);
@@ -1612,7 +1612,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
zfcp_fsf_req_free(req);
out:
spin_unlock_irq(&qdio->req_q_lock);
- if (req && !IS_ERR(req))
+ if (!retval)
zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id);
return retval;
}
@@ -1638,7 +1638,7 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
{
struct zfcp_qdio *qdio = wka_port->adapter->qdio;
- struct zfcp_fsf_req *req = NULL;
+ struct zfcp_fsf_req *req;
int retval = -EIO;
spin_lock_irq(&qdio->req_q_lock);
@@ -1667,7 +1667,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
zfcp_fsf_req_free(req);
out:
spin_unlock_irq(&qdio->req_q_lock);
- if (req && !IS_ERR(req))
+ if (!retval)
zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id);
return retval;
}
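
Both WKA-port hunks replace the "req && !IS_ERR(req)" test with "!retval": once the submit path has freed req on failure, a non-NULL pointer no longer proves the request is still valid, whereas retval is zero only when the request was actually issued. A hedged sketch of that convention, where demo_build_req()/demo_send_req() are hypothetical stand-ins rather than zfcp's API:

static int demo_open_wka_port(struct zfcp_fc_wka_port *wka_port)
{
	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
	struct zfcp_fsf_req *req;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	req = demo_build_req(qdio);		/* hypothetical helper */
	if (IS_ERR(req)) {
		retval = PTR_ERR(req);		/* req was never valid */
		goto out;
	}
	retval = demo_send_req(req);		/* hypothetical helper */
	if (retval)
		zfcp_fsf_req_free(req);		/* req is dangling from here on */
out:
	spin_unlock_irq(&qdio->req_q_lock);
	if (!retval)				/* the only state where req is live */
		zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id);
	return retval;
}
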
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 07ffdbb5107f..0678cf714c0e 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -330,6 +330,7 @@ static struct scsi_host_template zfcp_scsi_host_template = {
.module = THIS_MODULE,
.name = "zfcp",
.queuecommand = zfcp_scsi_queuecommand,
+ .eh_timed_out = fc_eh_timed_out,
.eh_abort_handler = zfcp_scsi_eh_abort_handler,
.eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
.eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler,
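
The added .eh_timed_out hook delegates command-timeout policy to the FC transport class. Below is a minimal sketch of how an FC low-level driver wires that up in its host template; the demo_* names are invented, while fc_eh_timed_out() is the helper from scsi_transport_fc that the hunk above registers.

#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

static int demo_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
{
	scmd->result = DID_NO_CONNECT << 16;	/* stub: fail every command */
	scmd->scsi_done(scmd);
	return 0;
}

static struct scsi_host_template demo_fc_sht = {
	.module		= THIS_MODULE,
	.name		= "demo-fc",
	.queuecommand	= demo_queuecommand,
	/* let the FC transport decide whether a timed-out command should
	 * be retried (e.g. while the rport is blocked) or escalated */
	.eh_timed_out	= fc_eh_timed_out,
};
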
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index 070c4da95f48..648373cde4a1 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -661,7 +661,7 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
ret = virtio_ccw_register_adapter_ind(vcdev, vqs, nvqs, ccw);
if (ret)
/* no error, just fall back to legacy interrupts */
- vcdev->is_thinint = 0;
+ vcdev->is_thinint = false;
}
if (!vcdev->is_thinint) {
/* Register queue indicators with host. */