Diffstat (limited to 'arch/s390/kernel')
-rw-r--r--  arch/s390/kernel/Makefile                |   4
-rw-r--r--  arch/s390/kernel/base.S                  |   3
-rw-r--r--  arch/s390/kernel/cache.c                 | 390
-rw-r--r--  arch/s390/kernel/compat_signal.c         |   2
-rw-r--r--  arch/s390/kernel/dis.c                   |   9
-rw-r--r--  arch/s390/kernel/early.c                 |  20
-rw-r--r--  arch/s390/kernel/entry.h                 |   4
-rw-r--r--  arch/s390/kernel/ftrace.c                | 108
-rw-r--r--  arch/s390/kernel/head.S                  |   4
-rw-r--r--  arch/s390/kernel/ipl.c                   |  11
-rw-r--r--  arch/s390/kernel/jump_label.c            |  63
-rw-r--r--  arch/s390/kernel/kprobes.c               |   3
-rw-r--r--  arch/s390/kernel/machine_kexec.c         |  19
-rw-r--r--  arch/s390/kernel/mcount.S                |   2
-rw-r--r--  arch/s390/kernel/module.c                |   2
-rw-r--r--  arch/s390/kernel/process.c               |  18
-rw-r--r--  arch/s390/kernel/processor.c             |  10
-rw-r--r--  arch/s390/kernel/sclp.S                  |   3
-rw-r--r--  arch/s390/kernel/setup.c                 |   4
-rw-r--r--  arch/s390/kernel/signal.c                |   2
-rw-r--r--  arch/s390/kernel/smp.c                   | 315
-rw-r--r--  arch/s390/kernel/sysinfo.c               |  37
-rw-r--r--  arch/s390/kernel/topology.c              | 181
-rw-r--r--  arch/s390/kernel/vdso64/clock_gettime.S  |   6
-rw-r--r--  arch/s390/kernel/vtime.c                 |  58
25 files changed, 707 insertions, 571 deletions
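The largest single-file change below, cache.c, drops the private sysfs code in favor of the generic cacheinfo framework; its ci_leaf_init() derives number_of_sets as size / line_size / associativity from values read with the ecag (extract CPU attribute) instruction. A minimal user-space sketch of that arithmetic, with assumed stand-in constants where the kernel queries ecag:

/* Sketch of the ci_leaf_init() set arithmetic from the cache.c
 * rewrite below; size, line size and associativity are assumed
 * stand-ins for the values the kernel reads with ecag. */
#include <stdio.h>

int main(void)
{
	unsigned int size = 96 << 10;	/* assumed 96K cache */
	unsigned int line_size = 256;	/* coherency_line_size */
	unsigned int ways = 6;		/* ways_of_associativity */
	unsigned int sets = size / line_size / ways;

	printf("number_of_sets=%u\n", sets);	/* 98304 / 256 / 6 = 64 */
	return 0;
}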
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index 204c43a4c245..31fab2676fe9 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -4,8 +4,8 @@ ifdef CONFIG_FUNCTION_TRACER # Don't trace early setup code and tracing code -CFLAGS_REMOVE_early.o = -pg -CFLAGS_REMOVE_ftrace.o = -pg +CFLAGS_REMOVE_early.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE) endif # diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S index 797a823a2275..f74a53d339b0 100644 --- a/arch/s390/kernel/base.S +++ b/arch/s390/kernel/base.S @@ -97,7 +97,8 @@ ENTRY(diag308_reset) lg %r4,0(%r4) # Save PSW sturg %r4,%r3 # Use sturg, because of large pages lghi %r1,1 - diag %r1,%r1,0x308 + lghi %r0,0 + diag %r0,%r1,0x308 .Lrestart_part2: lhi %r0,0 # Load r0 with zero lhi %r1,2 # Use mode 2 = ESAME (dump) diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c index c0b03c28d157..0969d113b3d6 100644 --- a/arch/s390/kernel/cache.c +++ b/arch/s390/kernel/cache.c @@ -5,37 +5,11 @@ * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> */ -#include <linux/notifier.h> #include <linux/seq_file.h> -#include <linux/init.h> -#include <linux/list.h> -#include <linux/slab.h> #include <linux/cpu.h> +#include <linux/cacheinfo.h> #include <asm/facility.h> -struct cache { - unsigned long size; - unsigned int line_size; - unsigned int associativity; - unsigned int nr_sets; - unsigned int level : 3; - unsigned int type : 2; - unsigned int private : 1; - struct list_head list; -}; - -struct cache_dir { - struct kobject *kobj; - struct cache_index_dir *index; -}; - -struct cache_index_dir { - struct kobject kobj; - int cpu; - struct cache *cache; - struct cache_index_dir *next; -}; - enum { CACHE_SCOPE_NOTEXISTS, CACHE_SCOPE_PRIVATE, @@ -44,10 +18,10 @@ enum { }; enum { - CACHE_TYPE_SEPARATE, - CACHE_TYPE_DATA, - CACHE_TYPE_INSTRUCTION, - CACHE_TYPE_UNIFIED, + CTYPE_SEPARATE, + CTYPE_DATA, + CTYPE_INSTRUCTION, + CTYPE_UNIFIED, }; enum { @@ -70,37 +44,57 @@ struct cache_info { }; #define CACHE_MAX_LEVEL 8 - union cache_topology { struct cache_info ci[CACHE_MAX_LEVEL]; unsigned long long raw; }; static const char * const cache_type_string[] = { - "Data", + "", "Instruction", + "Data", + "", "Unified", }; -static struct cache_dir *cache_dir_cpu[NR_CPUS]; -static LIST_HEAD(cache_list); +static const enum cache_type cache_type_map[] = { + [CTYPE_SEPARATE] = CACHE_TYPE_SEPARATE, + [CTYPE_DATA] = CACHE_TYPE_DATA, + [CTYPE_INSTRUCTION] = CACHE_TYPE_INST, + [CTYPE_UNIFIED] = CACHE_TYPE_UNIFIED, +}; void show_cacheinfo(struct seq_file *m) { - struct cache *cache; - int index = 0; + struct cpu_cacheinfo *this_cpu_ci; + struct cacheinfo *cache; + int idx; - list_for_each_entry(cache, &cache_list, list) { - seq_printf(m, "cache%-11d: ", index); + get_online_cpus(); + this_cpu_ci = get_cpu_cacheinfo(cpumask_any(cpu_online_mask)); + for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) { + cache = this_cpu_ci->info_list + idx; + seq_printf(m, "cache%-11d: ", idx); seq_printf(m, "level=%d ", cache->level); seq_printf(m, "type=%s ", cache_type_string[cache->type]); - seq_printf(m, "scope=%s ", cache->private ? "Private" : "Shared"); - seq_printf(m, "size=%luK ", cache->size >> 10); - seq_printf(m, "line_size=%u ", cache->line_size); - seq_printf(m, "associativity=%d", cache->associativity); + seq_printf(m, "scope=%s ", + cache->disable_sysfs ? 
"Shared" : "Private"); + seq_printf(m, "size=%dK ", cache->size >> 10); + seq_printf(m, "line_size=%u ", cache->coherency_line_size); + seq_printf(m, "associativity=%d", cache->ways_of_associativity); seq_puts(m, "\n"); - index++; } + put_online_cpus(); +} + +static inline enum cache_type get_cache_type(struct cache_info *ci, int level) +{ + if (level >= CACHE_MAX_LEVEL) + return CACHE_TYPE_NOCACHE; + ci += level; + if (ci->scope != CACHE_SCOPE_SHARED && ci->scope != CACHE_SCOPE_PRIVATE) + return CACHE_TYPE_NOCACHE; + return cache_type_map[ci->type]; } static inline unsigned long ecag(int ai, int li, int ti) @@ -113,277 +107,71 @@ static inline unsigned long ecag(int ai, int li, int ti) return val; } -static int __init cache_add(int level, int private, int type) +static void ci_leaf_init(struct cacheinfo *this_leaf, int private, + enum cache_type type, unsigned int level, int cpu) { - struct cache *cache; - int ti; + int ti, num_sets; - cache = kzalloc(sizeof(*cache), GFP_KERNEL); - if (!cache) - return -ENOMEM; - if (type == CACHE_TYPE_INSTRUCTION) + if (type == CACHE_TYPE_INST) ti = CACHE_TI_INSTRUCTION; else ti = CACHE_TI_UNIFIED; - cache->size = ecag(EXTRACT_SIZE, level, ti); - cache->line_size = ecag(EXTRACT_LINE_SIZE, level, ti); - cache->associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti); - cache->nr_sets = cache->size / cache->associativity; - cache->nr_sets /= cache->line_size; - cache->private = private; - cache->level = level + 1; - cache->type = type - 1; - list_add_tail(&cache->list, &cache_list); - return 0; -} - -static void __init cache_build_info(void) -{ - struct cache *cache, *next; + this_leaf->level = level + 1; + this_leaf->type = type; + this_leaf->coherency_line_size = ecag(EXTRACT_LINE_SIZE, level, ti); + this_leaf->ways_of_associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti); + this_leaf->size = ecag(EXTRACT_SIZE, level, ti); + num_sets = this_leaf->size / this_leaf->coherency_line_size; + num_sets /= this_leaf->ways_of_associativity; + this_leaf->number_of_sets = num_sets; + cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map); + if (!private) + this_leaf->disable_sysfs = true; +} + +int init_cache_level(unsigned int cpu) +{ + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); + unsigned int level = 0, leaves = 0; union cache_topology ct; - int level, private, rc; + enum cache_type ctype; + if (!this_cpu_ci) + return -EINVAL; ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0); - for (level = 0; level < CACHE_MAX_LEVEL; level++) { - switch (ct.ci[level].scope) { - case CACHE_SCOPE_SHARED: - private = 0; - break; - case CACHE_SCOPE_PRIVATE: - private = 1; + do { + ctype = get_cache_type(&ct.ci[0], level); + if (ctype == CACHE_TYPE_NOCACHE) break; - default: - return; - } - if (ct.ci[level].type == CACHE_TYPE_SEPARATE) { - rc = cache_add(level, private, CACHE_TYPE_DATA); - rc |= cache_add(level, private, CACHE_TYPE_INSTRUCTION); - } else { - rc = cache_add(level, private, ct.ci[level].type); - } - if (rc) - goto error; - } - return; -error: - list_for_each_entry_safe(cache, next, &cache_list, list) { - list_del(&cache->list); - kfree(cache); - } -} - -static struct cache_dir *cache_create_cache_dir(int cpu) -{ - struct cache_dir *cache_dir; - struct kobject *kobj = NULL; - struct device *dev; - - dev = get_cpu_device(cpu); - if (!dev) - goto out; - kobj = kobject_create_and_add("cache", &dev->kobj); - if (!kobj) - goto out; - cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL); - if (!cache_dir) - goto out; - cache_dir->kobj = kobj; - cache_dir_cpu[cpu] = cache_dir; - 
return cache_dir; -out: - kobject_put(kobj); - return NULL; -} - -static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *kobj) -{ - return container_of(kobj, struct cache_index_dir, kobj); -} - -static void cache_index_release(struct kobject *kobj) -{ - struct cache_index_dir *index; - - index = kobj_to_cache_index_dir(kobj); - kfree(index); -} - -static ssize_t cache_index_show(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - struct kobj_attribute *kobj_attr; - - kobj_attr = container_of(attr, struct kobj_attribute, attr); - return kobj_attr->show(kobj, kobj_attr, buf); -} - -#define DEFINE_CACHE_ATTR(_name, _format, _value) \ -static ssize_t cache_##_name##_show(struct kobject *kobj, \ - struct kobj_attribute *attr, \ - char *buf) \ -{ \ - struct cache_index_dir *index; \ - \ - index = kobj_to_cache_index_dir(kobj); \ - return sprintf(buf, _format, _value); \ -} \ -static struct kobj_attribute cache_##_name##_attr = \ - __ATTR(_name, 0444, cache_##_name##_show, NULL); - -DEFINE_CACHE_ATTR(size, "%luK\n", index->cache->size >> 10); -DEFINE_CACHE_ATTR(coherency_line_size, "%u\n", index->cache->line_size); -DEFINE_CACHE_ATTR(number_of_sets, "%u\n", index->cache->nr_sets); -DEFINE_CACHE_ATTR(ways_of_associativity, "%u\n", index->cache->associativity); -DEFINE_CACHE_ATTR(type, "%s\n", cache_type_string[index->cache->type]); -DEFINE_CACHE_ATTR(level, "%d\n", index->cache->level); - -static ssize_t shared_cpu_map_func(struct kobject *kobj, int type, char *buf) -{ - struct cache_index_dir *index; - int len; - - index = kobj_to_cache_index_dir(kobj); - len = type ? - cpulist_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu)) : - cpumask_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu)); - len += sprintf(&buf[len], "\n"); - return len; -} - -static ssize_t shared_cpu_map_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) -{ - return shared_cpu_map_func(kobj, 0, buf); -} -static struct kobj_attribute cache_shared_cpu_map_attr = - __ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL); - -static ssize_t shared_cpu_list_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) -{ - return shared_cpu_map_func(kobj, 1, buf); -} -static struct kobj_attribute cache_shared_cpu_list_attr = - __ATTR(shared_cpu_list, 0444, shared_cpu_list_show, NULL); - -static struct attribute *cache_index_default_attrs[] = { - &cache_type_attr.attr, - &cache_size_attr.attr, - &cache_number_of_sets_attr.attr, - &cache_ways_of_associativity_attr.attr, - &cache_level_attr.attr, - &cache_coherency_line_size_attr.attr, - &cache_shared_cpu_map_attr.attr, - &cache_shared_cpu_list_attr.attr, - NULL, -}; - -static const struct sysfs_ops cache_index_ops = { - .show = cache_index_show, -}; - -static struct kobj_type cache_index_type = { - .sysfs_ops = &cache_index_ops, - .release = cache_index_release, - .default_attrs = cache_index_default_attrs, -}; - -static int cache_create_index_dir(struct cache_dir *cache_dir, - struct cache *cache, int index, int cpu) -{ - struct cache_index_dir *index_dir; - int rc; - - index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL); - if (!index_dir) - return -ENOMEM; - index_dir->cache = cache; - index_dir->cpu = cpu; - rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type, - cache_dir->kobj, "index%d", index); - if (rc) - goto out; - index_dir->next = cache_dir->index; - cache_dir->index = index_dir; - return 0; -out: - kfree(index_dir); - return rc; -} - -static int cache_add_cpu(int cpu) -{ - struct cache_dir *cache_dir; - 
struct cache *cache; - int rc, index = 0; - - if (list_empty(&cache_list)) - return 0; - cache_dir = cache_create_cache_dir(cpu); - if (!cache_dir) - return -ENOMEM; - list_for_each_entry(cache, &cache_list, list) { - if (!cache->private) - break; - rc = cache_create_index_dir(cache_dir, cache, index, cpu); - if (rc) - return rc; - index++; - } + /* Separate instruction and data caches */ + leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1; + } while (++level < CACHE_MAX_LEVEL); + this_cpu_ci->num_levels = level; + this_cpu_ci->num_leaves = leaves; return 0; } -static void cache_remove_cpu(int cpu) -{ - struct cache_index_dir *index, *next; - struct cache_dir *cache_dir; - - cache_dir = cache_dir_cpu[cpu]; - if (!cache_dir) - return; - index = cache_dir->index; - while (index) { - next = index->next; - kobject_put(&index->kobj); - index = next; - } - kobject_put(cache_dir->kobj); - kfree(cache_dir); - cache_dir_cpu[cpu] = NULL; -} - -static int cache_hotplug(struct notifier_block *nfb, unsigned long action, - void *hcpu) +int populate_cache_leaves(unsigned int cpu) { - int cpu = (long)hcpu; - int rc = 0; + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); + struct cacheinfo *this_leaf = this_cpu_ci->info_list; + unsigned int level, idx, pvt; + union cache_topology ct; + enum cache_type ctype; - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_ONLINE: - rc = cache_add_cpu(cpu); - if (rc) - cache_remove_cpu(cpu); - break; - case CPU_DEAD: - cache_remove_cpu(cpu); - break; + ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0); + for (idx = 0, level = 0; level < this_cpu_ci->num_levels && + idx < this_cpu_ci->num_leaves; idx++, level++) { + if (!this_leaf) + return -EINVAL; + pvt = (ct.ci[level].scope == CACHE_SCOPE_PRIVATE) ? 1 : 0; + ctype = get_cache_type(&ct.ci[0], level); + if (ctype == CACHE_TYPE_SEPARATE) { + ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_DATA, level, cpu); + ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_INST, level, cpu); + } else { + ci_leaf_init(this_leaf++, pvt, ctype, level, cpu); + } } - return rc ? 
NOTIFY_BAD : NOTIFY_OK; -} - -static int __init cache_init(void) -{ - int cpu; - - if (!test_facility(34)) - return 0; - cache_build_info(); - - cpu_notifier_register_begin(); - for_each_online_cpu(cpu) - cache_add_cpu(cpu); - __hotcpu_notifier(cache_hotplug, 0); - cpu_notifier_register_done(); return 0; } -device_initcall(cache_init); diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index 34d5fa7b01b5..bc1df12dd4f8 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c @@ -209,7 +209,7 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs) int i; /* Alwys make any pending restarted system call return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; if (__copy_from_user(&user_sregs, &sregs->regs, sizeof(user_sregs))) return -EFAULT; diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c index f3762937dd82..533430307da8 100644 --- a/arch/s390/kernel/dis.c +++ b/arch/s390/kernel/dis.c @@ -137,7 +137,7 @@ enum { INSTR_RSI_RRP, INSTR_RSL_LRDFU, INSTR_RSL_R0RD, INSTR_RSY_AARD, INSTR_RSY_CCRD, INSTR_RSY_RRRD, INSTR_RSY_RURD, - INSTR_RSY_RDRM, + INSTR_RSY_RDRM, INSTR_RSY_RMRD, INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, INSTR_RS_RRRD, INSTR_RS_RURD, INSTR_RXE_FRRD, INSTR_RXE_RRRD, INSTR_RXE_RRRDM, @@ -226,7 +226,6 @@ static const struct s390_operand operands[] = [U16_32] = { 16, 32, 0 }, [J16_16] = { 16, 16, OPERAND_PCREL }, [J16_32] = { 16, 32, OPERAND_PCREL }, - [I16_32] = { 16, 32, OPERAND_SIGNED }, [I24_24] = { 24, 24, OPERAND_SIGNED }, [J32_16] = { 32, 16, OPERAND_PCREL }, [I32_16] = { 32, 16, OPERAND_SIGNED }, @@ -308,6 +307,7 @@ static const unsigned char formats[][7] = { [INSTR_RSY_AARD] = { 0xff, A_8,A_12,D20_20,B_16,0,0 }, [INSTR_RSY_CCRD] = { 0xff, C_8,C_12,D20_20,B_16,0,0 }, [INSTR_RSY_RDRM] = { 0xff, R_8,D20_20,B_16,U4_12,0,0 }, + [INSTR_RSY_RMRD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 }, [INSTR_RSY_RRRD] = { 0xff, R_8,R_12,D20_20,B_16,0,0 }, [INSTR_RSY_RURD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 }, [INSTR_RS_AARD] = { 0xff, A_8,A_12,D_20,B_16,0,0 }, @@ -451,7 +451,8 @@ enum { LONG_INSN_VERLLV, LONG_INSN_VESRAV, LONG_INSN_VESRLV, - LONG_INSN_VSBCBI + LONG_INSN_VSBCBI, + LONG_INSN_STCCTM }; static char *long_insn_name[] = { @@ -531,6 +532,7 @@ static char *long_insn_name[] = { [LONG_INSN_VESRAV] = "vesrav", [LONG_INSN_VESRLV] = "vesrlv", [LONG_INSN_VSBCBI] = "vsbcbi", + [LONG_INSN_STCCTM] = "stcctm", }; static struct s390_insn opcode[] = { @@ -1656,6 +1658,7 @@ static struct s390_insn opcode_eb[] = { { "lric", 0x60, INSTR_RSY_RDRM }, { "stric", 0x61, INSTR_RSY_RDRM }, { "mric", 0x62, INSTR_RSY_RDRM }, + { { 0, LONG_INSN_STCCTM }, 0x17, INSTR_RSY_RMRD }, #endif { "rll", 0x1d, INSTR_RSY_RRRD }, { "mvclu", 0x8e, INSTR_RSY_RRRD }, diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 302ac1f7f8e7..4427ab7ac23a 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -396,6 +396,26 @@ static __init void detect_machine_facilities(void) #endif } +static int __init cad_setup(char *str) +{ + int val; + + get_option(&str, &val); + if (val && test_facility(128)) + S390_lowcore.machine_flags |= MACHINE_FLAG_CAD; + return 0; +} +early_param("cad", cad_setup); + +static int __init cad_init(void) +{ + if (MACHINE_HAS_CAD) + /* Enable problem state CAD. 
*/ + __ctl_set_bit(2, 3); + return 0; +} +early_initcall(cad_init); + static __init void rescue_initrd(void) { #ifdef CONFIG_BLK_DEV_INITRD diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h index 8e61393c8275..834df047d35f 100644 --- a/arch/s390/kernel/entry.h +++ b/arch/s390/kernel/entry.h @@ -71,9 +71,11 @@ struct s390_mmap_arg_struct; struct fadvise64_64_args; struct old_sigaction; +long sys_rt_sigreturn(void); +long sys_sigreturn(void); + long sys_s390_personality(unsigned int personality); long sys_s390_runtime_instr(int command, int signum); - long sys_s390_pci_mmio_write(unsigned long, const void __user *, size_t); long sys_s390_pci_mmio_read(unsigned long, void __user *, size_t); #endif /* _ENTRY_H */ diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c index b86bb8823f15..82c19899574f 100644 --- a/arch/s390/kernel/ftrace.c +++ b/arch/s390/kernel/ftrace.c @@ -46,6 +46,13 @@ * lg %r14,8(%r15) # offset 18 * The jg instruction branches to offset 24 to skip as many instructions * as possible. + * In case we use gcc's hotpatch feature the original and also the disabled + * function prologue contains only a single six byte instruction and looks + * like this: + * > brcl 0,0 # offset 0 + * To enable ftrace the code gets patched like above and afterwards looks + * like this: + * > brasl %r0,ftrace_caller # offset 0 */ unsigned long ftrace_plt; @@ -59,62 +66,71 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr) { - struct ftrace_insn insn; - unsigned short op; - void *from, *to; - size_t size; - - ftrace_generate_nop_insn(&insn); - size = sizeof(insn); - from = &insn; - to = (void *) rec->ip; - if (probe_kernel_read(&op, (void *) rec->ip, sizeof(op))) + struct ftrace_insn orig, new, old; + + if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old))) return -EFAULT; - /* - * If we find a breakpoint instruction, a kprobe has been placed - * at the beginning of the function. We write the constant - * KPROBE_ON_FTRACE_NOP into the remaining four bytes of the original - * instruction so that the kprobes handler can execute a nop, if it - * reaches this breakpoint. - */ - if (op == BREAKPOINT_INSTRUCTION) { - size -= 2; - from += 2; - to += 2; - insn.disp = KPROBE_ON_FTRACE_NOP; + if (addr == MCOUNT_ADDR) { + /* Initial code replacement */ +#ifdef CC_USING_HOTPATCH + /* We expect to see brcl 0,0 */ + ftrace_generate_nop_insn(&orig); +#else + /* We expect to see stg r14,8(r15) */ + orig.opc = 0xe3e0; + orig.disp = 0xf0080024; +#endif + ftrace_generate_nop_insn(&new); + } else if (old.opc == BREAKPOINT_INSTRUCTION) { + /* + * If we find a breakpoint instruction, a kprobe has been + * placed at the beginning of the function. We write the + * constant KPROBE_ON_FTRACE_NOP into the remaining four + * bytes of the original instruction so that the kprobes + * handler can execute a nop, if it reaches this breakpoint. + */ + new.opc = orig.opc = BREAKPOINT_INSTRUCTION; + orig.disp = KPROBE_ON_FTRACE_CALL; + new.disp = KPROBE_ON_FTRACE_NOP; + } else { + /* Replace ftrace call with a nop. */ + ftrace_generate_call_insn(&orig, rec->ip); + ftrace_generate_nop_insn(&new); } - if (probe_kernel_write(to, from, size)) + /* Verify that the to be replaced code matches what we expect. 
*/ + if (memcmp(&orig, &old, sizeof(old))) + return -EINVAL; + if (probe_kernel_write((void *) rec->ip, &new, sizeof(new))) return -EPERM; return 0; } int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) { - struct ftrace_insn insn; - unsigned short op; - void *from, *to; - size_t size; - - ftrace_generate_call_insn(&insn, rec->ip); - size = sizeof(insn); - from = &insn; - to = (void *) rec->ip; - if (probe_kernel_read(&op, (void *) rec->ip, sizeof(op))) + struct ftrace_insn orig, new, old; + + if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old))) return -EFAULT; - /* - * If we find a breakpoint instruction, a kprobe has been placed - * at the beginning of the function. We write the constant - * KPROBE_ON_FTRACE_CALL into the remaining four bytes of the original - * instruction so that the kprobes handler can execute a brasl if it - * reaches this breakpoint. - */ - if (op == BREAKPOINT_INSTRUCTION) { - size -= 2; - from += 2; - to += 2; - insn.disp = KPROBE_ON_FTRACE_CALL; + if (old.opc == BREAKPOINT_INSTRUCTION) { + /* + * If we find a breakpoint instruction, a kprobe has been + * placed at the beginning of the function. We write the + * constant KPROBE_ON_FTRACE_CALL into the remaining four + * bytes of the original instruction so that the kprobes + * handler can execute a brasl if it reaches this breakpoint. + */ + new.opc = orig.opc = BREAKPOINT_INSTRUCTION; + orig.disp = KPROBE_ON_FTRACE_NOP; + new.disp = KPROBE_ON_FTRACE_CALL; + } else { + /* Replace nop with an ftrace call. */ + ftrace_generate_nop_insn(&orig); + ftrace_generate_call_insn(&new, rec->ip); } - if (probe_kernel_write(to, from, size)) + /* Verify that the to be replaced code matches what we expect. */ + if (memcmp(&orig, &old, sizeof(old))) + return -EINVAL; + if (probe_kernel_write((void *) rec->ip, &new, sizeof(new))) return -EPERM; return 0; } diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index d62eee11f0b5..132f4c9ade60 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S @@ -436,7 +436,9 @@ ENTRY(startup_kdump) # followed by the facility words. 
#if defined(CONFIG_64BIT) -#if defined(CONFIG_MARCH_ZEC12) +#if defined(CONFIG_MARCH_Z13) + .long 3, 0xc100eff2, 0xf46ce800, 0x00400000 +#elif defined(CONFIG_MARCH_ZEC12) .long 3, 0xc100eff2, 0xf46ce800, 0x00400000 #elif defined(CONFIG_MARCH_Z196) .long 2, 0xc100eff2, 0xf46c0000 diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 39badb9ca0b3..5c8651f36509 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -2074,7 +2074,8 @@ static void do_reset_calls(void) u32 dump_prefix_page; -void s390_reset_system(void (*func)(void *), void *data) +void s390_reset_system(void (*fn_pre)(void), + void (*fn_post)(void *), void *data) { struct _lowcore *lc; @@ -2112,7 +2113,11 @@ void s390_reset_system(void (*func)(void *), void *data) /* Store status at absolute zero */ store_status(); + /* Call function before reset */ + if (fn_pre) + fn_pre(); do_reset_calls(); - if (func) - func(data); + /* Call function after reset */ + if (fn_post) + fn_post(data); } diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c index b987ab2c1541..cb2d51e779df 100644 --- a/arch/s390/kernel/jump_label.c +++ b/arch/s390/kernel/jump_label.c @@ -22,31 +22,66 @@ struct insn_args { enum jump_label_type type; }; +static void jump_label_make_nop(struct jump_entry *entry, struct insn *insn) +{ + /* brcl 0,0 */ + insn->opcode = 0xc004; + insn->offset = 0; +} + +static void jump_label_make_branch(struct jump_entry *entry, struct insn *insn) +{ + /* brcl 15,offset */ + insn->opcode = 0xc0f4; + insn->offset = (entry->target - entry->code) >> 1; +} + +static void jump_label_bug(struct jump_entry *entry, struct insn *insn) +{ + unsigned char *ipc = (unsigned char *)entry->code; + unsigned char *ipe = (unsigned char *)insn; + + pr_emerg("Jump label code mismatch at %pS [%p]\n", ipc, ipc); + pr_emerg("Found: %02x %02x %02x %02x %02x %02x\n", + ipc[0], ipc[1], ipc[2], ipc[3], ipc[4], ipc[5]); + pr_emerg("Expected: %02x %02x %02x %02x %02x %02x\n", + ipe[0], ipe[1], ipe[2], ipe[3], ipe[4], ipe[5]); + panic("Corrupted kernel text"); +} + +static struct insn orignop = { + .opcode = 0xc004, + .offset = JUMP_LABEL_NOP_OFFSET >> 1, +}; + static void __jump_label_transform(struct jump_entry *entry, - enum jump_label_type type) + enum jump_label_type type, + int init) { - struct insn insn; - int rc; + struct insn old, new; if (type == JUMP_LABEL_ENABLE) { - /* brcl 15,offset */ - insn.opcode = 0xc0f4; - insn.offset = (entry->target - entry->code) >> 1; + jump_label_make_nop(entry, &old); + jump_label_make_branch(entry, &new); } else { - /* brcl 0,0 */ - insn.opcode = 0xc004; - insn.offset = 0; + jump_label_make_branch(entry, &old); + jump_label_make_nop(entry, &new); } - - rc = probe_kernel_write((void *)entry->code, &insn, JUMP_LABEL_NOP_SIZE); - WARN_ON_ONCE(rc < 0); + if (init) { + if (memcmp((void *)entry->code, &orignop, sizeof(orignop))) + jump_label_bug(entry, &old); + } else { + if (memcmp((void *)entry->code, &old, sizeof(old))) + jump_label_bug(entry, &old); + } + probe_kernel_write((void *)entry->code, &new, sizeof(new)); } static int __sm_arch_jump_label_transform(void *data) { struct insn_args *args = data; - __jump_label_transform(args->entry, args->type); + __jump_label_transform(args->entry, args->type, 0); return 0; } @@ -64,7 +99,7 @@ void arch_jump_label_transform(struct jump_entry *entry, void arch_jump_label_transform_static(struct jump_entry *entry, enum jump_label_type type) { - __jump_label_transform(entry, type); + __jump_label_transform(entry, type, 1); } #endif diff 
--git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index 1e4c710dfb92..f516edc1fbe3 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c @@ -69,7 +69,8 @@ static void copy_instruction(struct kprobe *p) /* * If kprobes patches the instruction that is morphed by * ftrace make sure that kprobes always sees the branch - * "jg .+24" that skips the mcount block + * "jg .+24" that skips the mcount block or the "brcl 0,0" + * in case of hotpatch. */ ftrace_generate_nop_insn((struct ftrace_insn *)p->ainsn.insn); p->ainsn.is_ftrace_insn = 1; diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c index 4685337fa7c6..fb0901ec4306 100644 --- a/arch/s390/kernel/machine_kexec.c +++ b/arch/s390/kernel/machine_kexec.c @@ -103,21 +103,18 @@ static int __init machine_kdump_pm_init(void) return 0; } arch_initcall(machine_kdump_pm_init); -#endif /* * Start kdump: We expect here that a store status has been done on our CPU */ static void __do_machine_kdump(void *image) { -#ifdef CONFIG_CRASH_DUMP int (*start_kdump)(int) = (void *)((struct kimage *) image)->start; - setup_regs(); __load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA); start_kdump(1); -#endif } +#endif /* * Check if kdump checksums are valid: We call purgatory with parameter "0" @@ -249,18 +246,18 @@ static void __do_machine_kexec(void *data) */ static void __machine_kexec(void *data) { - struct kimage *image = data; - __arch_local_irq_stosm(0x04); /* enable DAT */ pfault_fini(); tracing_off(); debug_locks_off(); - if (image->type == KEXEC_TYPE_CRASH) { +#ifdef CONFIG_CRASH_DUMP + if (((struct kimage *) data)->type == KEXEC_TYPE_CRASH) { + lgr_info_log(); - s390_reset_system(__do_machine_kdump, data); - } else { - s390_reset_system(__do_machine_kexec, data); - } + s390_reset_system(setup_regs, __do_machine_kdump, data); + } else +#endif + s390_reset_system(NULL, __do_machine_kexec, data); disabled_wait((unsigned long) __builtin_return_address(0)); } diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S index b6dfc5bfcb89..e499370fbccb 100644 --- a/arch/s390/kernel/mcount.S +++ b/arch/s390/kernel/mcount.S @@ -27,7 +27,9 @@ ENTRY(ftrace_caller) .globl ftrace_regs_caller .set ftrace_regs_caller,ftrace_caller lgr %r1,%r15 +#ifndef CC_USING_HOTPATCH aghi %r0,MCOUNT_RETURN_FIXUP +#endif aghi %r15,-STACK_FRAME_SIZE stg %r1,__SF_BACKCHAIN(%r15) stg %r1,(STACK_PTREGS_GPRS+15*8)(%r15) diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c index 409d152585be..36154a2f1814 100644 --- a/arch/s390/kernel/module.c +++ b/arch/s390/kernel/module.c @@ -50,7 +50,7 @@ void *module_alloc(unsigned long size) if (PAGE_ALIGN(size) > MODULES_LEN) return NULL; return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, - GFP_KERNEL, PAGE_KERNEL, NUMA_NO_NODE, + GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE, __builtin_return_address(0)); } #endif diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index aa7a83948c7b..13fc0978ca7e 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -79,6 +79,14 @@ void release_thread(struct task_struct *dead_task) { } +#ifdef CONFIG_64BIT +void arch_release_task_struct(struct task_struct *tsk) +{ + if (tsk->thread.vxrs) + kfree(tsk->thread.vxrs); +} +#endif + int copy_thread(unsigned long clone_flags, unsigned long new_stackp, unsigned long arg, struct task_struct *p) { @@ -243,13 +251,3 @@ unsigned long arch_randomize_brk(struct mm_struct *mm) ret = PAGE_ALIGN(mm->brk + brk_rnd()); return (ret > 
mm->brk) ? ret : mm->brk; } - -unsigned long randomize_et_dyn(unsigned long base) -{ - unsigned long ret; - - if (!(current->flags & PF_RANDOMIZE)) - return base; - ret = PAGE_ALIGN(base + brk_rnd()); - return (ret > base) ? ret : base; -} diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c index dbdd33ee0102..26108232fcaa 100644 --- a/arch/s390/kernel/processor.c +++ b/arch/s390/kernel/processor.c @@ -8,16 +8,24 @@ #include <linux/kernel.h> #include <linux/init.h> -#include <linux/smp.h> #include <linux/seq_file.h> #include <linux/delay.h> #include <linux/cpu.h> #include <asm/elf.h> #include <asm/lowcore.h> #include <asm/param.h> +#include <asm/smp.h> static DEFINE_PER_CPU(struct cpuid, cpu_id); +void cpu_relax(void) +{ + if (!smp_cpu_mtid && MACHINE_HAS_DIAG44) + asm volatile("diag 0,0,0x44"); + barrier(); +} +EXPORT_SYMBOL(cpu_relax); + /* * cpu_init - initializes state that is per-CPU. */ diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S index a41f2c99dcc8..7e77e03378f3 100644 --- a/arch/s390/kernel/sclp.S +++ b/arch/s390/kernel/sclp.S @@ -294,7 +294,8 @@ ENTRY(_sclp_print_early) #ifdef CONFIG_64BIT tm LC_AR_MODE_ID,1 jno .Lesa3 - lmh %r6,%r15,96(%r15) # store upper register halves + lgfr %r2,%r2 # sign extend return value + lmh %r6,%r15,96(%r15) # restore upper register halves ahi %r15,80 .Lesa3: #endif diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 4e532c67832f..a5ea8bc17cb3 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -810,6 +810,9 @@ static void __init setup_hwcaps(void) case 0x2828: strcpy(elf_platform, "zEC12"); break; + case 0x2964: + strcpy(elf_platform, "z13"); + break; } } @@ -906,7 +909,6 @@ void __init setup_arch(char **cmdline_p) setup_lowcore(); smp_fill_possible_mask(); cpu_init(); - s390_init_cpu_topology(); /* * Setup capabilities (ELF_HWCAP & ELF_PLATFORM). 
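The ftrace.c and jump_label.c hunks above converge on the same defensive patching pattern: read the live instruction, build both the bytes expected to be there and the replacement, and refuse to patch on a mismatch rather than writing blindly. A self-contained sketch of that pattern, with read_insn/write_insn as assumed stand-ins for probe_kernel_read()/probe_kernel_write():

/* Verify-then-patch sketch mirroring the new ftrace_make_nop()/
 * ftrace_make_call() and __jump_label_transform() flow; the insn
 * layout mimics the six-byte s390 brcl/brasl encoding. */
#include <string.h>
#include <stddef.h>
#include <errno.h>

struct insn {
	unsigned short opcode;
	int offset;
} __attribute__((packed));

static int patch_insn(void *ip, const struct insn *expected,
		      const struct insn *replacement,
		      int (*read_insn)(void *dst, const void *src, size_t n),
		      int (*write_insn)(void *dst, const void *src, size_t n))
{
	struct insn old;

	if (read_insn(&old, ip, sizeof(old)))
		return -EFAULT;
	/* Only patch if the live code matches what we expect. */
	if (memcmp(expected, &old, sizeof(old)))
		return -EINVAL;
	return write_insn(ip, replacement, sizeof(*replacement)) ? -EPERM : 0;
}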
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index 6a2ac257d98f..b3ae6f70c6d6 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c @@ -162,7 +162,7 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs) _sigregs user_sregs; /* Alwys make any pending restarted system call return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; if (__copy_from_user(&user_sregs, sregs, sizeof(user_sregs))) return -EFAULT; diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 0b499f5cbe19..db8f1115a3bf 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -59,21 +59,41 @@ enum { CPU_STATE_CONFIGURED, }; +static DEFINE_PER_CPU(struct cpu *, cpu_device); + struct pcpu { - struct cpu *cpu; struct _lowcore *lowcore; /* lowcore page(s) for the cpu */ - unsigned long async_stack; /* async stack for the cpu */ - unsigned long panic_stack; /* panic stack for the cpu */ unsigned long ec_mask; /* bit mask for ec_xxx functions */ - int state; /* physical cpu state */ - int polarization; /* physical polarization */ + signed char state; /* physical cpu state */ + signed char polarization; /* physical polarization */ u16 address; /* physical cpu address */ }; static u8 boot_cpu_type; -static u16 boot_cpu_address; static struct pcpu pcpu_devices[NR_CPUS]; +unsigned int smp_cpu_mt_shift; +EXPORT_SYMBOL(smp_cpu_mt_shift); + +unsigned int smp_cpu_mtid; +EXPORT_SYMBOL(smp_cpu_mtid); + +static unsigned int smp_max_threads __initdata = -1U; + +static int __init early_nosmt(char *s) +{ + smp_max_threads = 1; + return 0; +} +early_param("nosmt", early_nosmt); + +static int __init early_smt(char *s) +{ + get_option(&s, &smp_max_threads); + return 0; +} +early_param("smt", early_smt); + /* * The smp_cpu_state_mutex must be held when changing the state or polarization * member of a pcpu data structure within the pcpu_devices arreay. @@ -132,7 +152,7 @@ static inline int pcpu_running(struct pcpu *pcpu) /* * Find struct pcpu by cpu address. 
*/ -static struct pcpu *pcpu_find_address(const struct cpumask *mask, int address) +static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address) { int cpu; @@ -152,25 +172,30 @@ static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit) pcpu_sigp_retry(pcpu, order, 0); } +#define ASYNC_FRAME_OFFSET (ASYNC_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE) +#define PANIC_FRAME_OFFSET (PAGE_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE) + static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu) { + unsigned long async_stack, panic_stack; struct _lowcore *lc; if (pcpu != &pcpu_devices[0]) { pcpu->lowcore = (struct _lowcore *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER); - pcpu->async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER); - pcpu->panic_stack = __get_free_page(GFP_KERNEL); - if (!pcpu->lowcore || !pcpu->panic_stack || !pcpu->async_stack) + async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER); + panic_stack = __get_free_page(GFP_KERNEL); + if (!pcpu->lowcore || !panic_stack || !async_stack) goto out; + } else { + async_stack = pcpu->lowcore->async_stack - ASYNC_FRAME_OFFSET; + panic_stack = pcpu->lowcore->panic_stack - PANIC_FRAME_OFFSET; } lc = pcpu->lowcore; memcpy(lc, &S390_lowcore, 512); memset((char *) lc + 512, 0, sizeof(*lc) - 512); - lc->async_stack = pcpu->async_stack + ASYNC_SIZE - - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); - lc->panic_stack = pcpu->panic_stack + PAGE_SIZE - - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); + lc->async_stack = async_stack + ASYNC_FRAME_OFFSET; + lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET; lc->cpu_nr = cpu; lc->spinlock_lockval = arch_spin_lockval(cpu); #ifndef CONFIG_64BIT @@ -191,8 +216,8 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu) return 0; out: if (pcpu != &pcpu_devices[0]) { - free_page(pcpu->panic_stack); - free_pages(pcpu->async_stack, ASYNC_ORDER); + free_page(panic_stack); + free_pages(async_stack, ASYNC_ORDER); free_pages((unsigned long) pcpu->lowcore, LC_ORDER); } return -ENOMEM; @@ -214,11 +239,11 @@ static void pcpu_free_lowcore(struct pcpu *pcpu) #else vdso_free_per_cpu(pcpu->lowcore); #endif - if (pcpu != &pcpu_devices[0]) { - free_page(pcpu->panic_stack); - free_pages(pcpu->async_stack, ASYNC_ORDER); - free_pages((unsigned long) pcpu->lowcore, LC_ORDER); - } + if (pcpu == &pcpu_devices[0]) + return; + free_page(pcpu->lowcore->panic_stack-PANIC_FRAME_OFFSET); + free_pages(pcpu->lowcore->async_stack-ASYNC_FRAME_OFFSET, ASYNC_ORDER); + free_pages((unsigned long) pcpu->lowcore, LC_ORDER); } #endif /* CONFIG_HOTPLUG_CPU */ @@ -299,6 +324,32 @@ static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *), } /* + * Enable additional logical cpus for multi-threading. + */ +static int pcpu_set_smt(unsigned int mtid) +{ + register unsigned long reg1 asm ("1") = (unsigned long) mtid; + int cc; + + if (smp_cpu_mtid == mtid) + return 0; + asm volatile( + " sigp %1,0,%2 # sigp set multi-threading\n" + " ipm %0\n" + " srl %0,28\n" + : "=d" (cc) : "d" (reg1), "K" (SIGP_SET_MULTI_THREADING) + : "cc"); + if (cc == 0) { + smp_cpu_mtid = mtid; + smp_cpu_mt_shift = 0; + while (smp_cpu_mtid >= (1U << smp_cpu_mt_shift)) + smp_cpu_mt_shift++; + pcpu_devices[0].address = stap(); + } + return cc; +} + +/* * Call function on an online CPU. 
*/ void smp_call_online_cpu(void (*func)(void *), void *data) @@ -319,7 +370,8 @@ void smp_call_online_cpu(void (*func)(void *), void *data) void smp_call_ipl_cpu(void (*func)(void *), void *data) { pcpu_delegate(&pcpu_devices[0], func, data, - pcpu_devices->panic_stack + PAGE_SIZE); + pcpu_devices->lowcore->panic_stack - + PANIC_FRAME_OFFSET + PAGE_SIZE); } int smp_find_processor_id(u16 address) @@ -512,22 +564,17 @@ EXPORT_SYMBOL(smp_ctl_clear_bit); #ifdef CONFIG_CRASH_DUMP -static void __init smp_get_save_area(int cpu, u16 address) +static inline void __smp_store_cpu_state(int cpu, u16 address, int is_boot_cpu) { void *lc = pcpu_devices[0].lowcore; struct save_area_ext *sa_ext; unsigned long vx_sa; - if (is_kdump_kernel()) - return; - if (!OLDMEM_BASE && (address == boot_cpu_address || - ipl_info.type != IPL_TYPE_FCP_DUMP)) - return; sa_ext = dump_save_area_create(cpu); if (!sa_ext) panic("could not allocate memory for save area\n"); - if (address == boot_cpu_address) { - /* Copy the registers of the boot cpu. */ + if (is_boot_cpu) { + /* Copy the registers of the boot CPU. */ copy_oldmem_page(1, (void *) &sa_ext->sa, sizeof(sa_ext->sa), SAVE_AREA_BASE - PAGE_SIZE, 0); if (MACHINE_HAS_VX) @@ -548,6 +595,64 @@ static void __init smp_get_save_area(int cpu, u16 address) free_page(vx_sa); } +/* + * Collect CPU state of the previous, crashed system. + * There are four cases: + * 1) standard zfcp dump + * condition: OLDMEM_BASE == NULL && ipl_info.type == IPL_TYPE_FCP_DUMP + * The state for all CPUs except the boot CPU needs to be collected + * with sigp stop-and-store-status. The boot CPU state is located in + * the absolute lowcore of the memory stored in the HSA. The zcore code + * will allocate the save area and copy the boot CPU state from the HSA. + * 2) stand-alone kdump for SCSI (zfcp dump with swapped memory) + * condition: OLDMEM_BASE != NULL && ipl_info.type == IPL_TYPE_FCP_DUMP + * The state for all CPUs except the boot CPU needs to be collected + * with sigp stop-and-store-status. The firmware or the boot-loader + * stored the registers of the boot CPU in the absolute lowcore in the + * memory of the old system. + * 3) kdump and the old kernel did not store the CPU state, + * or stand-alone kdump for DASD + * condition: OLDMEM_BASE != NULL && !is_kdump_kernel() + * The state for all CPUs except the boot CPU needs to be collected + * with sigp stop-and-store-status. The kexec code or the boot-loader + * stored the registers of the boot CPU in the memory of the old system. + * 4) kdump and the old kernel stored the CPU state + * condition: OLDMEM_BASE != NULL && is_kdump_kernel() + * The state of all CPUs is stored in ELF sections in the memory of the + * old system. The ELF sections are picked up by the crash_dump code + * via elfcorehdr_addr. + */ +static void __init smp_store_cpu_states(struct sclp_cpu_info *info) +{ + unsigned int cpu, address, i, j; + int is_boot_cpu; + + if (is_kdump_kernel()) + /* Previous system stored the CPU states. Nothing to do. */ + return; + if (!(OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP)) + /* No previous system present, normal boot. */ + return; + /* Set multi-threading state to the previous system. */ + pcpu_set_smt(sclp_get_mtid_prev()); + /* Collect CPU states. */ + cpu = 0; + for (i = 0; i < info->configured; i++) { + /* Skip CPUs with different CPU type. 
*/ + if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type) + continue; + for (j = 0; j <= smp_cpu_mtid; j++, cpu++) { + address = (info->cpu[i].core_id << smp_cpu_mt_shift) + j; + is_boot_cpu = (address == pcpu_devices[0].address); + if (is_boot_cpu && !OLDMEM_BASE) + /* Skip boot CPU for standard zfcp dump. */ + continue; + /* Get state for this CPu. */ + __smp_store_cpu_state(cpu, address, is_boot_cpu); + } + } +} + int smp_store_status(int cpu) { unsigned long vx_sa; @@ -565,10 +670,6 @@ int smp_store_status(int cpu) return 0; } -#else /* CONFIG_CRASH_DUMP */ - -static inline void smp_get_save_area(int cpu, u16 address) { } - #endif /* CONFIG_CRASH_DUMP */ void smp_cpu_set_polarization(int cpu, int val) @@ -590,11 +691,13 @@ static struct sclp_cpu_info *smp_get_cpu_info(void) info = kzalloc(sizeof(*info), GFP_KERNEL); if (info && (use_sigp_detection || sclp_get_cpu_info(info))) { use_sigp_detection = 1; - for (address = 0; address <= MAX_CPU_ADDRESS; address++) { + for (address = 0; address <= MAX_CPU_ADDRESS; + address += (1U << smp_cpu_mt_shift)) { if (__pcpu_sigp_relax(address, SIGP_SENSE, 0, NULL) == SIGP_CC_NOT_OPERATIONAL) continue; - info->cpu[info->configured].address = address; + info->cpu[info->configured].core_id = + address >> smp_cpu_mt_shift; info->configured++; } info->combined = info->configured; @@ -608,7 +711,8 @@ static int __smp_rescan_cpus(struct sclp_cpu_info *info, int sysfs_add) { struct pcpu *pcpu; cpumask_t avail; - int cpu, nr, i; + int cpu, nr, i, j; + u16 address; nr = 0; cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask); @@ -616,51 +720,76 @@ static int __smp_rescan_cpus(struct sclp_cpu_info *info, int sysfs_add) for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) { if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type) continue; - if (pcpu_find_address(cpu_present_mask, info->cpu[i].address)) - continue; - pcpu = pcpu_devices + cpu; - pcpu->address = info->cpu[i].address; - pcpu->state = (i >= info->configured) ? - CPU_STATE_STANDBY : CPU_STATE_CONFIGURED; - smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); - set_cpu_present(cpu, true); - if (sysfs_add && smp_add_present_cpu(cpu) != 0) - set_cpu_present(cpu, false); - else - nr++; - cpu = cpumask_next(cpu, &avail); + address = info->cpu[i].core_id << smp_cpu_mt_shift; + for (j = 0; j <= smp_cpu_mtid; j++) { + if (pcpu_find_address(cpu_present_mask, address + j)) + continue; + pcpu = pcpu_devices + cpu; + pcpu->address = address + j; + pcpu->state = + (cpu >= info->configured*(smp_cpu_mtid + 1)) ? + CPU_STATE_STANDBY : CPU_STATE_CONFIGURED; + smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); + set_cpu_present(cpu, true); + if (sysfs_add && smp_add_present_cpu(cpu) != 0) + set_cpu_present(cpu, false); + else + nr++; + cpu = cpumask_next(cpu, &avail); + if (cpu >= nr_cpu_ids) + break; + } } return nr; } static void __init smp_detect_cpus(void) { - unsigned int cpu, c_cpus, s_cpus; + unsigned int cpu, mtid, c_cpus, s_cpus; struct sclp_cpu_info *info; + u16 address; + /* Get CPU information */ info = smp_get_cpu_info(); if (!info) panic("smp_detect_cpus failed to allocate memory\n"); + + /* Find boot CPU type */ if (info->has_cpu_type) { - for (cpu = 0; cpu < info->combined; cpu++) { - if (info->cpu[cpu].address != boot_cpu_address) - continue; - /* The boot cpu dictates the cpu type. 
*/ - boot_cpu_type = info->cpu[cpu].type; - break; - } + address = stap(); + for (cpu = 0; cpu < info->combined; cpu++) + if (info->cpu[cpu].core_id == address) { + /* The boot cpu dictates the cpu type. */ + boot_cpu_type = info->cpu[cpu].type; + break; + } + if (cpu >= info->combined) + panic("Could not find boot CPU type"); } + +#ifdef CONFIG_CRASH_DUMP + /* Collect CPU state of previous system */ + smp_store_cpu_states(info); +#endif + + /* Set multi-threading state for the current system */ + mtid = sclp_get_mtid(boot_cpu_type); + mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1; + pcpu_set_smt(mtid); + + /* Print number of CPUs */ c_cpus = s_cpus = 0; for (cpu = 0; cpu < info->combined; cpu++) { if (info->has_cpu_type && info->cpu[cpu].type != boot_cpu_type) continue; - if (cpu < info->configured) { - smp_get_save_area(c_cpus, info->cpu[cpu].address); - c_cpus++; - } else - s_cpus++; + if (cpu < info->configured) + c_cpus += smp_cpu_mtid + 1; + else + s_cpus += smp_cpu_mtid + 1; } pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus); + + /* Add CPUs present at boot */ get_online_cpus(); __smp_rescan_cpus(info, 0); put_online_cpus(); @@ -696,12 +825,23 @@ static void smp_start_secondary(void *cpuvoid) int __cpu_up(unsigned int cpu, struct task_struct *tidle) { struct pcpu *pcpu; - int rc; + int base, i, rc; pcpu = pcpu_devices + cpu; if (pcpu->state != CPU_STATE_CONFIGURED) return -EIO; - if (pcpu_sigp_retry(pcpu, SIGP_INITIAL_CPU_RESET, 0) != + base = cpu - (cpu % (smp_cpu_mtid + 1)); + for (i = 0; i <= smp_cpu_mtid; i++) { + if (base + i < nr_cpu_ids) + if (cpu_online(base + i)) + break; + } + /* + * If this is the first CPU of the core to get online + * do an initial CPU reset. + */ + if (i > smp_cpu_mtid && + pcpu_sigp_retry(pcpu_devices + base, SIGP_INITIAL_CPU_RESET, 0) != SIGP_CC_ORDER_CODE_ACCEPTED) return -EIO; @@ -774,7 +914,8 @@ void __init smp_fill_possible_mask(void) { unsigned int possible, sclp, cpu; - sclp = sclp_get_max_cpu() ?: nr_cpu_ids; + sclp = min(smp_max_threads, sclp_get_mtid_max() + 1); + sclp = sclp_get_max_cpu()*sclp ?: nr_cpu_ids; possible = setup_possible_cpus ?: nr_cpu_ids; possible = min(possible, sclp); for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++) @@ -796,14 +937,9 @@ void __init smp_prepare_boot_cpu(void) { struct pcpu *pcpu = pcpu_devices; - boot_cpu_address = stap(); pcpu->state = CPU_STATE_CONFIGURED; - pcpu->address = boot_cpu_address; + pcpu->address = stap(); pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix(); - pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE - + STACK_FRAME_OVERHEAD + sizeof(struct pt_regs); - pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE - + STACK_FRAME_OVERHEAD + sizeof(struct pt_regs); S390_lowcore.percpu_offset = __per_cpu_offset[0]; smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN); set_cpu_present(0, true); @@ -848,7 +984,7 @@ static ssize_t cpu_configure_store(struct device *dev, const char *buf, size_t count) { struct pcpu *pcpu; - int cpu, val, rc; + int cpu, val, rc, i; char delim; if (sscanf(buf, "%d %c", &val, &delim) != 1) @@ -860,29 +996,43 @@ static ssize_t cpu_configure_store(struct device *dev, rc = -EBUSY; /* disallow configuration changes of online cpus and cpu 0 */ cpu = dev->id; - if (cpu_online(cpu) || cpu == 0) + cpu -= cpu % (smp_cpu_mtid + 1); + if (cpu == 0) goto out; + for (i = 0; i <= smp_cpu_mtid; i++) + if (cpu_online(cpu + i)) + goto out; pcpu = pcpu_devices + cpu; rc = 0; switch (val) { case 0: if (pcpu->state != 
CPU_STATE_CONFIGURED) break; - rc = sclp_cpu_deconfigure(pcpu->address); + rc = sclp_cpu_deconfigure(pcpu->address >> smp_cpu_mt_shift); if (rc) break; - pcpu->state = CPU_STATE_STANDBY; - smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); + for (i = 0; i <= smp_cpu_mtid; i++) { + if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i)) + continue; + pcpu[i].state = CPU_STATE_STANDBY; + smp_cpu_set_polarization(cpu + i, + POLARIZATION_UNKNOWN); + } topology_expect_change(); break; case 1: if (pcpu->state != CPU_STATE_STANDBY) break; - rc = sclp_cpu_configure(pcpu->address); + rc = sclp_cpu_configure(pcpu->address >> smp_cpu_mt_shift); if (rc) break; - pcpu->state = CPU_STATE_CONFIGURED; - smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); + for (i = 0; i <= smp_cpu_mtid; i++) { + if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i)) + continue; + pcpu[i].state = CPU_STATE_CONFIGURED; + smp_cpu_set_polarization(cpu + i, + POLARIZATION_UNKNOWN); + } topology_expect_change(); break; default: @@ -929,8 +1079,7 @@ static int smp_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned int)(long)hcpu; - struct cpu *c = pcpu_devices[cpu].cpu; - struct device *s = &c->dev; + struct device *s = &per_cpu(cpu_device, cpu)->dev; int err = 0; switch (action & ~CPU_TASKS_FROZEN) { @@ -953,7 +1102,7 @@ static int smp_add_present_cpu(int cpu) c = kzalloc(sizeof(*c), GFP_KERNEL); if (!c) return -ENOMEM; - pcpu_devices[cpu].cpu = c; + per_cpu(cpu_device, cpu) = c; s = &c->dev; c->hotpluggable = 1; rc = register_cpu(c, cpu); diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c index 811f542b8ed4..99babea026ca 100644 --- a/arch/s390/kernel/sysinfo.c +++ b/arch/s390/kernel/sysinfo.c @@ -194,6 +194,41 @@ static void stsi_2_2_2(struct seq_file *m, struct sysinfo_2_2_2 *info) seq_printf(m, "LPAR CPUs Reserved: %d\n", info->cpus_reserved); seq_printf(m, "LPAR CPUs Dedicated: %d\n", info->cpus_dedicated); seq_printf(m, "LPAR CPUs Shared: %d\n", info->cpus_shared); + if (info->mt_installed & 0x80) { + seq_printf(m, "LPAR CPUs G-MTID: %d\n", + info->mt_general & 0x1f); + seq_printf(m, "LPAR CPUs S-MTID: %d\n", + info->mt_installed & 0x1f); + seq_printf(m, "LPAR CPUs PS-MTID: %d\n", + info->mt_psmtid & 0x1f); + } +} + +static void print_ext_name(struct seq_file *m, int lvl, + struct sysinfo_3_2_2 *info) +{ + if (info->vm[lvl].ext_name_encoding == 0) + return; + if (info->ext_names[lvl][0] == 0) + return; + switch (info->vm[lvl].ext_name_encoding) { + case 1: /* EBCDIC */ + EBCASC(info->ext_names[lvl], sizeof(info->ext_names[lvl])); + break; + case 2: /* UTF-8 */ + break; + default: + return; + } + seq_printf(m, "VM%02d Extended Name: %-.256s\n", lvl, + info->ext_names[lvl]); +} + +static void print_uuid(struct seq_file *m, int i, struct sysinfo_3_2_2 *info) +{ + if (!memcmp(&info->vm[i].uuid, &NULL_UUID_BE, sizeof(uuid_be))) + return; + seq_printf(m, "VM%02d UUID: %pUb\n", i, &info->vm[i].uuid); } static void stsi_3_2_2(struct seq_file *m, struct sysinfo_3_2_2 *info) @@ -213,6 +248,8 @@ static void stsi_3_2_2(struct seq_file *m, struct sysinfo_3_2_2 *info) seq_printf(m, "VM%02d CPUs Configured: %d\n", i, info->vm[i].cpus_configured); seq_printf(m, "VM%02d CPUs Standby: %d\n", i, info->vm[i].cpus_standby); seq_printf(m, "VM%02d CPUs Reserved: %d\n", i, info->vm[i].cpus_reserved); + print_ext_name(m, i, info); + print_uuid(m, i, info); } } diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index b93bed76ea94..14da43b801d9 100644 --- 
a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c @@ -7,14 +7,14 @@ #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/workqueue.h> -#include <linux/bootmem.h> #include <linux/cpuset.h> #include <linux/device.h> #include <linux/export.h> #include <linux/kernel.h> #include <linux/sched.h> -#include <linux/init.h> #include <linux/delay.h> +#include <linux/init.h> +#include <linux/slab.h> #include <linux/cpu.h> #include <linux/smp.h> #include <linux/mm.h> @@ -42,8 +42,8 @@ static DEFINE_SPINLOCK(topology_lock); static struct mask_info socket_info; static struct mask_info book_info; -struct cpu_topology_s390 cpu_topology[NR_CPUS]; -EXPORT_SYMBOL_GPL(cpu_topology); +DEFINE_PER_CPU(struct cpu_topology_s390, cpu_topology); +EXPORT_PER_CPU_SYMBOL_GPL(cpu_topology); static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu) { @@ -59,32 +59,50 @@ static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu) return mask; } -static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu, +static cpumask_t cpu_thread_map(unsigned int cpu) +{ + cpumask_t mask; + int i; + + cpumask_copy(&mask, cpumask_of(cpu)); + if (!topology_enabled || !MACHINE_HAS_TOPOLOGY) + return mask; + cpu -= cpu % (smp_cpu_mtid + 1); + for (i = 0; i <= smp_cpu_mtid; i++) + if (cpu_present(cpu + i)) + cpumask_set_cpu(cpu + i, &mask); + return mask; +} + +static struct mask_info *add_cpus_to_mask(struct topology_core *tl_core, struct mask_info *book, struct mask_info *socket, int one_socket_per_cpu) { - unsigned int cpu; + unsigned int core; - for_each_set_bit(cpu, &tl_cpu->mask[0], TOPOLOGY_CPU_BITS) { - unsigned int rcpu; - int lcpu; + for_each_set_bit(core, &tl_core->mask[0], TOPOLOGY_CORE_BITS) { + unsigned int rcore; + int lcpu, i; - rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin; - lcpu = smp_find_processor_id(rcpu); + rcore = TOPOLOGY_CORE_BITS - 1 - core + tl_core->origin; + lcpu = smp_find_processor_id(rcore << smp_cpu_mt_shift); if (lcpu < 0) continue; - cpumask_set_cpu(lcpu, &book->mask); - cpu_topology[lcpu].book_id = book->id; - cpumask_set_cpu(lcpu, &socket->mask); - cpu_topology[lcpu].core_id = rcpu; - if (one_socket_per_cpu) { - cpu_topology[lcpu].socket_id = rcpu; - socket = socket->next; - } else { - cpu_topology[lcpu].socket_id = socket->id; + for (i = 0; i <= smp_cpu_mtid; i++) { + per_cpu(cpu_topology, lcpu + i).book_id = book->id; + per_cpu(cpu_topology, lcpu + i).core_id = rcore; + per_cpu(cpu_topology, lcpu + i).thread_id = lcpu + i; + cpumask_set_cpu(lcpu + i, &book->mask); + cpumask_set_cpu(lcpu + i, &socket->mask); + if (one_socket_per_cpu) + per_cpu(cpu_topology, lcpu + i).socket_id = rcore; + else + per_cpu(cpu_topology, lcpu + i).socket_id = socket->id; + smp_cpu_set_polarization(lcpu + i, tl_core->pp); } - smp_cpu_set_polarization(lcpu, tl_cpu->pp); + if (one_socket_per_cpu) + socket = socket->next; } return socket; } @@ -108,7 +126,7 @@ static void clear_masks(void) static union topology_entry *next_tle(union topology_entry *tle) { if (!tle->nl) - return (union topology_entry *)((struct topology_cpu *)tle + 1); + return (union topology_entry *)((struct topology_core *)tle + 1); return (union topology_entry *)((struct topology_container *)tle + 1); } @@ -231,12 +249,14 @@ static void update_cpu_masks(void) spin_lock_irqsave(&topology_lock, flags); for_each_possible_cpu(cpu) { - cpu_topology[cpu].core_mask = cpu_group_map(&socket_info, cpu); - cpu_topology[cpu].book_mask = cpu_group_map(&book_info, cpu); + per_cpu(cpu_topology, 
cpu).thread_mask = cpu_thread_map(cpu); + per_cpu(cpu_topology, cpu).core_mask = cpu_group_map(&socket_info, cpu); + per_cpu(cpu_topology, cpu).book_mask = cpu_group_map(&book_info, cpu); if (!MACHINE_HAS_TOPOLOGY) { - cpu_topology[cpu].core_id = cpu; - cpu_topology[cpu].socket_id = cpu; - cpu_topology[cpu].book_id = cpu; + per_cpu(cpu_topology, cpu).thread_id = cpu; + per_cpu(cpu_topology, cpu).core_id = cpu; + per_cpu(cpu_topology, cpu).socket_id = cpu; + per_cpu(cpu_topology, cpu).book_id = cpu; } } spin_unlock_irqrestore(&topology_lock, flags); @@ -314,50 +334,6 @@ void topology_expect_change(void) set_topology_timer(); } -static int __init early_parse_topology(char *p) -{ - if (strncmp(p, "off", 3)) - return 0; - topology_enabled = 0; - return 0; -} -early_param("topology", early_parse_topology); - -static void __init alloc_masks(struct sysinfo_15_1_x *info, - struct mask_info *mask, int offset) -{ - int i, nr_masks; - - nr_masks = info->mag[TOPOLOGY_NR_MAG - offset]; - for (i = 0; i < info->mnest - offset; i++) - nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i]; - nr_masks = max(nr_masks, 1); - for (i = 0; i < nr_masks; i++) { - mask->next = alloc_bootmem_align( - roundup_pow_of_two(sizeof(struct mask_info)), - roundup_pow_of_two(sizeof(struct mask_info))); - mask = mask->next; - } -} - -void __init s390_init_cpu_topology(void) -{ - struct sysinfo_15_1_x *info; - int i; - - if (!MACHINE_HAS_TOPOLOGY) - return; - tl_info = alloc_bootmem_pages(PAGE_SIZE); - info = tl_info; - store_topology(info); - pr_info("The CPU configuration topology of the machine is:"); - for (i = 0; i < TOPOLOGY_NR_MAG; i++) - printk(KERN_CONT " %d", info->mag[i]); - printk(KERN_CONT " / %d\n", info->mnest); - alloc_masks(info, &socket_info, 1); - alloc_masks(info, &book_info, 2); -} - static int cpu_management; static ssize_t dispatching_show(struct device *dev, @@ -445,23 +421,75 @@ int topology_cpu_init(struct cpu *cpu) return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group); } +const struct cpumask *cpu_thread_mask(int cpu) +{ + return &per_cpu(cpu_topology, cpu).thread_mask; +} + + const struct cpumask *cpu_coregroup_mask(int cpu) { - return &cpu_topology[cpu].core_mask; + return &per_cpu(cpu_topology, cpu).core_mask; } static const struct cpumask *cpu_book_mask(int cpu) { - return &cpu_topology[cpu].book_mask; + return &per_cpu(cpu_topology, cpu).book_mask; +} + +static int __init early_parse_topology(char *p) +{ + if (strncmp(p, "off", 3)) + return 0; + topology_enabled = 0; + return 0; } +early_param("topology", early_parse_topology); static struct sched_domain_topology_level s390_topology[] = { + { cpu_thread_mask, cpu_smt_flags, SD_INIT_NAME(SMT) }, { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) }, { cpu_book_mask, SD_INIT_NAME(BOOK) }, { cpu_cpu_mask, SD_INIT_NAME(DIE) }, { NULL, }, }; +static void __init alloc_masks(struct sysinfo_15_1_x *info, + struct mask_info *mask, int offset) +{ + int i, nr_masks; + + nr_masks = info->mag[TOPOLOGY_NR_MAG - offset]; + for (i = 0; i < info->mnest - offset; i++) + nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i]; + nr_masks = max(nr_masks, 1); + for (i = 0; i < nr_masks; i++) { + mask->next = kzalloc(sizeof(*mask->next), GFP_KERNEL); + mask = mask->next; + } +} + +static int __init s390_topology_init(void) +{ + struct sysinfo_15_1_x *info; + int i; + + if (!MACHINE_HAS_TOPOLOGY) + return 0; + tl_info = (struct sysinfo_15_1_x *)__get_free_page(GFP_KERNEL); + info = tl_info; + store_topology(info); + pr_info("The CPU configuration 
topology of the machine is:"); + for (i = 0; i < TOPOLOGY_NR_MAG; i++) + printk(KERN_CONT " %d", info->mag[i]); + printk(KERN_CONT " / %d\n", info->mnest); + alloc_masks(info, &socket_info, 1); + alloc_masks(info, &book_info, 2); + set_sched_topology(s390_topology); + return 0; +} +early_initcall(s390_topology_init); + static int __init topology_init(void) { if (MACHINE_HAS_TOPOLOGY) @@ -471,10 +499,3 @@ static int __init topology_init(void) return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching); } device_initcall(topology_init); - -static int __init early_topology_init(void) -{ - set_sched_topology(s390_topology); - return 0; -} -early_initcall(early_topology_init); diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S index 7699e735ae28..61541fb93dc6 100644 --- a/arch/s390/kernel/vdso64/clock_gettime.S +++ b/arch/s390/kernel/vdso64/clock_gettime.S @@ -25,9 +25,7 @@ __kernel_clock_gettime: je 4f cghi %r2,__CLOCK_REALTIME je 5f - cghi %r2,__CLOCK_THREAD_CPUTIME_ID - je 9f - cghi %r2,-2 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */ + cghi %r2,-3 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */ je 9f cghi %r2,__CLOCK_MONOTONIC_COARSE je 3f @@ -106,7 +104,7 @@ __kernel_clock_gettime: aghi %r15,16 br %r14 - /* CLOCK_THREAD_CPUTIME_ID for this thread */ + /* CPUCLOCK_VIRT for this thread */ 9: icm %r0,15,__VDSO_ECTG_OK(%r5) jz 12f ear %r2,%a4 diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index e34122e539a1..e53d3595a7c8 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c @@ -15,6 +15,8 @@ #include <asm/cputime.h> #include <asm/vtimer.h> #include <asm/vtime.h> +#include <asm/cpu_mf.h> +#include <asm/smp.h> static void virt_timer_expire(void); @@ -23,6 +25,10 @@ static DEFINE_SPINLOCK(virt_timer_lock); static atomic64_t virt_timer_current; static atomic64_t virt_timer_elapsed; +static DEFINE_PER_CPU(u64, mt_cycles[32]); +static DEFINE_PER_CPU(u64, mt_scaling_mult) = { 1 }; +static DEFINE_PER_CPU(u64, mt_scaling_div) = { 1 }; + static inline u64 get_vtimer(void) { u64 timer; @@ -61,6 +67,8 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset) { struct thread_info *ti = task_thread_info(tsk); u64 timer, clock, user, system, steal; + u64 user_scaled, system_scaled; + int i; timer = S390_lowcore.last_update_timer; clock = S390_lowcore.last_update_clock; @@ -76,15 +84,49 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset) S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer; S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock; + /* Do MT utilization calculation */ + if (smp_cpu_mtid) { + u64 cycles_new[32], *cycles_old; + u64 delta, mult, div; + + cycles_old = this_cpu_ptr(mt_cycles); + if (stcctm5(smp_cpu_mtid + 1, cycles_new) < 2) { + mult = div = 0; + for (i = 0; i <= smp_cpu_mtid; i++) { + delta = cycles_new[i] - cycles_old[i]; + mult += delta; + div += (i + 1) * delta; + } + if (mult > 0) { + /* Update scaling factor */ + __this_cpu_write(mt_scaling_mult, mult); + __this_cpu_write(mt_scaling_div, div); + memcpy(cycles_old, cycles_new, + sizeof(u64) * (smp_cpu_mtid + 1)); + } + } + } + user = S390_lowcore.user_timer - ti->user_timer; S390_lowcore.steal_timer -= user; ti->user_timer = S390_lowcore.user_timer; - account_user_time(tsk, user, user); system = S390_lowcore.system_timer - ti->system_timer; S390_lowcore.steal_timer -= system; ti->system_timer = S390_lowcore.system_timer; - account_system_time(tsk, hardirq_offset, system, 
system); + + user_scaled = user; + system_scaled = system; + /* Do MT utilization scaling */ + if (smp_cpu_mtid) { + u64 mult = __this_cpu_read(mt_scaling_mult); + u64 div = __this_cpu_read(mt_scaling_div); + + user_scaled = (user_scaled * mult) / div; + system_scaled = (system_scaled * mult) / div; + } + account_user_time(tsk, user, user_scaled); + account_system_time(tsk, hardirq_offset, system, system_scaled); steal = S390_lowcore.steal_timer; if ((s64) steal > 0) { @@ -126,7 +168,7 @@ void vtime_account_user(struct task_struct *tsk) void vtime_account_irq_enter(struct task_struct *tsk) { struct thread_info *ti = task_thread_info(tsk); - u64 timer, system; + u64 timer, system, system_scaled; timer = S390_lowcore.last_update_timer; S390_lowcore.last_update_timer = get_vtimer(); @@ -135,7 +177,15 @@ void vtime_account_irq_enter(struct task_struct *tsk) system = S390_lowcore.system_timer - ti->system_timer; S390_lowcore.steal_timer -= system; ti->system_timer = S390_lowcore.system_timer; - account_system_time(tsk, 0, system, system); + system_scaled = system; + /* Do MT utilization scaling */ + if (smp_cpu_mtid) { + u64 mult = __this_cpu_read(mt_scaling_mult); + u64 div = __this_cpu_read(mt_scaling_div); + + system_scaled = (system_scaled * mult) / div; + } + account_system_time(tsk, 0, system, system_scaled); virt_timer_forward(system); }
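Two arithmetic details of the SMT support above, restated as stand-alone sketches. First, the core/thread addressing in smp.c: pcpu_set_smt() computes smp_cpu_mt_shift as the smallest shift with (1 << shift) > smp_cpu_mtid, and a thread's CPU address is then (core_id << smp_cpu_mt_shift) + thread, as in __smp_rescan_cpus():

/* SMT address arithmetic from pcpu_set_smt()/__smp_rescan_cpus(). */
#include <stdio.h>

static unsigned int smp_cpu_mtid;	/* highest thread id per core */
static unsigned int smp_cpu_mt_shift;

static void set_mt(unsigned int mtid)
{
	smp_cpu_mtid = mtid;
	smp_cpu_mt_shift = 0;
	while (smp_cpu_mtid >= (1U << smp_cpu_mt_shift))
		smp_cpu_mt_shift++;
}

int main(void)
{
	unsigned int core, thread;

	set_mt(1);	/* assume two threads per core (SMT-2) */
	for (core = 0; core < 2; core++)
		for (thread = 0; thread <= smp_cpu_mtid; thread++)
			printf("core %u thread %u -> address %u\n", core,
			       thread, (core << smp_cpu_mt_shift) + thread);
	return 0;
}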
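Second, the MT utilization scaling in vtime.c: the cycle counters read with stcctm5() yield per-thread deltas; mult accumulates the total delta, div weights the delta of thread i by (i + 1), and elapsed CPU time is scaled by mult/div. A worked sketch with assumed sample deltas, not real counter output:

/* MT utilization scaling as in do_account_vtime(): scale time by
 * mult/div derived from per-thread cycle deltas (assumed values). */
#include <stdio.h>

int main(void)
{
	unsigned long long delta[2] = { 800, 200 };	/* threads 0, 1 */
	unsigned long long mult = 0, div = 0, system = 1000;
	int i, smp_cpu_mtid = 1;	/* SMT-2 */

	for (i = 0; i <= smp_cpu_mtid; i++) {
		mult += delta[i];
		div += (i + 1) * delta[i];
	}
	/* mult = 1000, div = 800 + 2 * 200 = 1200 */
	printf("scaled = %llu\n", system * mult / div);	/* 833 */
	return 0;
}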