author     David S. Miller <davem@davemloft.net>  2011-11-21 19:50:33 +0100
committer  David S. Miller <davem@davemloft.net>  2011-11-21 19:50:33 +0100
commit     efd0bf97deeddd9ba53daabfc470a1399c6b0b2d (patch)
tree       eec56da5fbc796bac7c67f1990a18f5e0a304059 /arch/s390
parent     team: replace kmalloc+memcpy by kmemdup (diff)
parent     Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
The forcedeth changes had a conflict with the conversion over
to atomic u64 statistics in net-next.
The libertas cfg.c code had a conflict with the bss reference
counting fix by John Linville in net-next.
Conflicts:
	drivers/net/ethernet/nvidia/forcedeth.c
	drivers/net/wireless/libertas/cfg.c
Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/Kconfig                  |  1
-rw-r--r--  arch/s390/crypto/crypt_s390.h      |  7
-rw-r--r--  arch/s390/include/asm/kvm_host.h   |  3
-rw-r--r--  arch/s390/include/asm/pgtable.h    | 12
-rw-r--r--  arch/s390/include/asm/setup.h      |  3
-rw-r--r--  arch/s390/include/asm/timex.h      |  2
-rw-r--r--  arch/s390/include/asm/unistd.h     |  4
-rw-r--r--  arch/s390/kernel/compat_wrapper.S  | 20
-rw-r--r--  arch/s390/kernel/early.c           |  2
-rw-r--r--  arch/s390/kernel/setup.c           |  4
-rw-r--r--  arch/s390/kernel/syscalls.S        |  2
-rw-r--r--  arch/s390/kernel/topology.c        | 45
-rw-r--r--  arch/s390/kernel/vmlinux.lds.S     |  2
-rw-r--r--  arch/s390/kvm/diag.c               |  2
-rw-r--r--  arch/s390/kvm/intercept.c          |  3
-rw-r--r--  arch/s390/kvm/interrupt.c          |  1
-rw-r--r--  arch/s390/kvm/kvm-s390.c           | 12
-rw-r--r--  arch/s390/kvm/priv.c               | 10
-rw-r--r--  arch/s390/kvm/sigp.c               | 45
-rw-r--r--  arch/s390/mm/fault.c               |  9
20 files changed, 161 insertions(+), 28 deletions(-)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index a9fbd43395f7..373679b3744a 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -572,6 +572,7 @@ config KEXEC
 config CRASH_DUMP
 	bool "kernel crash dumps"
 	depends on 64BIT
+	select KEXEC
 	help
 	  Generate crash dump after being started by kexec.
 	  Crash dump kernels are loaded in the main kernel with kexec-tools
diff --git a/arch/s390/crypto/crypt_s390.h b/arch/s390/crypto/crypt_s390.h
index 49676771bd66..ffd1ac255f19 100644
--- a/arch/s390/crypto/crypt_s390.h
+++ b/arch/s390/crypto/crypt_s390.h
@@ -368,9 +368,12 @@ static inline int crypt_s390_func_available(int func,
 
 	if (facility_mask & CRYPT_S390_MSA && !test_facility(17))
 		return 0;
-	if (facility_mask & CRYPT_S390_MSA3 && !test_facility(76))
+
+	if (facility_mask & CRYPT_S390_MSA3 &&
+	    (!test_facility(2) || !test_facility(76)))
 		return 0;
-	if (facility_mask & CRYPT_S390_MSA4 && !test_facility(77))
+	if (facility_mask & CRYPT_S390_MSA4 &&
+	    (!test_facility(2) || !test_facility(77)))
 		return 0;
 
 	switch (func & CRYPT_S390_OP_MASK) {
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 24e18473d926..b0c235cb6ad5 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -47,7 +47,7 @@ struct sca_block {
 #define KVM_HPAGE_MASK(x)	(~(KVM_HPAGE_SIZE(x) - 1))
 #define KVM_PAGES_PER_HPAGE(x)	(KVM_HPAGE_SIZE(x) / PAGE_SIZE)
 
-#define CPUSTAT_HOST       0x80000000
+#define CPUSTAT_STOPPED    0x80000000
 #define CPUSTAT_WAIT       0x10000000
 #define CPUSTAT_ECALL_PEND 0x08000000
 #define CPUSTAT_STOP_INT   0x04000000
@@ -139,6 +139,7 @@ struct kvm_vcpu_stat {
 	u32 instruction_stfl;
 	u32 instruction_tprot;
 	u32 instruction_sigp_sense;
+	u32 instruction_sigp_sense_running;
 	u32 instruction_sigp_external_call;
 	u32 instruction_sigp_emergency;
 	u32 instruction_sigp_stop;
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 34ede0ea85a9..524d23b8610c 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -593,6 +593,8 @@ static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
 	unsigned long address, bits;
 	unsigned char skey;
 
+	if (!pte_present(*ptep))
+		return pgste;
 	address = pte_val(*ptep) & PAGE_MASK;
 	skey = page_get_storage_key(address);
 	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
@@ -625,6 +627,8 @@ static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
 #ifdef CONFIG_PGSTE
 	int young;
 
+	if (!pte_present(*ptep))
+		return pgste;
 	young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
 	/* Transfer page referenced bit to pte software bit (host view) */
 	if (young || (pgste_val(pgste) & RCP_HR_BIT))
@@ -638,13 +642,15 @@ static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
 
 }
 
-static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste)
+static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
 {
 #ifdef CONFIG_PGSTE
 	unsigned long address;
 	unsigned long okey, nkey;
 
-	address = pte_val(*ptep) & PAGE_MASK;
+	if (!pte_present(entry))
+		return;
+	address = pte_val(entry) & PAGE_MASK;
 	okey = nkey = page_get_storage_key(address);
 	nkey &= ~(_PAGE_ACC_BITS | _PAGE_FP_BIT);
 	/* Set page access key and fetch protection bit from pgste */
@@ -712,7 +718,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 
 	if (mm_has_pgste(mm)) {
 		pgste = pgste_get_lock(ptep);
-		pgste_set_pte(ptep, pgste);
+		pgste_set_pte(ptep, pgste, entry);
 		*ptep = entry;
 		pgste_set_unlock(ptep, pgste);
 	} else
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 5a099714df04..097183c70407 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -82,6 +82,7 @@ extern unsigned int user_mode;
 #define MACHINE_FLAG_LPAR	(1UL << 12)
 #define MACHINE_FLAG_SPP	(1UL << 13)
 #define MACHINE_FLAG_TOPOLOGY	(1UL << 14)
+#define MACHINE_FLAG_STCKF	(1UL << 15)
 
 #define MACHINE_IS_VM		(S390_lowcore.machine_flags & MACHINE_FLAG_VM)
 #define MACHINE_IS_KVM		(S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
@@ -100,6 +101,7 @@ extern unsigned int user_mode;
 #define MACHINE_HAS_PFMF	(0)
 #define MACHINE_HAS_SPP		(0)
 #define MACHINE_HAS_TOPOLOGY	(0)
+#define MACHINE_HAS_STCKF	(0)
 #else /* __s390x__ */
 #define MACHINE_HAS_IEEE	(1)
 #define MACHINE_HAS_CSP		(1)
@@ -111,6 +113,7 @@ extern unsigned int user_mode;
 #define MACHINE_HAS_PFMF	(S390_lowcore.machine_flags & MACHINE_FLAG_PFMF)
 #define MACHINE_HAS_SPP		(S390_lowcore.machine_flags & MACHINE_FLAG_SPP)
 #define MACHINE_HAS_TOPOLOGY	(S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY)
+#define MACHINE_HAS_STCKF	(S390_lowcore.machine_flags & MACHINE_FLAG_STCKF)
 #endif /* __s390x__ */
 
 #define ZFCPDUMP_HSA_SIZE	(32UL<<20)
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index d610bef9c5e9..c447a27a7fdb 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -90,7 +90,7 @@ static inline unsigned long long get_clock_fast(void)
 {
 	unsigned long long clk;
 
-	if (test_facility(25))
+	if (MACHINE_HAS_STCKF)
 		asm volatile(".insn s,0xb27c0000,%0" : "=Q" (clk) : : "cc");
 	else
 		clk = get_clock();
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index 404bdb9671b4..58de4c91c333 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -277,7 +277,9 @@
 #define __NR_clock_adjtime	337
 #define __NR_syncfs		338
 #define __NR_setns		339
-#define NR_syscalls 340
+#define __NR_process_vm_readv	340
+#define __NR_process_vm_writev	341
+#define NR_syscalls 342
 
 /*
  * There are some system calls that are not present on 64 bit, some
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 5006a1d9f5d0..18c51df9fe06 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -1627,3 +1627,23 @@ ENTRY(sys_setns_wrapper)
 	lgfr	%r2,%r2			# int
 	lgfr	%r3,%r3			# int
 	jg	sys_setns
+
+ENTRY(compat_sys_process_vm_readv_wrapper)
+	lgfr	%r2,%r2			# compat_pid_t
+	llgtr	%r3,%r3			# struct compat_iovec __user *
+	llgfr	%r4,%r4			# unsigned long
+	llgtr	%r5,%r5			# struct compat_iovec __user *
+	llgfr	%r6,%r6			# unsigned long
+	llgf	%r0,164(%r15)		# unsigned long
+	stg	%r0,160(%r15)
+	jg	sys_process_vm_readv
+
+ENTRY(compat_sys_process_vm_writev_wrapper)
+	lgfr	%r2,%r2			# compat_pid_t
+	llgtr	%r3,%r3			# struct compat_iovec __user *
+	llgfr	%r4,%r4			# unsigned long
+	llgtr	%r5,%r5			# struct compat_iovec __user *
+	llgfr	%r6,%r6			# unsigned long
+	llgf	%r0,164(%r15)		# unsigned long
+	stg	%r0,160(%r15)
+	jg	sys_process_vm_writev
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 37394b3413e2..c9ffe0025197 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -390,6 +390,8 @@ static __init void detect_machine_facilities(void)
 		S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS;
 	if (test_facility(40))
 		S390_lowcore.machine_flags |= MACHINE_FLAG_SPP;
+	if (test_facility(25))
+		S390_lowcore.machine_flags |= MACHINE_FLAG_STCKF;
 #endif
 }
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 8ac6bfa2786c..e58a462949b1 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -211,6 +211,8 @@ static void __init setup_zfcpdump(unsigned int console_devno)
 
 	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
 		return;
+	if (OLDMEM_BASE)
+		return;
 	if (console_devno != -1)
 		sprintf(str, " cio_ignore=all,!0.0.%04x,!0.0.%04x",
 			ipl_info.data.fcp.dev_id.devno, console_devno);
@@ -482,7 +484,7 @@ static void __init setup_memory_end(void)
 
 
 #ifdef CONFIG_ZFCPDUMP
-	if (ipl_info.type == IPL_TYPE_FCP_DUMP) {
+	if (ipl_info.type == IPL_TYPE_FCP_DUMP && !OLDMEM_BASE) {
 		memory_end = ZFCPDUMP_HSA_SIZE;
 		memory_end_set = 1;
 	}
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 73eb08c874fb..bcab2f04ba58 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -348,3 +348,5 @@ SYSCALL(sys_open_by_handle_at,sys_open_by_handle_at,compat_sys_open_by_handle_at
 SYSCALL(sys_clock_adjtime,sys_clock_adjtime,compat_sys_clock_adjtime_wrapper)
 SYSCALL(sys_syncfs,sys_syncfs,sys_syncfs_wrapper)
 SYSCALL(sys_setns,sys_setns,sys_setns_wrapper)
+SYSCALL(sys_process_vm_readv,sys_process_vm_readv,compat_sys_process_vm_readv_wrapper) /* 340 */
+SYSCALL(sys_process_vm_writev,sys_process_vm_writev,compat_sys_process_vm_writev_wrapper)
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 77b8942b9a15..fdb5b8cb260f 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -68,8 +68,10 @@ static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
 	return mask;
 }
 
-static void add_cpus_to_mask(struct topology_cpu *tl_cpu,
-			     struct mask_info *book, struct mask_info *core)
+static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
+					  struct mask_info *book,
+					  struct mask_info *core,
+					  int z10)
 {
 	unsigned int cpu;
 
@@ -88,10 +90,16 @@ static void add_cpus_to_mask(struct topology_cpu *tl_cpu,
 			cpu_book_id[lcpu] = book->id;
 #endif
 			cpumask_set_cpu(lcpu, &core->mask);
-			cpu_core_id[lcpu] = core->id;
+			if (z10) {
+				cpu_core_id[lcpu] = rcpu;
+				core = core->next;
+			} else {
+				cpu_core_id[lcpu] = core->id;
+			}
 			smp_cpu_polarization[lcpu] = tl_cpu->pp;
 		}
 	}
+	return core;
 }
 
 static void clear_masks(void)
@@ -123,18 +131,41 @@ static void tl_to_cores(struct sysinfo_15_1_x *info)
 {
 #ifdef CONFIG_SCHED_BOOK
 	struct mask_info *book = &book_info;
+	struct cpuid cpu_id;
 #else
 	struct mask_info *book = NULL;
 #endif
 	struct mask_info *core = &core_info;
 	union topology_entry *tle, *end;
+	int z10 = 0;
 
-
+#ifdef CONFIG_SCHED_BOOK
+	get_cpu_id(&cpu_id);
+	z10 = cpu_id.machine == 0x2097 || cpu_id.machine == 0x2098;
+#endif
 	spin_lock_irq(&topology_lock);
 	clear_masks();
 	tle = info->tle;
 	end = (union topology_entry *)((unsigned long)info + info->length);
 	while (tle < end) {
+#ifdef CONFIG_SCHED_BOOK
+		if (z10) {
+			switch (tle->nl) {
+			case 1:
+				book = book->next;
+				book->id = tle->container.id;
+				break;
+			case 0:
+				core = add_cpus_to_mask(&tle->cpu, book, core, z10);
+				break;
+			default:
+				clear_masks();
+				goto out;
+			}
+			tle = next_tle(tle);
+			continue;
+		}
+#endif
 		switch (tle->nl) {
#ifdef CONFIG_SCHED_BOOK
 		case 2:
@@ -147,7 +178,7 @@ static void tl_to_cores(struct sysinfo_15_1_x *info)
 			core->id = tle->container.id;
 			break;
 		case 0:
-			add_cpus_to_mask(&tle->cpu, book, core);
+			add_cpus_to_mask(&tle->cpu, book, core, z10);
 			break;
 		default:
 			clear_masks();
@@ -328,8 +359,8 @@ void __init s390_init_cpu_topology(void)
 	for (i = 0; i < TOPOLOGY_NR_MAG; i++)
 		printk(" %d", info->mag[i]);
 	printk(" / %d\n", info->mnest);
-	alloc_masks(info, &core_info, 2);
+	alloc_masks(info, &core_info, 1);
 #ifdef CONFIG_SCHED_BOOK
-	alloc_masks(info, &book_info, 3);
+	alloc_masks(info, &book_info, 2);
 #endif
 }
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 56fe6bc81fee..e4c79ebb40e6 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -43,6 +43,8 @@ SECTIONS
 
 	NOTES :text :note
 
+	.dummy : { *(.dummy) } :data
+
 	RODATA
 
 #ifdef CONFIG_SHARED_KERNEL
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 87cedd61be04..8943e82cd4d9 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -70,7 +70,7 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
 		return -EOPNOTSUPP;
 	}
 
-	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 	vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM;
 	vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL;
 	vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT;
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index c7c51898984e..02434543eabb 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -132,7 +132,6 @@ static int handle_stop(struct kvm_vcpu *vcpu)
 	int rc = 0;
 
 	vcpu->stat.exit_stop_request++;
-	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 	spin_lock_bh(&vcpu->arch.local_int.lock);
 	if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) {
 		vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP;
@@ -149,6 +148,8 @@ static int handle_stop(struct kvm_vcpu *vcpu)
 	}
 
 	if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) {
+		atomic_set_mask(CPUSTAT_STOPPED,
+				&vcpu->arch.sie_block->cpuflags);
 		vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP;
 		VCPU_EVENT(vcpu, 3, "%s", "cpu stopped");
 		rc = -EOPNOTSUPP;
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 87c16705b381..278ee009ce65 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -252,6 +252,7 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
 			offsetof(struct _lowcore, restart_psw), sizeof(psw_t));
 		if (rc == -EFAULT)
 			exception = 1;
+		atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 		break;
 
 	case KVM_S390_PROGRAM_INT:
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 0bd3bea1e4cd..d1c445732451 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -65,6 +65,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
 	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
 	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
+	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
 	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
 	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
 	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
@@ -127,6 +128,7 @@ int kvm_dev_ioctl_check_extension(long ext)
 	switch (ext) {
 	case KVM_CAP_S390_PSW:
 	case KVM_CAP_S390_GMAP:
+	case KVM_CAP_SYNC_MMU:
 		r = 1;
 		break;
 	default:
@@ -270,10 +272,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	restore_fp_regs(&vcpu->arch.guest_fpregs);
 	restore_access_regs(vcpu->arch.guest_acrs);
 	gmap_enable(vcpu->arch.gmap);
+	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
+	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 	gmap_disable(vcpu->arch.gmap);
 	save_fp_regs(&vcpu->arch.guest_fpregs);
 	save_access_regs(vcpu->arch.guest_acrs);
@@ -301,7 +305,9 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
 
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
-	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | CPUSTAT_SM);
+	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
+						    CPUSTAT_SM |
+						    CPUSTAT_STOPPED);
 	vcpu->arch.sie_block->ecb   = 6;
 	vcpu->arch.sie_block->eca   = 0xC1002001U;
 	vcpu->arch.sie_block->fac   = (int) (long) facilities;
@@ -428,7 +434,7 @@ static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
 {
 	int rc = 0;
 
-	if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
+	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
 		rc = -EBUSY;
 	else {
 		vcpu->run->psw_mask = psw.mask;
@@ -501,7 +507,7 @@ rerun_vcpu:
 	if (vcpu->sigset_active)
 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
 
-	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 
 	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 391626361084..d02638959922 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -336,6 +336,7 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
 	u64 address1 = disp1 + base1 ? vcpu->arch.guest_gprs[base1] : 0;
 	u64 address2 = disp2 + base2 ? vcpu->arch.guest_gprs[base2] : 0;
 	struct vm_area_struct *vma;
+	unsigned long user_address;
 
 	vcpu->stat.instruction_tprot++;
 
@@ -349,9 +350,14 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
 		return -EOPNOTSUPP;
 
 
+	/* we must resolve the address without holding the mmap semaphore.
+	 * This is ok since the userspace hypervisor is not supposed to change
+	 * the mapping while the guest queries the memory. Otherwise the guest
+	 * might crash or get wrong info anyway. */
+	user_address = (unsigned long) __guestaddr_to_user(vcpu, address1);
+
 	down_read(&current->mm->mmap_sem);
-	vma = find_vma(current->mm,
-		       (unsigned long) __guestaddr_to_user(vcpu, address1));
+	vma = find_vma(current->mm, user_address);
 	if (!vma) {
 		up_read(&current->mm->mmap_sem);
 		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index f815118835f3..0a7941d74bc6 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -31,9 +31,11 @@
 #define SIGP_SET_PREFIX        0x0d
 #define SIGP_STORE_STATUS_ADDR 0x0e
 #define SIGP_SET_ARCH          0x12
+#define SIGP_SENSE_RUNNING     0x15
 
 /* cpu status bits */
 #define SIGP_STAT_EQUIPMENT_CHECK   0x80000000UL
+#define SIGP_STAT_NOT_RUNNING       0x00000400UL
 #define SIGP_STAT_INCORRECT_STATE   0x00000200UL
 #define SIGP_STAT_INVALID_PARAMETER 0x00000100UL
 #define SIGP_STAT_EXT_CALL_PENDING  0x00000080UL
@@ -57,8 +59,8 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
 	spin_lock(&fi->lock);
 	if (fi->local_int[cpu_addr] == NULL)
 		rc = 3; /* not operational */
-	else if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
-		 & CPUSTAT_RUNNING) {
+	else if (!(atomic_read(fi->local_int[cpu_addr]->cpuflags)
+		   & CPUSTAT_STOPPED)) {
 		*reg &= 0xffffffff00000000UL;
 		rc = 1; /* status stored */
 	} else {
@@ -251,7 +253,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
 
 	spin_lock_bh(&li->lock);
 	/* cpu must be in stopped state */
-	if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
+	if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
 		rc = 1; /* incorrect state */
 		*reg &= SIGP_STAT_INCORRECT_STATE;
 		kfree(inti);
@@ -275,6 +277,38 @@ out_fi:
 	return rc;
 }
 
+static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
+				unsigned long *reg)
+{
+	int rc;
+	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
+
+	if (cpu_addr >= KVM_MAX_VCPUS)
+		return 3; /* not operational */
+
+	spin_lock(&fi->lock);
+	if (fi->local_int[cpu_addr] == NULL)
+		rc = 3; /* not operational */
+	else {
+		if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
+		    & CPUSTAT_RUNNING) {
+			/* running */
+			rc = 1;
+		} else {
+			/* not running */
+			*reg &= 0xffffffff00000000UL;
+			*reg |= SIGP_STAT_NOT_RUNNING;
+			rc = 0;
+		}
+	}
+	spin_unlock(&fi->lock);
+
+	VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr,
+		   rc);
+
+	return rc;
+}
+
 int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
 {
 	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
@@ -331,6 +365,11 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
 		rc = __sigp_set_prefix(vcpu, cpu_addr, parameter,
 				       &vcpu->arch.guest_gprs[r1]);
 		break;
+	case SIGP_SENSE_RUNNING:
+		vcpu->stat.instruction_sigp_sense_running++;
+		rc = __sigp_sense_running(vcpu, cpu_addr,
+					  &vcpu->arch.guest_gprs[r1]);
+		break;
 	case SIGP_RESTART:
 		vcpu->stat.instruction_sigp_restart++;
 		/* user space must know about restart */
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 1766def5bc3f..a9a301866b3c 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -587,8 +587,13 @@ static void pfault_interrupt(unsigned int ext_int_code,
 		} else {
 			/* Completion interrupt was faster than initial
 			 * interrupt. Set pfault_wait to -1 so the initial
-			 * interrupt doesn't put the task to sleep. */
-			tsk->thread.pfault_wait = -1;
+			 * interrupt doesn't put the task to sleep.
+			 * If the task is not running, ignore the completion
+			 * interrupt since it must be a leftover of a PFAULT
+			 * CANCEL operation which didn't remove all pending
+			 * completion interrupts. */
+			if (tsk->state == TASK_RUNNING)
+				tsk->thread.pfault_wait = -1;
 		}
 		put_task_struct(tsk);
 	} else {