From f433c4aec9999d1bf2ed8c328196f3b0ad5f75db Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Sun, 24 Jul 2011 10:47:58 +0200 Subject: [S390] irqs: Do not trace arch_local_{*,irq_*} functions Do not trace arch_local_save_flags(), arch_local_irq_*() and friends. Although they are marked inline, gcc may still make a function out of them and add it to the pool of functions that are traced by the function tracer. This can cause undesirable results (kernel panic, triple faults, etc). Add the notrace notation to prevent them from ever being traced. Cc: Heiko Carstens Signed-off-by: Steven Rostedt Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/irqflags.h | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/irqflags.h b/arch/s390/include/asm/irqflags.h index 865d6d891ace..38fdf451febb 100644 --- a/arch/s390/include/asm/irqflags.h +++ b/arch/s390/include/asm/irqflags.h @@ -29,42 +29,42 @@ }) /* set system mask. 
*/ -static inline void __arch_local_irq_ssm(unsigned long flags) +static inline notrace void __arch_local_irq_ssm(unsigned long flags) { asm volatile("ssm %0" : : "Q" (flags) : "memory"); } -static inline unsigned long arch_local_save_flags(void) +static inline notrace unsigned long arch_local_save_flags(void) { return __arch_local_irq_stosm(0x00); } -static inline unsigned long arch_local_irq_save(void) +static inline notrace unsigned long arch_local_irq_save(void) { return __arch_local_irq_stnsm(0xfc); } -static inline void arch_local_irq_disable(void) +static inline notrace void arch_local_irq_disable(void) { arch_local_irq_save(); } -static inline void arch_local_irq_enable(void) +static inline notrace void arch_local_irq_enable(void) { __arch_local_irq_stosm(0x03); } -static inline void arch_local_irq_restore(unsigned long flags) +static inline notrace void arch_local_irq_restore(unsigned long flags) { __arch_local_irq_ssm(flags); } -static inline bool arch_irqs_disabled_flags(unsigned long flags) +static inline notrace bool arch_irqs_disabled_flags(unsigned long flags) { return !(flags & (3UL << (BITS_PER_LONG - 8))); } -static inline bool arch_irqs_disabled(void) +static inline notrace bool arch_irqs_disabled(void) { return arch_irqs_disabled_flags(arch_local_save_flags()); } -- cgit v1.2.3 From bb25b9ba3e33e941dc48048d0a784e6a05e5648a Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Sun, 24 Jul 2011 10:48:17 +0200 Subject: [S390] kvm: handle tprot intercepts When running a kvm guest we can get intercepts for tprot, if the host page table is read-only or not populated. This patch implements the most common case (linux memory detection). This also allows host copy on write for guest memory on newer systems. 
Signed-off-by: Christian Borntraeger Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/kvm_host.h | 1 + arch/s390/kvm/intercept.c | 1 + arch/s390/kvm/kvm-s390.c | 1 + arch/s390/kvm/kvm-s390.h | 1 + arch/s390/kvm/priv.c | 49 ++++++++++++++++++++++++++++++++++++++++ 5 files changed, 53 insertions(+) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index cef7dbf69dfc..e9bcdca32a32 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -138,6 +138,7 @@ struct kvm_vcpu_stat { u32 instruction_chsc; u32 instruction_stsi; u32 instruction_stfl; + u32 instruction_tprot; u32 instruction_sigp_sense; u32 instruction_sigp_emergency; u32 instruction_sigp_stop; diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index f7b6df45d8be..b5312050b224 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c @@ -105,6 +105,7 @@ static intercept_handler_t instruction_handlers[256] = { [0xae] = kvm_s390_handle_sigp, [0xb2] = kvm_s390_handle_b2, [0xb7] = handle_lctl, + [0xe5] = kvm_s390_handle_e5, [0xeb] = handle_lctlg, }; diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 67345ae7ce8d..123ebea72282 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -62,6 +62,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { "instruction_chsc", VCPU_STAT(instruction_chsc) }, { "instruction_stsi", VCPU_STAT(instruction_stsi) }, { "instruction_stfl", VCPU_STAT(instruction_stfl) }, + { "instruction_tprot", VCPU_STAT(instruction_tprot) }, { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) }, { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) }, { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) }, diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index a7b7586626db..65e2201a555b 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -87,6 +87,7 @@ static inline void 
kvm_s390_vcpu_set_mem(struct kvm_vcpu *vcpu) /* implemented in priv.c */ int kvm_s390_handle_b2(struct kvm_vcpu *vcpu); +int kvm_s390_handle_e5(struct kvm_vcpu *vcpu); /* implemented in sigp.c */ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu); diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 73c47bd95db3..391626361084 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -326,3 +326,52 @@ int kvm_s390_handle_b2(struct kvm_vcpu *vcpu) } return -EOPNOTSUPP; } + +static int handle_tprot(struct kvm_vcpu *vcpu) +{ + int base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28; + int disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16; + int base2 = (vcpu->arch.sie_block->ipb & 0xf000) >> 12; + int disp2 = vcpu->arch.sie_block->ipb & 0x0fff; + u64 address1 = disp1 + base1 ? vcpu->arch.guest_gprs[base1] : 0; + u64 address2 = disp2 + base2 ? vcpu->arch.guest_gprs[base2] : 0; + struct vm_area_struct *vma; + + vcpu->stat.instruction_tprot++; + + /* we only handle the Linux memory detection case: + * access key == 0 + * guest DAT == off + * everything else goes to userspace. */ + if (address2 & 0xf0) + return -EOPNOTSUPP; + if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT) + return -EOPNOTSUPP; + + + down_read(¤t->mm->mmap_sem); + vma = find_vma(current->mm, + (unsigned long) __guestaddr_to_user(vcpu, address1)); + if (!vma) { + up_read(¤t->mm->mmap_sem); + return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); + } + + vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44); + if (!(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_READ)) + vcpu->arch.sie_block->gpsw.mask |= (1ul << 44); + if (!(vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_READ)) + vcpu->arch.sie_block->gpsw.mask |= (2ul << 44); + + up_read(¤t->mm->mmap_sem); + return 0; +} + +int kvm_s390_handle_e5(struct kvm_vcpu *vcpu) +{ + /* For e5xx... 
instructions we only handle TPROT */ + if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01) + return handle_tprot(vcpu); + return -EOPNOTSUPP; +} + -- cgit v1.2.3 From 603d1a50acf252621a3598618b018b8123aaba64 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Sun, 24 Jul 2011 10:48:18 +0200 Subject: [S390] move sie code to entry.S The entry to / exit from sie has subtle dependencies to the first level interrupt handler. Move the sie assembler code to entry64.S and replace the SIE_HOOK callback with a test and the new _TIF_SIE bit. In addition this patch fixes several problems in regard to the check for the_TIF_EXIT_SIE bits. The old code checked the TIF bits before executing the interrupt handler and it only modified the instruction address if it pointed directly to the sie instruction. In both cases it could miss a TIF bit that normally would cause an exit from the guest and would reenter the guest context. Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/thread_info.h | 2 + arch/s390/kernel/entry64.S | 80 +++++++++++++++++++++++++++--- arch/s390/kernel/s390_ksyms.c | 4 ++ arch/s390/kvm/Makefile | 2 +- arch/s390/kvm/sie64a.S | 98 ------------------------------------- 5 files changed, 79 insertions(+), 107 deletions(-) delete mode 100644 arch/s390/kvm/sie64a.S (limited to 'arch/s390') diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h index ad1382f7932e..1a5dbb6f1495 100644 --- a/arch/s390/include/asm/thread_info.h +++ b/arch/s390/include/asm/thread_info.h @@ -94,6 +94,7 @@ static inline struct thread_info *current_thread_info(void) #define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */ #define TIF_SECCOMP 10 /* secure computing */ #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */ +#define TIF_SIE 12 /* guest execution active */ #define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */ #define TIF_31BIT 17 /* 32bit process */ @@ -113,6 +114,7 @@ static inline 
struct thread_info *current_thread_info(void) #define _TIF_SYSCALL_AUDIT (1<>8 | _TIF_SYSCALL_AUDIT>>8 | \ _TIF_SECCOMP>>8 | _TIF_SYSCALL_TRACEPOINT>>8) +_TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING) #define BASED(name) name-system_call(%r13) + .macro SPP newpp +#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) + tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_SPP + jz .+8 + .insn s,0xb2800000,\newpp +#endif + .endm + .macro HANDLE_SIE_INTERCEPT #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) - lg %r3,__LC_SIE_HOOK - ltgr %r3,%r3 + tm __TI_flags+6(%r12),_TIF_SIE>>8 jz 0f - basr %r14,%r3 + SPP __LC_CMF_HPP # set host id + clc SP_PSW+8(8,%r15),BASED(.Lsie_loop) + jl 0f + clc SP_PSW+8(8,%r15),BASED(.Lsie_done) + jhe 0f + mvc SP_PSW+8(8,%r15),BASED(.Lsie_loop) 0: #endif .endm @@ -465,6 +478,7 @@ pgm_check_handler: xc SP_ILC(4,%r15),SP_ILC(%r15) mvc SP_PSW(16,%r15),__LC_PGM_OLD_PSW lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct + HANDLE_SIE_INTERCEPT tm SP_PSW+1(%r15),0x01 # interrupting from user ? jz pgm_no_vtime UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER @@ -472,7 +486,6 @@ pgm_check_handler: mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER LAST_BREAK pgm_no_vtime: - HANDLE_SIE_INTERCEPT stg %r11,SP_ARGS(%r15) lgf %r3,__LC_PGM_ILC # load program interruption code lg %r4,__LC_TRANS_EXC_CODE @@ -507,6 +520,7 @@ pgm_per_std: CREATE_STACK_FRAME __LC_SAVE_AREA mvc SP_PSW(16,%r15),__LC_PGM_OLD_PSW lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct + HANDLE_SIE_INTERCEPT tm SP_PSW+1(%r15),0x01 # interrupting from user ? jz pgm_no_vtime2 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER @@ -514,7 +528,6 @@ pgm_per_std: mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER LAST_BREAK pgm_no_vtime2: - HANDLE_SIE_INTERCEPT lg %r1,__TI_task(%r12) tm SP_PSW+1(%r15),0x01 # kernel per event ? 
jz kernel_per @@ -579,6 +592,7 @@ io_int_handler: CREATE_STACK_FRAME __LC_SAVE_AREA+40 mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct + HANDLE_SIE_INTERCEPT tm SP_PSW+1(%r15),0x01 # interrupting from user ? jz io_no_vtime UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER @@ -586,7 +600,6 @@ io_int_handler: mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER LAST_BREAK io_no_vtime: - HANDLE_SIE_INTERCEPT TRACE_IRQS_OFF la %r2,SP_PTREGS(%r15) # address of register-save area brasl %r14,do_IRQ # call standard irq handler @@ -714,6 +727,7 @@ ext_int_handler: CREATE_STACK_FRAME __LC_SAVE_AREA+40 mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct + HANDLE_SIE_INTERCEPT tm SP_PSW+1(%r15),0x01 # interrupting from user ? jz ext_no_vtime UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER @@ -721,7 +735,6 @@ ext_int_handler: mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER LAST_BREAK ext_no_vtime: - HANDLE_SIE_INTERCEPT TRACE_IRQS_OFF lghi %r1,4096 la %r2,SP_PTREGS(%r15) # address of register-save area @@ -785,6 +798,7 @@ mcck_int_main: lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid? jno mcck_no_vtime # no -> no timer update + HANDLE_SIE_INTERCEPT tm SP_PSW+1(%r15),0x01 # interrupting from user ? 
jz mcck_no_vtime UPDATE_VTIME __LC_EXIT_TIMER,__LC_MCCK_ENTER_TIMER,__LC_USER_TIMER @@ -804,7 +818,6 @@ mcck_no_vtime: stosm __SF_EMPTY(%r15),0x04 # turn dat on tm __TI_flags+7(%r12),_TIF_MCCK_PENDING jno mcck_return - HANDLE_SIE_INTERCEPT TRACE_IRQS_OFF brasl %r14,s390_handle_mcck TRACE_IRQS_ON @@ -1036,6 +1049,57 @@ cleanup_io_restore_insn: .Lcritical_end: .quad __critical_end +#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) +/* + * sie64a calling convention: + * %r2 pointer to sie control block + * %r3 guest register save area + */ + .globl sie64a +sie64a: + stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers + stg %r2,__SF_EMPTY(%r15) # save control block pointer + stg %r3,__SF_EMPTY+8(%r15) # save guest register save area + lmg %r0,%r13,0(%r3) # load guest gprs 0-13 + lg %r14,__LC_THREAD_INFO # pointer thread_info struct + oi __TI_flags+6(%r14),_TIF_SIE>>8 +sie_loop: + lg %r14,__LC_THREAD_INFO # pointer thread_info struct + tm __TI_flags+7(%r14),_TIF_EXIT_SIE + jnz sie_exit + lg %r14,__SF_EMPTY(%r15) # get control block pointer + SPP __SF_EMPTY(%r15) # set guest id + sie 0(%r14) +sie_done: + SPP __LC_CMF_HPP # set host id + lg %r14,__LC_THREAD_INFO # pointer thread_info struct +sie_exit: + ni __TI_flags+6(%r14),255-(_TIF_SIE>>8) + lg %r14,__SF_EMPTY+8(%r15) # load guest register save area + stmg %r0,%r13,0(%r14) # save guest gprs 0-13 + lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers + lghi %r2,0 + br %r14 +sie_fault: + lg %r14,__LC_THREAD_INFO # pointer thread_info struct + ni __TI_flags+6(%r14),255-(_TIF_SIE>>8) + lg %r14,__SF_EMPTY+8(%r15) # load guest register save area + stmg %r0,%r13,0(%r14) # save guest gprs 0-13 + lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers + lghi %r2,-EFAULT + br %r14 + + .align 8 +.Lsie_loop: + .quad sie_loop +.Lsie_done: + .quad sie_done + + .section __ex_table,"a" + .quad sie_loop,sie_fault + .previous +#endif + .section .rodata, "a" #define SYSCALL(esa,esame,emu) .long esame .globl sys_call_table diff 
--git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c index 656fcbb9bd83..57b536649b00 100644 --- a/arch/s390/kernel/s390_ksyms.c +++ b/arch/s390/kernel/s390_ksyms.c @@ -1,6 +1,10 @@ #include +#include #include #ifdef CONFIG_FUNCTION_TRACER EXPORT_SYMBOL(_mcount); #endif +#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) +EXPORT_SYMBOL(sie64a); +#endif diff --git a/arch/s390/kvm/Makefile b/arch/s390/kvm/Makefile index 860d26514c08..3975722bb19d 100644 --- a/arch/s390/kvm/Makefile +++ b/arch/s390/kvm/Makefile @@ -10,5 +10,5 @@ common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o) ccflags-y := -Ivirt/kvm -Iarch/s390/kvm -kvm-objs := $(common-objs) kvm-s390.o sie64a.o intercept.o interrupt.o priv.o sigp.o diag.o +kvm-objs := $(common-objs) kvm-s390.o intercept.o interrupt.o priv.o sigp.o diag.o obj-$(CONFIG_KVM) += kvm.o diff --git a/arch/s390/kvm/sie64a.S b/arch/s390/kvm/sie64a.S deleted file mode 100644 index 5faa1b1b23fa..000000000000 --- a/arch/s390/kvm/sie64a.S +++ /dev/null @@ -1,98 +0,0 @@ -/* - * sie64a.S - low level sie call - * - * Copyright IBM Corp. 2008,2010 - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License (version 2 only) - * as published by the Free Software Foundation. 
- * - * Author(s): Heiko Carstens - * Christian Ehrhardt - */ - -#include -#include -#include -#include -#include -#include - -_TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING) - -/* - * offsets into stackframe - * SP_ = offsets into stack sie64 is called with - * SPI_ = offsets into irq stack - */ -SP_GREGS = __SF_EMPTY -SP_HOOK = __SF_EMPTY+8 -SP_GPP = __SF_EMPTY+16 -SPI_PSW = STACK_FRAME_OVERHEAD + __PT_PSW - - - .macro SPP newpp - tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_SPP - jz 0f - .insn s,0xb2800000,\newpp -0: - .endm - -sie_irq_handler: - SPP __LC_CMF_HPP # set host id - larl %r2,sie_inst - clg %r2,SPI_PSW+8(0,%r15) # intercepted sie - jne 1f - xc __LC_SIE_HOOK(8),__LC_SIE_HOOK - lg %r2,__LC_THREAD_INFO # pointer thread_info struct - tm __TI_flags+7(%r2),_TIF_EXIT_SIE - jz 0f - larl %r2,sie_exit # work pending, leave sie - stg %r2,SPI_PSW+8(0,%r15) - br %r14 -0: larl %r2,sie_reenter # re-enter with guest id - stg %r2,SPI_PSW+8(0,%r15) -1: br %r14 - -/* - * sie64a calling convention: - * %r2 pointer to sie control block - * %r3 guest register save area - */ - .globl sie64a -sie64a: - stg %r3,SP_GREGS(%r15) # save guest register save area - stmg %r6,%r14,__SF_GPRS(%r15) # save registers on entry - lgr %r14,%r2 # pointer to sie control block - larl %r5,sie_irq_handler - stg %r2,SP_GPP(%r15) - stg %r5,SP_HOOK(%r15) # save hook target - lmg %r0,%r13,0(%r3) # load guest gprs 0-13 -sie_reenter: - mvc __LC_SIE_HOOK(8),SP_HOOK(%r15) - SPP SP_GPP(%r15) # set guest id -sie_inst: - sie 0(%r14) - xc __LC_SIE_HOOK(8),__LC_SIE_HOOK - SPP __LC_CMF_HPP # set host id -sie_exit: - lg %r14,SP_GREGS(%r15) - stmg %r0,%r13,0(%r14) # save guest gprs 0-13 - lghi %r2,0 - lmg %r6,%r14,__SF_GPRS(%r15) - br %r14 - -sie_err: - xc __LC_SIE_HOOK(8),__LC_SIE_HOOK - SPP __LC_CMF_HPP # set host id - lg %r14,SP_GREGS(%r15) - stmg %r0,%r13,0(%r14) # save guest gprs 0-13 - lghi %r2,-EFAULT - lmg %r6,%r14,__SF_GPRS(%r15) - br %r14 - - .section __ex_table,"a" - .quad 
sie_inst,sie_err - .quad sie_exit,sie_err - .quad sie_reenter,sie_err - .previous -- cgit v1.2.3 From 144d634a21caff1d54cb4bb0d073774e88130045 Mon Sep 17 00:00:00 2001 From: Jan Glauber Date: Sun, 24 Jul 2011 10:48:19 +0200 Subject: [S390] fix s390 assembler code alignments The alignment is missing for various global symbols in s390 assembly code. With a recent gcc and an instruction like stgrl this can lead to a specification exception if the instruction uses such a mis-aligned address. Specify the alignment explicitely and while add it define __ALIGN for s390 and use the ENTRY define to save some lines of code. Signed-off-by: Jan Glauber Signed-off-by: Martin Schwidefsky --- arch/s390/boot/compressed/head31.S | 4 +- arch/s390/boot/compressed/head64.S | 4 +- arch/s390/include/asm/linkage.h | 5 +- arch/s390/kernel/base.S | 25 +- arch/s390/kernel/compat_wrapper.S | 836 ++++++++++++----------------------- arch/s390/kernel/entry.S | 32 +- arch/s390/kernel/entry64.S | 35 +- arch/s390/kernel/head.S | 7 +- arch/s390/kernel/head31.S | 11 +- arch/s390/kernel/head64.S | 11 +- arch/s390/kernel/mcount.S | 16 +- arch/s390/kernel/mcount64.S | 16 +- arch/s390/kernel/reipl.S | 5 +- arch/s390/kernel/reipl64.S | 5 +- arch/s390/kernel/relocate_kernel.S | 6 +- arch/s390/kernel/relocate_kernel64.S | 6 +- arch/s390/kernel/sclp.S | 5 +- arch/s390/kernel/switch_cpu.S | 8 +- arch/s390/kernel/switch_cpu64.S | 8 +- arch/s390/kernel/swsusp_asm64.S | 8 +- arch/s390/lib/qrnnd.S | 5 +- 21 files changed, 378 insertions(+), 680 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/boot/compressed/head31.S b/arch/s390/boot/compressed/head31.S index 2a5523a32bcc..e8c9e18b8039 100644 --- a/arch/s390/boot/compressed/head31.S +++ b/arch/s390/boot/compressed/head31.S @@ -7,14 +7,14 @@ */ #include +#include #include #include #include #include "sizes.h" __HEAD - .globl startup_continue -startup_continue: +ENTRY(startup_continue) basr %r13,0 # get base .LPG1: # setup stack diff --git 
a/arch/s390/boot/compressed/head64.S b/arch/s390/boot/compressed/head64.S index 2982cb140550..f86a4eef28a9 100644 --- a/arch/s390/boot/compressed/head64.S +++ b/arch/s390/boot/compressed/head64.S @@ -7,14 +7,14 @@ */ #include +#include #include #include #include #include "sizes.h" __HEAD - .globl startup_continue -startup_continue: +ENTRY(startup_continue) basr %r13,0 # get base .LPG1: # setup stack diff --git a/arch/s390/include/asm/linkage.h b/arch/s390/include/asm/linkage.h index 291c2d01c44f..fc8a8284778e 100644 --- a/arch/s390/include/asm/linkage.h +++ b/arch/s390/include/asm/linkage.h @@ -1,6 +1,9 @@ #ifndef __ASM_LINKAGE_H #define __ASM_LINKAGE_H -/* Nothing to see here... */ +#include + +#define __ALIGN .align 4, 0x07 +#define __ALIGN_STR __stringify(__ALIGN) #endif diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S index 15e46ca94335..209938c1dfc8 100644 --- a/arch/s390/kernel/base.S +++ b/arch/s390/kernel/base.S @@ -6,13 +6,13 @@ * Michael Holzheu */ +#include #include #include #ifdef CONFIG_64BIT - .globl s390_base_mcck_handler -s390_base_mcck_handler: +ENTRY(s390_base_mcck_handler) basr %r13,0 0: lg %r15,__LC_PANIC_STACK # load panic stack aghi %r15,-STACK_FRAME_OVERHEAD @@ -26,13 +26,13 @@ s390_base_mcck_handler: lpswe __LC_MCK_OLD_PSW .section .bss + .align 8 .globl s390_base_mcck_handler_fn s390_base_mcck_handler_fn: .quad 0 .previous - .globl s390_base_ext_handler -s390_base_ext_handler: +ENTRY(s390_base_ext_handler) stmg %r0,%r15,__LC_SAVE_AREA basr %r13,0 0: aghi %r15,-STACK_FRAME_OVERHEAD @@ -46,13 +46,13 @@ s390_base_ext_handler: lpswe __LC_EXT_OLD_PSW .section .bss + .align 8 .globl s390_base_ext_handler_fn s390_base_ext_handler_fn: .quad 0 .previous - .globl s390_base_pgm_handler -s390_base_pgm_handler: +ENTRY(s390_base_pgm_handler) stmg %r0,%r15,__LC_SAVE_AREA basr %r13,0 0: aghi %r15,-STACK_FRAME_OVERHEAD @@ -70,6 +70,7 @@ disabled_wait_psw: .quad 0x0002000180000000,0x0000000000000000 + s390_base_pgm_handler .section .bss + 
.align 8 .globl s390_base_pgm_handler_fn s390_base_pgm_handler_fn: .quad 0 @@ -77,8 +78,7 @@ s390_base_pgm_handler_fn: #else /* CONFIG_64BIT */ - .globl s390_base_mcck_handler -s390_base_mcck_handler: +ENTRY(s390_base_mcck_handler) basr %r13,0 0: l %r15,__LC_PANIC_STACK # load panic stack ahi %r15,-STACK_FRAME_OVERHEAD @@ -93,13 +93,13 @@ s390_base_mcck_handler: 2: .long s390_base_mcck_handler_fn .section .bss + .align 4 .globl s390_base_mcck_handler_fn s390_base_mcck_handler_fn: .long 0 .previous - .globl s390_base_ext_handler -s390_base_ext_handler: +ENTRY(s390_base_ext_handler) stm %r0,%r15,__LC_SAVE_AREA basr %r13,0 0: ahi %r15,-STACK_FRAME_OVERHEAD @@ -115,13 +115,13 @@ s390_base_ext_handler: 2: .long s390_base_ext_handler_fn .section .bss + .align 4 .globl s390_base_ext_handler_fn s390_base_ext_handler_fn: .long 0 .previous - .globl s390_base_pgm_handler -s390_base_pgm_handler: +ENTRY(s390_base_pgm_handler) stm %r0,%r15,__LC_SAVE_AREA basr %r13,0 0: ahi %r15,-STACK_FRAME_OVERHEAD @@ -142,6 +142,7 @@ disabled_wait_psw: .long 0x000a0000,0x00000000 + s390_base_pgm_handler .section .bss + .align 4 .globl s390_base_pgm_handler_fn s390_base_pgm_handler_fn: .long 0 diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S index 1f5eb789c3a7..08ab9aa6a0d5 100644 --- a/arch/s390/kernel/compat_wrapper.S +++ b/arch/s390/kernel/compat_wrapper.S @@ -7,86 +7,74 @@ * Thomas Spatzier (tspat@de.ibm.com) */ - .globl sys32_exit_wrapper -sys32_exit_wrapper: +#include + +ENTRY(sys32_exit_wrapper) lgfr %r2,%r2 # int jg sys_exit # branch to sys_exit - .globl sys32_read_wrapper -sys32_read_wrapper: +ENTRY(sys32_read_wrapper) llgfr %r2,%r2 # unsigned int llgtr %r3,%r3 # char * llgfr %r4,%r4 # size_t jg sys32_read # branch to sys_read - .globl sys32_write_wrapper -sys32_write_wrapper: +ENTRY(sys32_write_wrapper) llgfr %r2,%r2 # unsigned int llgtr %r3,%r3 # const char * llgfr %r4,%r4 # size_t jg sys32_write # branch to system call - .globl sys32_open_wrapper 
-sys32_open_wrapper: +ENTRY(sys32_open_wrapper) llgtr %r2,%r2 # const char * lgfr %r3,%r3 # int lgfr %r4,%r4 # int jg sys_open # branch to system call - .globl sys32_close_wrapper -sys32_close_wrapper: +ENTRY(sys32_close_wrapper) llgfr %r2,%r2 # unsigned int jg sys_close # branch to system call - .globl sys32_creat_wrapper -sys32_creat_wrapper: +ENTRY(sys32_creat_wrapper) llgtr %r2,%r2 # const char * lgfr %r3,%r3 # int jg sys_creat # branch to system call - .globl sys32_link_wrapper -sys32_link_wrapper: +ENTRY(sys32_link_wrapper) llgtr %r2,%r2 # const char * llgtr %r3,%r3 # const char * jg sys_link # branch to system call - .globl sys32_unlink_wrapper -sys32_unlink_wrapper: +ENTRY(sys32_unlink_wrapper) llgtr %r2,%r2 # const char * jg sys_unlink # branch to system call - .globl sys32_chdir_wrapper -sys32_chdir_wrapper: +ENTRY(sys32_chdir_wrapper) llgtr %r2,%r2 # const char * jg sys_chdir # branch to system call - .globl sys32_time_wrapper -sys32_time_wrapper: +ENTRY(sys32_time_wrapper) llgtr %r2,%r2 # int * jg compat_sys_time # branch to system call - .globl sys32_mknod_wrapper -sys32_mknod_wrapper: +ENTRY(sys32_mknod_wrapper) llgtr %r2,%r2 # const char * lgfr %r3,%r3 # int llgfr %r4,%r4 # dev jg sys_mknod # branch to system call - .globl sys32_chmod_wrapper -sys32_chmod_wrapper: +ENTRY(sys32_chmod_wrapper) llgtr %r2,%r2 # const char * llgfr %r3,%r3 # mode_t jg sys_chmod # branch to system call - .globl sys32_lchown16_wrapper -sys32_lchown16_wrapper: +ENTRY(sys32_lchown16_wrapper) llgtr %r2,%r2 # const char * llgfr %r3,%r3 # __kernel_old_uid_emu31_t llgfr %r4,%r4 # __kernel_old_uid_emu31_t jg sys32_lchown16 # branch to system call - .globl sys32_lseek_wrapper -sys32_lseek_wrapper: +ENTRY(sys32_lseek_wrapper) llgfr %r2,%r2 # unsigned int lgfr %r3,%r3 # off_t llgfr %r4,%r4 # unsigned int @@ -94,8 +82,7 @@ sys32_lseek_wrapper: #sys32_getpid_wrapper # void - .globl sys32_mount_wrapper -sys32_mount_wrapper: +ENTRY(sys32_mount_wrapper) llgtr %r2,%r2 # char * llgtr %r3,%r3 
# char * llgtr %r4,%r4 # char * @@ -103,102 +90,85 @@ sys32_mount_wrapper: llgtr %r6,%r6 # void * jg compat_sys_mount # branch to system call - .globl sys32_oldumount_wrapper -sys32_oldumount_wrapper: +ENTRY(sys32_oldumount_wrapper) llgtr %r2,%r2 # char * jg sys_oldumount # branch to system call - .globl sys32_setuid16_wrapper -sys32_setuid16_wrapper: +ENTRY(sys32_setuid16_wrapper) llgfr %r2,%r2 # __kernel_old_uid_emu31_t jg sys32_setuid16 # branch to system call #sys32_getuid16_wrapper # void - .globl sys32_ptrace_wrapper -sys32_ptrace_wrapper: +ENTRY(sys32_ptrace_wrapper) lgfr %r2,%r2 # long lgfr %r3,%r3 # long llgtr %r4,%r4 # long llgfr %r5,%r5 # long jg compat_sys_ptrace # branch to system call - .globl sys32_alarm_wrapper -sys32_alarm_wrapper: +ENTRY(sys32_alarm_wrapper) llgfr %r2,%r2 # unsigned int jg sys_alarm # branch to system call - .globl compat_sys_utime_wrapper -compat_sys_utime_wrapper: +ENTRY(compat_sys_utime_wrapper) llgtr %r2,%r2 # char * llgtr %r3,%r3 # struct compat_utimbuf * jg compat_sys_utime # branch to system call - .globl sys32_access_wrapper -sys32_access_wrapper: +ENTRY(sys32_access_wrapper) llgtr %r2,%r2 # const char * lgfr %r3,%r3 # int jg sys_access # branch to system call - .globl sys32_nice_wrapper -sys32_nice_wrapper: +ENTRY(sys32_nice_wrapper) lgfr %r2,%r2 # int jg sys_nice # branch to system call #sys32_sync_wrapper # void - .globl sys32_kill_wrapper -sys32_kill_wrapper: +ENTRY(sys32_kill_wrapper) lgfr %r2,%r2 # int lgfr %r3,%r3 # int jg sys_kill # branch to system call - .globl sys32_rename_wrapper -sys32_rename_wrapper: +ENTRY(sys32_rename_wrapper) llgtr %r2,%r2 # const char * llgtr %r3,%r3 # const char * jg sys_rename # branch to system call - .globl sys32_mkdir_wrapper -sys32_mkdir_wrapper: +ENTRY(sys32_mkdir_wrapper) llgtr %r2,%r2 # const char * lgfr %r3,%r3 # int jg sys_mkdir # branch to system call - .globl sys32_rmdir_wrapper -sys32_rmdir_wrapper: +ENTRY(sys32_rmdir_wrapper) llgtr %r2,%r2 # const char * jg sys_rmdir # 
branch to system call - .globl sys32_dup_wrapper -sys32_dup_wrapper: +ENTRY(sys32_dup_wrapper) llgfr %r2,%r2 # unsigned int jg sys_dup # branch to system call - .globl sys32_pipe_wrapper -sys32_pipe_wrapper: +ENTRY(sys32_pipe_wrapper) llgtr %r2,%r2 # u32 * jg sys_pipe # branch to system call - .globl compat_sys_times_wrapper -compat_sys_times_wrapper: +ENTRY(compat_sys_times_wrapper) llgtr %r2,%r2 # struct compat_tms * jg compat_sys_times # branch to system call - .globl sys32_brk_wrapper -sys32_brk_wrapper: +ENTRY(sys32_brk_wrapper) llgtr %r2,%r2 # unsigned long jg sys_brk # branch to system call - .globl sys32_setgid16_wrapper -sys32_setgid16_wrapper: +ENTRY(sys32_setgid16_wrapper) llgfr %r2,%r2 # __kernel_old_gid_emu31_t jg sys32_setgid16 # branch to system call #sys32_getgid16_wrapper # void - .globl sys32_signal_wrapper -sys32_signal_wrapper: +ENTRY(sys32_signal_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # __sighandler_t jg sys_signal @@ -207,55 +177,46 @@ sys32_signal_wrapper: #sys32_getegid16_wrapper # void - .globl sys32_acct_wrapper -sys32_acct_wrapper: +ENTRY(sys32_acct_wrapper) llgtr %r2,%r2 # char * jg sys_acct # branch to system call - .globl sys32_umount_wrapper -sys32_umount_wrapper: +ENTRY(sys32_umount_wrapper) llgtr %r2,%r2 # char * lgfr %r3,%r3 # int jg sys_umount # branch to system call - .globl compat_sys_ioctl_wrapper -compat_sys_ioctl_wrapper: +ENTRY(compat_sys_ioctl_wrapper) llgfr %r2,%r2 # unsigned int llgfr %r3,%r3 # unsigned int llgfr %r4,%r4 # unsigned int jg compat_sys_ioctl # branch to system call - .globl compat_sys_fcntl_wrapper -compat_sys_fcntl_wrapper: +ENTRY(compat_sys_fcntl_wrapper) llgfr %r2,%r2 # unsigned int llgfr %r3,%r3 # unsigned int llgfr %r4,%r4 # unsigned long jg compat_sys_fcntl # branch to system call - .globl sys32_setpgid_wrapper -sys32_setpgid_wrapper: +ENTRY(sys32_setpgid_wrapper) lgfr %r2,%r2 # pid_t lgfr %r3,%r3 # pid_t jg sys_setpgid # branch to system call - .globl sys32_umask_wrapper -sys32_umask_wrapper: 
+ENTRY(sys32_umask_wrapper) lgfr %r2,%r2 # int jg sys_umask # branch to system call - .globl sys32_chroot_wrapper -sys32_chroot_wrapper: +ENTRY(sys32_chroot_wrapper) llgtr %r2,%r2 # char * jg sys_chroot # branch to system call - .globl sys32_ustat_wrapper -sys32_ustat_wrapper: +ENTRY(sys32_ustat_wrapper) llgfr %r2,%r2 # dev_t llgtr %r3,%r3 # struct ustat * jg compat_sys_ustat - .globl sys32_dup2_wrapper -sys32_dup2_wrapper: +ENTRY(sys32_dup2_wrapper) llgfr %r2,%r2 # unsigned int llgfr %r3,%r3 # unsigned int jg sys_dup2 # branch to system call @@ -266,262 +227,220 @@ sys32_dup2_wrapper: #sys32_setsid_wrapper # void - .globl sys32_sigaction_wrapper -sys32_sigaction_wrapper: +ENTRY(sys32_sigaction_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # const struct old_sigaction * llgtr %r4,%r4 # struct old_sigaction32 * jg sys32_sigaction # branch to system call - .globl sys32_setreuid16_wrapper -sys32_setreuid16_wrapper: +ENTRY(sys32_setreuid16_wrapper) llgfr %r2,%r2 # __kernel_old_uid_emu31_t llgfr %r3,%r3 # __kernel_old_uid_emu31_t jg sys32_setreuid16 # branch to system call - .globl sys32_setregid16_wrapper -sys32_setregid16_wrapper: +ENTRY(sys32_setregid16_wrapper) llgfr %r2,%r2 # __kernel_old_gid_emu31_t llgfr %r3,%r3 # __kernel_old_gid_emu31_t jg sys32_setregid16 # branch to system call - .globl sys_sigsuspend_wrapper -sys_sigsuspend_wrapper: +ENTRY(sys_sigsuspend_wrapper) lgfr %r2,%r2 # int lgfr %r3,%r3 # int llgfr %r4,%r4 # old_sigset_t jg sys_sigsuspend - .globl compat_sys_sigpending_wrapper -compat_sys_sigpending_wrapper: +ENTRY(compat_sys_sigpending_wrapper) llgtr %r2,%r2 # compat_old_sigset_t * jg compat_sys_sigpending # branch to system call - .globl sys32_sethostname_wrapper -sys32_sethostname_wrapper: +ENTRY(sys32_sethostname_wrapper) llgtr %r2,%r2 # char * lgfr %r3,%r3 # int jg sys_sethostname # branch to system call - .globl compat_sys_setrlimit_wrapper -compat_sys_setrlimit_wrapper: +ENTRY(compat_sys_setrlimit_wrapper) llgfr %r2,%r2 # unsigned int llgtr 
%r3,%r3 # struct rlimit_emu31 * jg compat_sys_setrlimit # branch to system call - .globl compat_sys_old_getrlimit_wrapper -compat_sys_old_getrlimit_wrapper: +ENTRY(compat_sys_old_getrlimit_wrapper) llgfr %r2,%r2 # unsigned int llgtr %r3,%r3 # struct rlimit_emu31 * jg compat_sys_old_getrlimit # branch to system call - .globl compat_sys_getrlimit_wrapper -compat_sys_getrlimit_wrapper: +ENTRY(compat_sys_getrlimit_wrapper) llgfr %r2,%r2 # unsigned int llgtr %r3,%r3 # struct rlimit_emu31 * jg compat_sys_getrlimit # branch to system call - .globl sys32_mmap2_wrapper -sys32_mmap2_wrapper: +ENTRY(sys32_mmap2_wrapper) llgtr %r2,%r2 # struct mmap_arg_struct_emu31 * jg sys32_mmap2 # branch to system call - .globl compat_sys_getrusage_wrapper -compat_sys_getrusage_wrapper: +ENTRY(compat_sys_getrusage_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # struct rusage_emu31 * jg compat_sys_getrusage # branch to system call - .globl compat_sys_gettimeofday_wrapper -compat_sys_gettimeofday_wrapper: +ENTRY(compat_sys_gettimeofday_wrapper) llgtr %r2,%r2 # struct timeval_emu31 * llgtr %r3,%r3 # struct timezone * jg compat_sys_gettimeofday # branch to system call - .globl compat_sys_settimeofday_wrapper -compat_sys_settimeofday_wrapper: +ENTRY(compat_sys_settimeofday_wrapper) llgtr %r2,%r2 # struct timeval_emu31 * llgtr %r3,%r3 # struct timezone * jg compat_sys_settimeofday # branch to system call - .globl sys32_getgroups16_wrapper -sys32_getgroups16_wrapper: +ENTRY(sys32_getgroups16_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # __kernel_old_gid_emu31_t * jg sys32_getgroups16 # branch to system call - .globl sys32_setgroups16_wrapper -sys32_setgroups16_wrapper: +ENTRY(sys32_setgroups16_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # __kernel_old_gid_emu31_t * jg sys32_setgroups16 # branch to system call - .globl sys32_symlink_wrapper -sys32_symlink_wrapper: +ENTRY(sys32_symlink_wrapper) llgtr %r2,%r2 # const char * llgtr %r3,%r3 # const char * jg sys_symlink # branch to system call - .globl 
sys32_readlink_wrapper -sys32_readlink_wrapper: +ENTRY(sys32_readlink_wrapper) llgtr %r2,%r2 # const char * llgtr %r3,%r3 # char * lgfr %r4,%r4 # int jg sys_readlink # branch to system call - .globl sys32_uselib_wrapper -sys32_uselib_wrapper: +ENTRY(sys32_uselib_wrapper) llgtr %r2,%r2 # const char * jg sys_uselib # branch to system call - .globl sys32_swapon_wrapper -sys32_swapon_wrapper: +ENTRY(sys32_swapon_wrapper) llgtr %r2,%r2 # const char * lgfr %r3,%r3 # int jg sys_swapon # branch to system call - .globl sys32_reboot_wrapper -sys32_reboot_wrapper: +ENTRY(sys32_reboot_wrapper) lgfr %r2,%r2 # int lgfr %r3,%r3 # int llgfr %r4,%r4 # unsigned int llgtr %r5,%r5 # void * jg sys_reboot # branch to system call - .globl old32_readdir_wrapper -old32_readdir_wrapper: +ENTRY(old32_readdir_wrapper) llgfr %r2,%r2 # unsigned int llgtr %r3,%r3 # void * llgfr %r4,%r4 # unsigned int jg compat_sys_old_readdir # branch to system call - .globl old32_mmap_wrapper -old32_mmap_wrapper: +ENTRY(old32_mmap_wrapper) llgtr %r2,%r2 # struct mmap_arg_struct_emu31 * jg old32_mmap # branch to system call - .globl sys32_munmap_wrapper -sys32_munmap_wrapper: +ENTRY(sys32_munmap_wrapper) llgfr %r2,%r2 # unsigned long llgfr %r3,%r3 # size_t jg sys_munmap # branch to system call - .globl sys32_truncate_wrapper -sys32_truncate_wrapper: +ENTRY(sys32_truncate_wrapper) llgtr %r2,%r2 # const char * lgfr %r3,%r3 # long jg sys_truncate # branch to system call - .globl sys32_ftruncate_wrapper -sys32_ftruncate_wrapper: +ENTRY(sys32_ftruncate_wrapper) llgfr %r2,%r2 # unsigned int llgfr %r3,%r3 # unsigned long jg sys_ftruncate # branch to system call - .globl sys32_fchmod_wrapper -sys32_fchmod_wrapper: +ENTRY(sys32_fchmod_wrapper) llgfr %r2,%r2 # unsigned int llgfr %r3,%r3 # mode_t jg sys_fchmod # branch to system call - .globl sys32_fchown16_wrapper -sys32_fchown16_wrapper: +ENTRY(sys32_fchown16_wrapper) llgfr %r2,%r2 # unsigned int llgfr %r3,%r3 # compat_uid_t llgfr %r4,%r4 # compat_uid_t jg sys32_fchown16 
# branch to system call - .globl sys32_getpriority_wrapper -sys32_getpriority_wrapper: +ENTRY(sys32_getpriority_wrapper) lgfr %r2,%r2 # int lgfr %r3,%r3 # int jg sys_getpriority # branch to system call - .globl sys32_setpriority_wrapper -sys32_setpriority_wrapper: +ENTRY(sys32_setpriority_wrapper) lgfr %r2,%r2 # int lgfr %r3,%r3 # int lgfr %r4,%r4 # int jg sys_setpriority # branch to system call - .globl compat_sys_statfs_wrapper -compat_sys_statfs_wrapper: +ENTRY(compat_sys_statfs_wrapper) llgtr %r2,%r2 # char * llgtr %r3,%r3 # struct compat_statfs * jg compat_sys_statfs # branch to system call - .globl compat_sys_fstatfs_wrapper -compat_sys_fstatfs_wrapper: +ENTRY(compat_sys_fstatfs_wrapper) llgfr %r2,%r2 # unsigned int llgtr %r3,%r3 # struct compat_statfs * jg compat_sys_fstatfs # branch to system call - .globl compat_sys_socketcall_wrapper -compat_sys_socketcall_wrapper: +ENTRY(compat_sys_socketcall_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # u32 * jg compat_sys_socketcall # branch to system call - .globl sys32_syslog_wrapper -sys32_syslog_wrapper: +ENTRY(sys32_syslog_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # char * lgfr %r4,%r4 # int jg sys_syslog # branch to system call - .globl compat_sys_setitimer_wrapper -compat_sys_setitimer_wrapper: +ENTRY(compat_sys_setitimer_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # struct itimerval_emu31 * llgtr %r4,%r4 # struct itimerval_emu31 * jg compat_sys_setitimer # branch to system call - .globl compat_sys_getitimer_wrapper -compat_sys_getitimer_wrapper: +ENTRY(compat_sys_getitimer_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # struct itimerval_emu31 * jg compat_sys_getitimer # branch to system call - .globl compat_sys_newstat_wrapper -compat_sys_newstat_wrapper: +ENTRY(compat_sys_newstat_wrapper) llgtr %r2,%r2 # char * llgtr %r3,%r3 # struct stat_emu31 * jg compat_sys_newstat # branch to system call - .globl compat_sys_newlstat_wrapper -compat_sys_newlstat_wrapper: +ENTRY(compat_sys_newlstat_wrapper) llgtr %r2,%r2 # char * 
llgtr %r3,%r3 # struct stat_emu31 * jg compat_sys_newlstat # branch to system call - .globl compat_sys_newfstat_wrapper -compat_sys_newfstat_wrapper: +ENTRY(compat_sys_newfstat_wrapper) llgfr %r2,%r2 # unsigned int llgtr %r3,%r3 # struct stat_emu31 * jg compat_sys_newfstat # branch to system call #sys32_vhangup_wrapper # void - .globl compat_sys_wait4_wrapper -compat_sys_wait4_wrapper: +ENTRY(compat_sys_wait4_wrapper) lgfr %r2,%r2 # pid_t llgtr %r3,%r3 # unsigned int * lgfr %r4,%r4 # int llgtr %r5,%r5 # struct rusage * jg compat_sys_wait4 # branch to system call - .globl sys32_swapoff_wrapper -sys32_swapoff_wrapper: +ENTRY(sys32_swapoff_wrapper) llgtr %r2,%r2 # const char * jg sys_swapoff # branch to system call - .globl compat_sys_sysinfo_wrapper -compat_sys_sysinfo_wrapper: +ENTRY(compat_sys_sysinfo_wrapper) llgtr %r2,%r2 # struct sysinfo_emu31 * jg compat_sys_sysinfo # branch to system call - .globl sys32_ipc_wrapper -sys32_ipc_wrapper: +ENTRY(sys32_ipc_wrapper) llgfr %r2,%r2 # uint lgfr %r3,%r3 # int lgfr %r4,%r4 # int @@ -529,8 +448,7 @@ sys32_ipc_wrapper: llgfr %r6,%r6 # u32 jg sys32_ipc # branch to system call - .globl sys32_fsync_wrapper -sys32_fsync_wrapper: +ENTRY(sys32_fsync_wrapper) llgfr %r2,%r2 # unsigned int jg sys_fsync # branch to system call @@ -538,97 +456,81 @@ sys32_fsync_wrapper: #sys32_clone_wrapper # done in clone_glue - .globl sys32_setdomainname_wrapper -sys32_setdomainname_wrapper: +ENTRY(sys32_setdomainname_wrapper) llgtr %r2,%r2 # char * lgfr %r3,%r3 # int jg sys_setdomainname # branch to system call - .globl sys32_newuname_wrapper -sys32_newuname_wrapper: +ENTRY(sys32_newuname_wrapper) llgtr %r2,%r2 # struct new_utsname * jg sys_newuname # branch to system call - .globl compat_sys_adjtimex_wrapper -compat_sys_adjtimex_wrapper: +ENTRY(compat_sys_adjtimex_wrapper) llgtr %r2,%r2 # struct compat_timex * jg compat_sys_adjtimex # branch to system call - .globl sys32_mprotect_wrapper -sys32_mprotect_wrapper: +ENTRY(sys32_mprotect_wrapper) 
llgtr %r2,%r2 # unsigned long (actually pointer llgfr %r3,%r3 # size_t llgfr %r4,%r4 # unsigned long jg sys_mprotect # branch to system call - .globl compat_sys_sigprocmask_wrapper -compat_sys_sigprocmask_wrapper: +ENTRY(compat_sys_sigprocmask_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # compat_old_sigset_t * llgtr %r4,%r4 # compat_old_sigset_t * jg compat_sys_sigprocmask # branch to system call - .globl sys_init_module_wrapper -sys_init_module_wrapper: +ENTRY(sys_init_module_wrapper) llgtr %r2,%r2 # void * llgfr %r3,%r3 # unsigned long llgtr %r4,%r4 # char * jg sys_init_module # branch to system call - .globl sys_delete_module_wrapper -sys_delete_module_wrapper: +ENTRY(sys_delete_module_wrapper) llgtr %r2,%r2 # const char * llgfr %r3,%r3 # unsigned int jg sys_delete_module # branch to system call - .globl sys32_quotactl_wrapper -sys32_quotactl_wrapper: +ENTRY(sys32_quotactl_wrapper) llgfr %r2,%r2 # unsigned int llgtr %r3,%r3 # const char * llgfr %r4,%r4 # qid_t llgtr %r5,%r5 # caddr_t jg sys_quotactl # branch to system call - .globl sys32_getpgid_wrapper -sys32_getpgid_wrapper: +ENTRY(sys32_getpgid_wrapper) lgfr %r2,%r2 # pid_t jg sys_getpgid # branch to system call - .globl sys32_fchdir_wrapper -sys32_fchdir_wrapper: +ENTRY(sys32_fchdir_wrapper) llgfr %r2,%r2 # unsigned int jg sys_fchdir # branch to system call - .globl sys32_bdflush_wrapper -sys32_bdflush_wrapper: +ENTRY(sys32_bdflush_wrapper) lgfr %r2,%r2 # int lgfr %r3,%r3 # long jg sys_bdflush # branch to system call - .globl sys32_sysfs_wrapper -sys32_sysfs_wrapper: +ENTRY(sys32_sysfs_wrapper) lgfr %r2,%r2 # int llgfr %r3,%r3 # unsigned long llgfr %r4,%r4 # unsigned long jg sys_sysfs # branch to system call - .globl sys32_personality_wrapper -sys32_personality_wrapper: +ENTRY(sys32_personality_wrapper) llgfr %r2,%r2 # unsigned int jg sys_s390_personality # branch to system call - .globl sys32_setfsuid16_wrapper -sys32_setfsuid16_wrapper: +ENTRY(sys32_setfsuid16_wrapper) llgfr %r2,%r2 # 
__kernel_old_uid_emu31_t jg sys32_setfsuid16 # branch to system call - .globl sys32_setfsgid16_wrapper -sys32_setfsgid16_wrapper: +ENTRY(sys32_setfsgid16_wrapper) llgfr %r2,%r2 # __kernel_old_gid_emu31_t jg sys32_setfsgid16 # branch to system call - .globl sys32_llseek_wrapper -sys32_llseek_wrapper: +ENTRY(sys32_llseek_wrapper) llgfr %r2,%r2 # unsigned int llgfr %r3,%r3 # unsigned long llgfr %r4,%r4 # unsigned long @@ -636,15 +538,13 @@ sys32_llseek_wrapper: llgfr %r6,%r6 # unsigned int jg sys_llseek # branch to system call - .globl sys32_getdents_wrapper -sys32_getdents_wrapper: +ENTRY(sys32_getdents_wrapper) llgfr %r2,%r2 # unsigned int llgtr %r3,%r3 # void * llgfr %r4,%r4 # unsigned int jg compat_sys_getdents # branch to system call - .globl compat_sys_select_wrapper -compat_sys_select_wrapper: +ENTRY(compat_sys_select_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # compat_fd_set * llgtr %r4,%r4 # compat_fd_set * @@ -652,112 +552,94 @@ compat_sys_select_wrapper: llgtr %r6,%r6 # struct compat_timeval * jg compat_sys_select # branch to system call - .globl sys32_flock_wrapper -sys32_flock_wrapper: +ENTRY(sys32_flock_wrapper) llgfr %r2,%r2 # unsigned int llgfr %r3,%r3 # unsigned int jg sys_flock # branch to system call - .globl sys32_msync_wrapper -sys32_msync_wrapper: +ENTRY(sys32_msync_wrapper) llgfr %r2,%r2 # unsigned long llgfr %r3,%r3 # size_t lgfr %r4,%r4 # int jg sys_msync # branch to system call - .globl compat_sys_readv_wrapper -compat_sys_readv_wrapper: +ENTRY(compat_sys_readv_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # const struct compat_iovec * llgfr %r4,%r4 # unsigned long jg compat_sys_readv # branch to system call - .globl compat_sys_writev_wrapper -compat_sys_writev_wrapper: +ENTRY(compat_sys_writev_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # const struct compat_iovec * llgfr %r4,%r4 # unsigned long jg compat_sys_writev # branch to system call - .globl sys32_getsid_wrapper -sys32_getsid_wrapper: +ENTRY(sys32_getsid_wrapper) lgfr %r2,%r2 # pid_t jg 
sys_getsid # branch to system call - .globl sys32_fdatasync_wrapper -sys32_fdatasync_wrapper: +ENTRY(sys32_fdatasync_wrapper) llgfr %r2,%r2 # unsigned int jg sys_fdatasync # branch to system call - .globl sys32_mlock_wrapper -sys32_mlock_wrapper: +ENTRY(sys32_mlock_wrapper) llgfr %r2,%r2 # unsigned long llgfr %r3,%r3 # size_t jg sys_mlock # branch to system call - .globl sys32_munlock_wrapper -sys32_munlock_wrapper: +ENTRY(sys32_munlock_wrapper) llgfr %r2,%r2 # unsigned long llgfr %r3,%r3 # size_t jg sys_munlock # branch to system call - .globl sys32_mlockall_wrapper -sys32_mlockall_wrapper: +ENTRY(sys32_mlockall_wrapper) lgfr %r2,%r2 # int jg sys_mlockall # branch to system call #sys32_munlockall_wrapper # void - .globl sys32_sched_setparam_wrapper -sys32_sched_setparam_wrapper: +ENTRY(sys32_sched_setparam_wrapper) lgfr %r2,%r2 # pid_t llgtr %r3,%r3 # struct sched_param * jg sys_sched_setparam # branch to system call - .globl sys32_sched_getparam_wrapper -sys32_sched_getparam_wrapper: +ENTRY(sys32_sched_getparam_wrapper) lgfr %r2,%r2 # pid_t llgtr %r3,%r3 # struct sched_param * jg sys_sched_getparam # branch to system call - .globl sys32_sched_setscheduler_wrapper -sys32_sched_setscheduler_wrapper: +ENTRY(sys32_sched_setscheduler_wrapper) lgfr %r2,%r2 # pid_t lgfr %r3,%r3 # int llgtr %r4,%r4 # struct sched_param * jg sys_sched_setscheduler # branch to system call - .globl sys32_sched_getscheduler_wrapper -sys32_sched_getscheduler_wrapper: +ENTRY(sys32_sched_getscheduler_wrapper) lgfr %r2,%r2 # pid_t jg sys_sched_getscheduler # branch to system call #sys32_sched_yield_wrapper # void - .globl sys32_sched_get_priority_max_wrapper -sys32_sched_get_priority_max_wrapper: +ENTRY(sys32_sched_get_priority_max_wrapper) lgfr %r2,%r2 # int jg sys_sched_get_priority_max # branch to system call - .globl sys32_sched_get_priority_min_wrapper -sys32_sched_get_priority_min_wrapper: +ENTRY(sys32_sched_get_priority_min_wrapper) lgfr %r2,%r2 # int jg sys_sched_get_priority_min # 
branch to system call - .globl sys32_sched_rr_get_interval_wrapper -sys32_sched_rr_get_interval_wrapper: +ENTRY(sys32_sched_rr_get_interval_wrapper) lgfr %r2,%r2 # pid_t llgtr %r3,%r3 # struct compat_timespec * jg sys32_sched_rr_get_interval # branch to system call - .globl compat_sys_nanosleep_wrapper -compat_sys_nanosleep_wrapper: +ENTRY(compat_sys_nanosleep_wrapper) llgtr %r2,%r2 # struct compat_timespec * llgtr %r3,%r3 # struct compat_timespec * jg compat_sys_nanosleep # branch to system call - .globl sys32_mremap_wrapper -sys32_mremap_wrapper: +ENTRY(sys32_mremap_wrapper) llgfr %r2,%r2 # unsigned long llgfr %r3,%r3 # unsigned long llgfr %r4,%r4 # unsigned long @@ -765,50 +647,43 @@ sys32_mremap_wrapper: llgfr %r6,%r6 # unsigned long jg sys_mremap # branch to system call - .globl sys32_setresuid16_wrapper -sys32_setresuid16_wrapper: +ENTRY(sys32_setresuid16_wrapper) llgfr %r2,%r2 # __kernel_old_uid_emu31_t llgfr %r3,%r3 # __kernel_old_uid_emu31_t llgfr %r4,%r4 # __kernel_old_uid_emu31_t jg sys32_setresuid16 # branch to system call - .globl sys32_getresuid16_wrapper -sys32_getresuid16_wrapper: +ENTRY(sys32_getresuid16_wrapper) llgtr %r2,%r2 # __kernel_old_uid_emu31_t * llgtr %r3,%r3 # __kernel_old_uid_emu31_t * llgtr %r4,%r4 # __kernel_old_uid_emu31_t * jg sys32_getresuid16 # branch to system call - .globl sys32_poll_wrapper -sys32_poll_wrapper: +ENTRY(sys32_poll_wrapper) llgtr %r2,%r2 # struct pollfd * llgfr %r3,%r3 # unsigned int lgfr %r4,%r4 # long jg sys_poll # branch to system call - .globl compat_sys_nfsservctl_wrapper -compat_sys_nfsservctl_wrapper: +ENTRY(compat_sys_nfsservctl_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # struct compat_nfsctl_arg* llgtr %r4,%r4 # union compat_nfsctl_res* jg compat_sys_nfsservctl # branch to system call - .globl sys32_setresgid16_wrapper -sys32_setresgid16_wrapper: +ENTRY(sys32_setresgid16_wrapper) llgfr %r2,%r2 # __kernel_old_gid_emu31_t llgfr %r3,%r3 # __kernel_old_gid_emu31_t llgfr %r4,%r4 # __kernel_old_gid_emu31_t jg 
sys32_setresgid16 # branch to system call - .globl sys32_getresgid16_wrapper -sys32_getresgid16_wrapper: +ENTRY(sys32_getresgid16_wrapper) llgtr %r2,%r2 # __kernel_old_gid_emu31_t * llgtr %r3,%r3 # __kernel_old_gid_emu31_t * llgtr %r4,%r4 # __kernel_old_gid_emu31_t * jg sys32_getresgid16 # branch to system call - .globl sys32_prctl_wrapper -sys32_prctl_wrapper: +ENTRY(sys32_prctl_wrapper) lgfr %r2,%r2 # int llgfr %r3,%r3 # unsigned long llgfr %r4,%r4 # unsigned long @@ -818,51 +693,44 @@ sys32_prctl_wrapper: #sys32_rt_sigreturn_wrapper # done in rt_sigreturn_glue - .globl sys32_rt_sigaction_wrapper -sys32_rt_sigaction_wrapper: +ENTRY(sys32_rt_sigaction_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # const struct sigaction_emu31 * llgtr %r4,%r4 # const struct sigaction_emu31 * llgfr %r5,%r5 # size_t jg sys32_rt_sigaction # branch to system call - .globl sys32_rt_sigprocmask_wrapper -sys32_rt_sigprocmask_wrapper: +ENTRY(sys32_rt_sigprocmask_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # old_sigset_emu31 * llgtr %r4,%r4 # old_sigset_emu31 * llgfr %r5,%r5 # size_t jg sys32_rt_sigprocmask # branch to system call - .globl sys32_rt_sigpending_wrapper -sys32_rt_sigpending_wrapper: +ENTRY(sys32_rt_sigpending_wrapper) llgtr %r2,%r2 # sigset_emu31 * llgfr %r3,%r3 # size_t jg sys32_rt_sigpending # branch to system call - .globl compat_sys_rt_sigtimedwait_wrapper -compat_sys_rt_sigtimedwait_wrapper: +ENTRY(compat_sys_rt_sigtimedwait_wrapper) llgtr %r2,%r2 # const sigset_emu31_t * llgtr %r3,%r3 # siginfo_emu31_t * llgtr %r4,%r4 # const struct compat_timespec * llgfr %r5,%r5 # size_t jg compat_sys_rt_sigtimedwait # branch to system call - .globl sys32_rt_sigqueueinfo_wrapper -sys32_rt_sigqueueinfo_wrapper: +ENTRY(sys32_rt_sigqueueinfo_wrapper) lgfr %r2,%r2 # int lgfr %r3,%r3 # int llgtr %r4,%r4 # siginfo_emu31_t * jg sys32_rt_sigqueueinfo # branch to system call - .globl compat_sys_rt_sigsuspend_wrapper -compat_sys_rt_sigsuspend_wrapper: +ENTRY(compat_sys_rt_sigsuspend_wrapper) llgtr 
%r2,%r2 # compat_sigset_t * llgfr %r3,%r3 # compat_size_t jg compat_sys_rt_sigsuspend - .globl sys32_pread64_wrapper -sys32_pread64_wrapper: +ENTRY(sys32_pread64_wrapper) llgfr %r2,%r2 # unsigned int llgtr %r3,%r3 # char * llgfr %r4,%r4 # size_t @@ -870,8 +738,7 @@ sys32_pread64_wrapper: llgfr %r6,%r6 # u32 jg sys32_pread64 # branch to system call - .globl sys32_pwrite64_wrapper -sys32_pwrite64_wrapper: +ENTRY(sys32_pwrite64_wrapper) llgfr %r2,%r2 # unsigned int llgtr %r3,%r3 # const char * llgfr %r4,%r4 # size_t @@ -879,39 +746,33 @@ sys32_pwrite64_wrapper: llgfr %r6,%r6 # u32 jg sys32_pwrite64 # branch to system call - .globl sys32_chown16_wrapper -sys32_chown16_wrapper: +ENTRY(sys32_chown16_wrapper) llgtr %r2,%r2 # const char * llgfr %r3,%r3 # __kernel_old_uid_emu31_t llgfr %r4,%r4 # __kernel_old_gid_emu31_t jg sys32_chown16 # branch to system call - .globl sys32_getcwd_wrapper -sys32_getcwd_wrapper: +ENTRY(sys32_getcwd_wrapper) llgtr %r2,%r2 # char * llgfr %r3,%r3 # unsigned long jg sys_getcwd # branch to system call - .globl sys32_capget_wrapper -sys32_capget_wrapper: +ENTRY(sys32_capget_wrapper) llgtr %r2,%r2 # cap_user_header_t llgtr %r3,%r3 # cap_user_data_t jg sys_capget # branch to system call - .globl sys32_capset_wrapper -sys32_capset_wrapper: +ENTRY(sys32_capset_wrapper) llgtr %r2,%r2 # cap_user_header_t llgtr %r3,%r3 # const cap_user_data_t jg sys_capset # branch to system call - .globl sys32_sigaltstack_wrapper -sys32_sigaltstack_wrapper: +ENTRY(sys32_sigaltstack_wrapper) llgtr %r2,%r2 # const stack_emu31_t * llgtr %r3,%r3 # stack_emu31_t * jg sys32_sigaltstack - .globl sys32_sendfile_wrapper -sys32_sendfile_wrapper: +ENTRY(sys32_sendfile_wrapper) lgfr %r2,%r2 # int lgfr %r3,%r3 # int llgtr %r4,%r4 # __kernel_off_emu31_t * @@ -920,22 +781,19 @@ sys32_sendfile_wrapper: #sys32_vfork_wrapper # done in vfork_glue - .globl sys32_truncate64_wrapper -sys32_truncate64_wrapper: +ENTRY(sys32_truncate64_wrapper) llgtr %r2,%r2 # const char * llgfr %r3,%r3 # 
unsigned long llgfr %r4,%r4 # unsigned long jg sys32_truncate64 # branch to system call - .globl sys32_ftruncate64_wrapper -sys32_ftruncate64_wrapper: +ENTRY(sys32_ftruncate64_wrapper) llgfr %r2,%r2 # unsigned int llgfr %r3,%r3 # unsigned long llgfr %r4,%r4 # unsigned long jg sys32_ftruncate64 # branch to system call - .globl sys32_lchown_wrapper -sys32_lchown_wrapper: +ENTRY(sys32_lchown_wrapper) llgtr %r2,%r2 # const char * llgfr %r3,%r3 # uid_t llgfr %r4,%r4 # gid_t @@ -946,156 +804,131 @@ sys32_lchown_wrapper: #sys32_geteuid_wrapper # void #sys32_getegid_wrapper # void - .globl sys32_setreuid_wrapper -sys32_setreuid_wrapper: +ENTRY(sys32_setreuid_wrapper) llgfr %r2,%r2 # uid_t llgfr %r3,%r3 # uid_t jg sys_setreuid # branch to system call - .globl sys32_setregid_wrapper -sys32_setregid_wrapper: +ENTRY(sys32_setregid_wrapper) llgfr %r2,%r2 # gid_t llgfr %r3,%r3 # gid_t jg sys_setregid # branch to system call - .globl sys32_getgroups_wrapper -sys32_getgroups_wrapper: +ENTRY(sys32_getgroups_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # gid_t * jg sys_getgroups # branch to system call - .globl sys32_setgroups_wrapper -sys32_setgroups_wrapper: +ENTRY(sys32_setgroups_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # gid_t * jg sys_setgroups # branch to system call - .globl sys32_fchown_wrapper -sys32_fchown_wrapper: +ENTRY(sys32_fchown_wrapper) llgfr %r2,%r2 # unsigned int llgfr %r3,%r3 # uid_t llgfr %r4,%r4 # gid_t jg sys_fchown # branch to system call - .globl sys32_setresuid_wrapper -sys32_setresuid_wrapper: +ENTRY(sys32_setresuid_wrapper) llgfr %r2,%r2 # uid_t llgfr %r3,%r3 # uid_t llgfr %r4,%r4 # uid_t jg sys_setresuid # branch to system call - .globl sys32_getresuid_wrapper -sys32_getresuid_wrapper: +ENTRY(sys32_getresuid_wrapper) llgtr %r2,%r2 # uid_t * llgtr %r3,%r3 # uid_t * llgtr %r4,%r4 # uid_t * jg sys_getresuid # branch to system call - .globl sys32_setresgid_wrapper -sys32_setresgid_wrapper: +ENTRY(sys32_setresgid_wrapper) llgfr %r2,%r2 # gid_t llgfr %r3,%r3 # 
gid_t llgfr %r4,%r4 # gid_t jg sys_setresgid # branch to system call - .globl sys32_getresgid_wrapper -sys32_getresgid_wrapper: +ENTRY(sys32_getresgid_wrapper) llgtr %r2,%r2 # gid_t * llgtr %r3,%r3 # gid_t * llgtr %r4,%r4 # gid_t * jg sys_getresgid # branch to system call - .globl sys32_chown_wrapper -sys32_chown_wrapper: +ENTRY(sys32_chown_wrapper) llgtr %r2,%r2 # const char * llgfr %r3,%r3 # uid_t llgfr %r4,%r4 # gid_t jg sys_chown # branch to system call - .globl sys32_setuid_wrapper -sys32_setuid_wrapper: +ENTRY(sys32_setuid_wrapper) llgfr %r2,%r2 # uid_t jg sys_setuid # branch to system call - .globl sys32_setgid_wrapper -sys32_setgid_wrapper: +ENTRY(sys32_setgid_wrapper) llgfr %r2,%r2 # gid_t jg sys_setgid # branch to system call - .globl sys32_setfsuid_wrapper -sys32_setfsuid_wrapper: +ENTRY(sys32_setfsuid_wrapper) llgfr %r2,%r2 # uid_t jg sys_setfsuid # branch to system call - .globl sys32_setfsgid_wrapper -sys32_setfsgid_wrapper: +ENTRY(sys32_setfsgid_wrapper) llgfr %r2,%r2 # gid_t jg sys_setfsgid # branch to system call - .globl sys32_pivot_root_wrapper -sys32_pivot_root_wrapper: +ENTRY(sys32_pivot_root_wrapper) llgtr %r2,%r2 # const char * llgtr %r3,%r3 # const char * jg sys_pivot_root # branch to system call - .globl sys32_mincore_wrapper -sys32_mincore_wrapper: +ENTRY(sys32_mincore_wrapper) llgfr %r2,%r2 # unsigned long llgfr %r3,%r3 # size_t llgtr %r4,%r4 # unsigned char * jg sys_mincore # branch to system call - .globl sys32_madvise_wrapper -sys32_madvise_wrapper: +ENTRY(sys32_madvise_wrapper) llgfr %r2,%r2 # unsigned long llgfr %r3,%r3 # size_t lgfr %r4,%r4 # int jg sys_madvise # branch to system call - .globl sys32_getdents64_wrapper -sys32_getdents64_wrapper: +ENTRY(sys32_getdents64_wrapper) llgfr %r2,%r2 # unsigned int llgtr %r3,%r3 # void * llgfr %r4,%r4 # unsigned int jg sys_getdents64 # branch to system call - .globl compat_sys_fcntl64_wrapper -compat_sys_fcntl64_wrapper: +ENTRY(compat_sys_fcntl64_wrapper) llgfr %r2,%r2 # unsigned int llgfr 
%r3,%r3 # unsigned int llgfr %r4,%r4 # unsigned long jg compat_sys_fcntl64 # branch to system call - .globl sys32_stat64_wrapper -sys32_stat64_wrapper: +ENTRY(sys32_stat64_wrapper) llgtr %r2,%r2 # char * llgtr %r3,%r3 # struct stat64 * jg sys32_stat64 # branch to system call - .globl sys32_lstat64_wrapper -sys32_lstat64_wrapper: +ENTRY(sys32_lstat64_wrapper) llgtr %r2,%r2 # char * llgtr %r3,%r3 # struct stat64 * jg sys32_lstat64 # branch to system call - .globl sys32_stime_wrapper -sys32_stime_wrapper: +ENTRY(sys32_stime_wrapper) llgtr %r2,%r2 # long * jg compat_sys_stime # branch to system call - .globl sys32_sysctl_wrapper -sys32_sysctl_wrapper: +ENTRY(sys32_sysctl_wrapper) llgtr %r2,%r2 # struct compat_sysctl_args * jg compat_sys_sysctl - .globl sys32_fstat64_wrapper -sys32_fstat64_wrapper: +ENTRY(sys32_fstat64_wrapper) llgfr %r2,%r2 # unsigned long llgtr %r3,%r3 # struct stat64 * jg sys32_fstat64 # branch to system call - .globl compat_sys_futex_wrapper -compat_sys_futex_wrapper: +ENTRY(compat_sys_futex_wrapper) llgtr %r2,%r2 # u32 * lgfr %r3,%r3 # int lgfr %r4,%r4 # int @@ -1105,8 +938,7 @@ compat_sys_futex_wrapper: stg %r0,160(%r15) jg compat_sys_futex # branch to system call - .globl sys32_setxattr_wrapper -sys32_setxattr_wrapper: +ENTRY(sys32_setxattr_wrapper) llgtr %r2,%r2 # char * llgtr %r3,%r3 # char * llgtr %r4,%r4 # void * @@ -1114,8 +946,7 @@ sys32_setxattr_wrapper: lgfr %r6,%r6 # int jg sys_setxattr - .globl sys32_lsetxattr_wrapper -sys32_lsetxattr_wrapper: +ENTRY(sys32_lsetxattr_wrapper) llgtr %r2,%r2 # char * llgtr %r3,%r3 # char * llgtr %r4,%r4 # void * @@ -1123,8 +954,7 @@ sys32_lsetxattr_wrapper: lgfr %r6,%r6 # int jg sys_lsetxattr - .globl sys32_fsetxattr_wrapper -sys32_fsetxattr_wrapper: +ENTRY(sys32_fsetxattr_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # char * llgtr %r4,%r4 # void * @@ -1132,124 +962,106 @@ sys32_fsetxattr_wrapper: lgfr %r6,%r6 # int jg sys_fsetxattr - .globl sys32_getxattr_wrapper -sys32_getxattr_wrapper: 
+ENTRY(sys32_getxattr_wrapper) llgtr %r2,%r2 # char * llgtr %r3,%r3 # char * llgtr %r4,%r4 # void * llgfr %r5,%r5 # size_t jg sys_getxattr - .globl sys32_lgetxattr_wrapper -sys32_lgetxattr_wrapper: +ENTRY(sys32_lgetxattr_wrapper) llgtr %r2,%r2 # char * llgtr %r3,%r3 # char * llgtr %r4,%r4 # void * llgfr %r5,%r5 # size_t jg sys_lgetxattr - .globl sys32_fgetxattr_wrapper -sys32_fgetxattr_wrapper: +ENTRY(sys32_fgetxattr_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # char * llgtr %r4,%r4 # void * llgfr %r5,%r5 # size_t jg sys_fgetxattr - .globl sys32_listxattr_wrapper -sys32_listxattr_wrapper: +ENTRY(sys32_listxattr_wrapper) llgtr %r2,%r2 # char * llgtr %r3,%r3 # char * llgfr %r4,%r4 # size_t jg sys_listxattr - .globl sys32_llistxattr_wrapper -sys32_llistxattr_wrapper: +ENTRY(sys32_llistxattr_wrapper) llgtr %r2,%r2 # char * llgtr %r3,%r3 # char * llgfr %r4,%r4 # size_t jg sys_llistxattr - .globl sys32_flistxattr_wrapper -sys32_flistxattr_wrapper: +ENTRY(sys32_flistxattr_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # char * llgfr %r4,%r4 # size_t jg sys_flistxattr - .globl sys32_removexattr_wrapper -sys32_removexattr_wrapper: +ENTRY(sys32_removexattr_wrapper) llgtr %r2,%r2 # char * llgtr %r3,%r3 # char * jg sys_removexattr - .globl sys32_lremovexattr_wrapper -sys32_lremovexattr_wrapper: +ENTRY(sys32_lremovexattr_wrapper) llgtr %r2,%r2 # char * llgtr %r3,%r3 # char * jg sys_lremovexattr - .globl sys32_fremovexattr_wrapper -sys32_fremovexattr_wrapper: +ENTRY(sys32_fremovexattr_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # char * jg sys_fremovexattr - .globl sys32_sched_setaffinity_wrapper -sys32_sched_setaffinity_wrapper: +ENTRY(sys32_sched_setaffinity_wrapper) lgfr %r2,%r2 # int llgfr %r3,%r3 # unsigned int llgtr %r4,%r4 # unsigned long * jg compat_sys_sched_setaffinity - .globl sys32_sched_getaffinity_wrapper -sys32_sched_getaffinity_wrapper: +ENTRY(sys32_sched_getaffinity_wrapper) lgfr %r2,%r2 # int llgfr %r3,%r3 # unsigned int llgtr %r4,%r4 # unsigned long * jg 
compat_sys_sched_getaffinity - .globl sys32_exit_group_wrapper -sys32_exit_group_wrapper: +ENTRY(sys32_exit_group_wrapper) lgfr %r2,%r2 # int jg sys_exit_group # branch to system call - .globl sys32_set_tid_address_wrapper -sys32_set_tid_address_wrapper: +ENTRY(sys32_set_tid_address_wrapper) llgtr %r2,%r2 # int * jg sys_set_tid_address # branch to system call - .globl sys_epoll_create_wrapper -sys_epoll_create_wrapper: +ENTRY(sys_epoll_create_wrapper) lgfr %r2,%r2 # int jg sys_epoll_create # branch to system call - .globl sys_epoll_ctl_wrapper -sys_epoll_ctl_wrapper: +ENTRY(sys_epoll_ctl_wrapper) lgfr %r2,%r2 # int lgfr %r3,%r3 # int lgfr %r4,%r4 # int llgtr %r5,%r5 # struct epoll_event * jg sys_epoll_ctl # branch to system call - .globl sys_epoll_wait_wrapper -sys_epoll_wait_wrapper: +ENTRY(sys_epoll_wait_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # struct epoll_event * lgfr %r4,%r4 # int lgfr %r5,%r5 # int jg sys_epoll_wait # branch to system call - .globl sys32_lookup_dcookie_wrapper -sys32_lookup_dcookie_wrapper: +ENTRY(sys32_lookup_dcookie_wrapper) sllg %r2,%r2,32 # get high word of 64bit dcookie or %r2,%r3 # get low word of 64bit dcookie llgtr %r3,%r4 # char * llgfr %r4,%r5 # size_t jg sys_lookup_dcookie - .globl sys32_fadvise64_wrapper -sys32_fadvise64_wrapper: +ENTRY(sys32_fadvise64_wrapper) lgfr %r2,%r2 # int sllg %r3,%r3,32 # get high word of 64bit loff_t or %r3,%r4 # get low word of 64bit loff_t @@ -1257,81 +1069,68 @@ sys32_fadvise64_wrapper: lgfr %r5,%r6 # int jg sys32_fadvise64 - .globl sys32_fadvise64_64_wrapper -sys32_fadvise64_64_wrapper: +ENTRY(sys32_fadvise64_64_wrapper) llgtr %r2,%r2 # struct fadvise64_64_args * jg sys32_fadvise64_64 - .globl sys32_clock_settime_wrapper -sys32_clock_settime_wrapper: +ENTRY(sys32_clock_settime_wrapper) lgfr %r2,%r2 # clockid_t (int) llgtr %r3,%r3 # struct compat_timespec * jg compat_sys_clock_settime - .globl sys32_clock_gettime_wrapper -sys32_clock_gettime_wrapper: +ENTRY(sys32_clock_gettime_wrapper) lgfr %r2,%r2 
# clockid_t (int) llgtr %r3,%r3 # struct compat_timespec * jg compat_sys_clock_gettime - .globl sys32_clock_getres_wrapper -sys32_clock_getres_wrapper: +ENTRY(sys32_clock_getres_wrapper) lgfr %r2,%r2 # clockid_t (int) llgtr %r3,%r3 # struct compat_timespec * jg compat_sys_clock_getres - .globl sys32_clock_nanosleep_wrapper -sys32_clock_nanosleep_wrapper: +ENTRY(sys32_clock_nanosleep_wrapper) lgfr %r2,%r2 # clockid_t (int) lgfr %r3,%r3 # int llgtr %r4,%r4 # struct compat_timespec * llgtr %r5,%r5 # struct compat_timespec * jg compat_sys_clock_nanosleep - .globl sys32_timer_create_wrapper -sys32_timer_create_wrapper: +ENTRY(sys32_timer_create_wrapper) lgfr %r2,%r2 # timer_t (int) llgtr %r3,%r3 # struct compat_sigevent * llgtr %r4,%r4 # timer_t * jg compat_sys_timer_create - .globl sys32_timer_settime_wrapper -sys32_timer_settime_wrapper: +ENTRY(sys32_timer_settime_wrapper) lgfr %r2,%r2 # timer_t (int) lgfr %r3,%r3 # int llgtr %r4,%r4 # struct compat_itimerspec * llgtr %r5,%r5 # struct compat_itimerspec * jg compat_sys_timer_settime - .globl sys32_timer_gettime_wrapper -sys32_timer_gettime_wrapper: +ENTRY(sys32_timer_gettime_wrapper) lgfr %r2,%r2 # timer_t (int) llgtr %r3,%r3 # struct compat_itimerspec * jg compat_sys_timer_gettime - .globl sys32_timer_getoverrun_wrapper -sys32_timer_getoverrun_wrapper: +ENTRY(sys32_timer_getoverrun_wrapper) lgfr %r2,%r2 # timer_t (int) jg sys_timer_getoverrun - .globl sys32_timer_delete_wrapper -sys32_timer_delete_wrapper: +ENTRY(sys32_timer_delete_wrapper) lgfr %r2,%r2 # timer_t (int) jg sys_timer_delete - .globl sys32_io_setup_wrapper -sys32_io_setup_wrapper: +ENTRY(sys32_io_setup_wrapper) llgfr %r2,%r2 # unsigned int llgtr %r3,%r3 # u32 * jg compat_sys_io_setup - .globl sys32_io_destroy_wrapper -sys32_io_destroy_wrapper: +ENTRY(sys32_io_destroy_wrapper) llgfr %r2,%r2 # (aio_context_t) u32 jg sys_io_destroy - .globl sys32_io_getevents_wrapper -sys32_io_getevents_wrapper: +ENTRY(sys32_io_getevents_wrapper) llgfr %r2,%r2 # 
(aio_context_t) u32 lgfr %r3,%r3 # long lgfr %r4,%r4 # long @@ -1339,49 +1138,42 @@ sys32_io_getevents_wrapper: llgtr %r6,%r6 # struct compat_timespec * jg compat_sys_io_getevents - .globl sys32_io_submit_wrapper -sys32_io_submit_wrapper: +ENTRY(sys32_io_submit_wrapper) llgfr %r2,%r2 # (aio_context_t) u32 lgfr %r3,%r3 # long llgtr %r4,%r4 # struct iocb ** jg compat_sys_io_submit - .globl sys32_io_cancel_wrapper -sys32_io_cancel_wrapper: +ENTRY(sys32_io_cancel_wrapper) llgfr %r2,%r2 # (aio_context_t) u32 llgtr %r3,%r3 # struct iocb * llgtr %r4,%r4 # struct io_event * jg sys_io_cancel - .globl compat_sys_statfs64_wrapper -compat_sys_statfs64_wrapper: +ENTRY(compat_sys_statfs64_wrapper) llgtr %r2,%r2 # const char * llgfr %r3,%r3 # compat_size_t llgtr %r4,%r4 # struct compat_statfs64 * jg compat_sys_statfs64 - .globl compat_sys_fstatfs64_wrapper -compat_sys_fstatfs64_wrapper: +ENTRY(compat_sys_fstatfs64_wrapper) llgfr %r2,%r2 # unsigned int fd llgfr %r3,%r3 # compat_size_t llgtr %r4,%r4 # struct compat_statfs64 * jg compat_sys_fstatfs64 - .globl compat_sys_mq_open_wrapper -compat_sys_mq_open_wrapper: +ENTRY(compat_sys_mq_open_wrapper) llgtr %r2,%r2 # const char * lgfr %r3,%r3 # int llgfr %r4,%r4 # mode_t llgtr %r5,%r5 # struct compat_mq_attr * jg compat_sys_mq_open - .globl sys32_mq_unlink_wrapper -sys32_mq_unlink_wrapper: +ENTRY(sys32_mq_unlink_wrapper) llgtr %r2,%r2 # const char * jg sys_mq_unlink - .globl compat_sys_mq_timedsend_wrapper -compat_sys_mq_timedsend_wrapper: +ENTRY(compat_sys_mq_timedsend_wrapper) lgfr %r2,%r2 # mqd_t llgtr %r3,%r3 # const char * llgfr %r4,%r4 # size_t @@ -1389,8 +1181,7 @@ compat_sys_mq_timedsend_wrapper: llgtr %r6,%r6 # const struct compat_timespec * jg compat_sys_mq_timedsend - .globl compat_sys_mq_timedreceive_wrapper -compat_sys_mq_timedreceive_wrapper: +ENTRY(compat_sys_mq_timedreceive_wrapper) lgfr %r2,%r2 # mqd_t llgtr %r3,%r3 # char * llgfr %r4,%r4 # size_t @@ -1398,21 +1189,18 @@ compat_sys_mq_timedreceive_wrapper: llgtr 
%r6,%r6 # const struct compat_timespec * jg compat_sys_mq_timedreceive - .globl compat_sys_mq_notify_wrapper -compat_sys_mq_notify_wrapper: +ENTRY(compat_sys_mq_notify_wrapper) lgfr %r2,%r2 # mqd_t llgtr %r3,%r3 # struct compat_sigevent * jg compat_sys_mq_notify - .globl compat_sys_mq_getsetattr_wrapper -compat_sys_mq_getsetattr_wrapper: +ENTRY(compat_sys_mq_getsetattr_wrapper) lgfr %r2,%r2 # mqd_t llgtr %r3,%r3 # struct compat_mq_attr * llgtr %r4,%r4 # struct compat_mq_attr * jg compat_sys_mq_getsetattr - .globl compat_sys_add_key_wrapper -compat_sys_add_key_wrapper: +ENTRY(compat_sys_add_key_wrapper) llgtr %r2,%r2 # const char * llgtr %r3,%r3 # const char * llgtr %r4,%r4 # const void * @@ -1420,16 +1208,14 @@ compat_sys_add_key_wrapper: llgfr %r6,%r6 # (key_serial_t) u32 jg sys_add_key - .globl compat_sys_request_key_wrapper -compat_sys_request_key_wrapper: +ENTRY(compat_sys_request_key_wrapper) llgtr %r2,%r2 # const char * llgtr %r3,%r3 # const char * llgtr %r4,%r4 # const void * llgfr %r5,%r5 # (key_serial_t) u32 jg sys_request_key - .globl sys32_remap_file_pages_wrapper -sys32_remap_file_pages_wrapper: +ENTRY(sys32_remap_file_pages_wrapper) llgfr %r2,%r2 # unsigned long llgfr %r3,%r3 # unsigned long llgfr %r4,%r4 # unsigned long @@ -1437,8 +1223,7 @@ sys32_remap_file_pages_wrapper: llgfr %r6,%r6 # unsigned long jg sys_remap_file_pages - .globl compat_sys_waitid_wrapper -compat_sys_waitid_wrapper: +ENTRY(compat_sys_waitid_wrapper) lgfr %r2,%r2 # int lgfr %r3,%r3 # pid_t llgtr %r4,%r4 # siginfo_emu31_t * @@ -1446,65 +1231,56 @@ compat_sys_waitid_wrapper: llgtr %r6,%r6 # struct rusage_emu31 * jg compat_sys_waitid - .globl compat_sys_kexec_load_wrapper -compat_sys_kexec_load_wrapper: +ENTRY(compat_sys_kexec_load_wrapper) llgfr %r2,%r2 # unsigned long llgfr %r3,%r3 # unsigned long llgtr %r4,%r4 # struct kexec_segment * llgfr %r5,%r5 # unsigned long jg compat_sys_kexec_load - .globl sys_ioprio_set_wrapper -sys_ioprio_set_wrapper: +ENTRY(sys_ioprio_set_wrapper) lgfr 
%r2,%r2 # int lgfr %r3,%r3 # int lgfr %r4,%r4 # int jg sys_ioprio_set - .globl sys_ioprio_get_wrapper -sys_ioprio_get_wrapper: +ENTRY(sys_ioprio_get_wrapper) lgfr %r2,%r2 # int lgfr %r3,%r3 # int jg sys_ioprio_get - .globl sys_inotify_add_watch_wrapper -sys_inotify_add_watch_wrapper: +ENTRY(sys_inotify_add_watch_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # const char * llgfr %r4,%r4 # u32 jg sys_inotify_add_watch - .globl sys_inotify_rm_watch_wrapper -sys_inotify_rm_watch_wrapper: +ENTRY(sys_inotify_rm_watch_wrapper) lgfr %r2,%r2 # int llgfr %r3,%r3 # u32 jg sys_inotify_rm_watch - .globl compat_sys_openat_wrapper -compat_sys_openat_wrapper: +ENTRY(compat_sys_openat_wrapper) llgfr %r2,%r2 # unsigned int llgtr %r3,%r3 # const char * lgfr %r4,%r4 # int lgfr %r5,%r5 # int jg compat_sys_openat - .globl sys_mkdirat_wrapper -sys_mkdirat_wrapper: +ENTRY(sys_mkdirat_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # const char * lgfr %r4,%r4 # int jg sys_mkdirat - .globl sys_mknodat_wrapper -sys_mknodat_wrapper: +ENTRY(sys_mknodat_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # const char * lgfr %r4,%r4 # int llgfr %r5,%r5 # unsigned int jg sys_mknodat - .globl sys_fchownat_wrapper -sys_fchownat_wrapper: +ENTRY(sys_fchownat_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # const char * llgfr %r4,%r4 # uid_t @@ -1512,38 +1288,33 @@ sys_fchownat_wrapper: lgfr %r6,%r6 # int jg sys_fchownat - .globl compat_sys_futimesat_wrapper -compat_sys_futimesat_wrapper: +ENTRY(compat_sys_futimesat_wrapper) llgfr %r2,%r2 # unsigned int llgtr %r3,%r3 # char * llgtr %r4,%r4 # struct timeval * jg compat_sys_futimesat - .globl sys32_fstatat64_wrapper -sys32_fstatat64_wrapper: +ENTRY(sys32_fstatat64_wrapper) llgfr %r2,%r2 # unsigned int llgtr %r3,%r3 # char * llgtr %r4,%r4 # struct stat64 * lgfr %r5,%r5 # int jg sys32_fstatat64 - .globl sys_unlinkat_wrapper -sys_unlinkat_wrapper: +ENTRY(sys_unlinkat_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # const char * lgfr %r4,%r4 # int jg sys_unlinkat - .globl 
sys_renameat_wrapper -sys_renameat_wrapper: +ENTRY(sys_renameat_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # const char * lgfr %r4,%r4 # int llgtr %r5,%r5 # const char * jg sys_renameat - .globl sys_linkat_wrapper -sys_linkat_wrapper: +ENTRY(sys_linkat_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # const char * lgfr %r4,%r4 # int @@ -1551,37 +1322,32 @@ sys_linkat_wrapper: lgfr %r6,%r6 # int jg sys_linkat - .globl sys_symlinkat_wrapper -sys_symlinkat_wrapper: +ENTRY(sys_symlinkat_wrapper) llgtr %r2,%r2 # const char * lgfr %r3,%r3 # int llgtr %r4,%r4 # const char * jg sys_symlinkat - .globl sys_readlinkat_wrapper -sys_readlinkat_wrapper: +ENTRY(sys_readlinkat_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # const char * llgtr %r4,%r4 # char * lgfr %r5,%r5 # int jg sys_readlinkat - .globl sys_fchmodat_wrapper -sys_fchmodat_wrapper: +ENTRY(sys_fchmodat_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # const char * llgfr %r4,%r4 # mode_t jg sys_fchmodat - .globl sys_faccessat_wrapper -sys_faccessat_wrapper: +ENTRY(sys_faccessat_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # const char * lgfr %r4,%r4 # int jg sys_faccessat - .globl compat_sys_pselect6_wrapper -compat_sys_pselect6_wrapper: +ENTRY(compat_sys_pselect6_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # fd_set * llgtr %r4,%r4 # fd_set * @@ -1591,8 +1357,7 @@ compat_sys_pselect6_wrapper: stg %r0,160(%r15) jg compat_sys_pselect6 - .globl compat_sys_ppoll_wrapper -compat_sys_ppoll_wrapper: +ENTRY(compat_sys_ppoll_wrapper) llgtr %r2,%r2 # struct pollfd * llgfr %r3,%r3 # unsigned int llgtr %r4,%r4 # struct timespec * @@ -1600,26 +1365,22 @@ compat_sys_ppoll_wrapper: llgfr %r6,%r6 # size_t jg compat_sys_ppoll - .globl sys_unshare_wrapper -sys_unshare_wrapper: +ENTRY(sys_unshare_wrapper) llgfr %r2,%r2 # unsigned long jg sys_unshare - .globl compat_sys_set_robust_list_wrapper -compat_sys_set_robust_list_wrapper: +ENTRY(compat_sys_set_robust_list_wrapper) llgtr %r2,%r2 # struct compat_robust_list_head * llgfr %r3,%r3 # size_t jg 
compat_sys_set_robust_list - .globl compat_sys_get_robust_list_wrapper -compat_sys_get_robust_list_wrapper: +ENTRY(compat_sys_get_robust_list_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # compat_uptr_t_t * llgtr %r4,%r4 # compat_size_t * jg compat_sys_get_robust_list - .globl sys_splice_wrapper -sys_splice_wrapper: +ENTRY(sys_splice_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # loff_t * lgfr %r4,%r4 # int @@ -1629,8 +1390,7 @@ sys_splice_wrapper: stg %r0,160(%r15) jg sys_splice - .globl sys_sync_file_range_wrapper -sys_sync_file_range_wrapper: +ENTRY(sys_sync_file_range_wrapper) lgfr %r2,%r2 # int sllg %r3,%r3,32 # get high word of 64bit loff_t or %r3,%r4 # get low word of 64bit loff_t @@ -1639,31 +1399,27 @@ sys_sync_file_range_wrapper: llgf %r5,164(%r15) # unsigned int jg sys_sync_file_range - .globl sys_tee_wrapper -sys_tee_wrapper: +ENTRY(sys_tee_wrapper) lgfr %r2,%r2 # int lgfr %r3,%r3 # int llgfr %r4,%r4 # size_t llgfr %r5,%r5 # unsigned int jg sys_tee - .globl compat_sys_vmsplice_wrapper -compat_sys_vmsplice_wrapper: +ENTRY(compat_sys_vmsplice_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # compat_iovec * llgfr %r4,%r4 # unsigned int llgfr %r5,%r5 # unsigned int jg compat_sys_vmsplice - .globl sys_getcpu_wrapper -sys_getcpu_wrapper: +ENTRY(sys_getcpu_wrapper) llgtr %r2,%r2 # unsigned * llgtr %r3,%r3 # unsigned * llgtr %r4,%r4 # struct getcpu_cache * jg sys_getcpu - .globl compat_sys_epoll_pwait_wrapper -compat_sys_epoll_pwait_wrapper: +ENTRY(compat_sys_epoll_pwait_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # struct compat_epoll_event * lgfr %r4,%r4 # int @@ -1673,34 +1429,29 @@ compat_sys_epoll_pwait_wrapper: stg %r0,160(%r15) jg compat_sys_epoll_pwait - .globl compat_sys_utimes_wrapper -compat_sys_utimes_wrapper: +ENTRY(compat_sys_utimes_wrapper) llgtr %r2,%r2 # char * llgtr %r3,%r3 # struct compat_timeval * jg compat_sys_utimes - .globl compat_sys_utimensat_wrapper -compat_sys_utimensat_wrapper: +ENTRY(compat_sys_utimensat_wrapper) llgfr %r2,%r2 # unsigned int 
llgtr %r3,%r3 # char * llgtr %r4,%r4 # struct compat_timespec * lgfr %r5,%r5 # int jg compat_sys_utimensat - .globl compat_sys_signalfd_wrapper -compat_sys_signalfd_wrapper: +ENTRY(compat_sys_signalfd_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # compat_sigset_t * llgfr %r4,%r4 # compat_size_t jg compat_sys_signalfd - .globl sys_eventfd_wrapper -sys_eventfd_wrapper: +ENTRY(sys_eventfd_wrapper) llgfr %r2,%r2 # unsigned int jg sys_eventfd - .globl sys_fallocate_wrapper -sys_fallocate_wrapper: +ENTRY(sys_fallocate_wrapper) lgfr %r2,%r2 # int lgfr %r3,%r3 # int sllg %r4,%r4,32 # get high word of 64bit loff_t @@ -1709,94 +1460,80 @@ sys_fallocate_wrapper: l %r5,164(%r15) # get low word of 64bit loff_t jg sys_fallocate - .globl sys_timerfd_create_wrapper -sys_timerfd_create_wrapper: +ENTRY(sys_timerfd_create_wrapper) lgfr %r2,%r2 # int lgfr %r3,%r3 # int jg sys_timerfd_create - .globl compat_sys_timerfd_settime_wrapper -compat_sys_timerfd_settime_wrapper: +ENTRY(compat_sys_timerfd_settime_wrapper) lgfr %r2,%r2 # int lgfr %r3,%r3 # int llgtr %r4,%r4 # struct compat_itimerspec * llgtr %r5,%r5 # struct compat_itimerspec * jg compat_sys_timerfd_settime - .globl compat_sys_timerfd_gettime_wrapper -compat_sys_timerfd_gettime_wrapper: +ENTRY(compat_sys_timerfd_gettime_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # struct compat_itimerspec * jg compat_sys_timerfd_gettime - .globl compat_sys_signalfd4_wrapper -compat_sys_signalfd4_wrapper: +ENTRY(compat_sys_signalfd4_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # compat_sigset_t * llgfr %r4,%r4 # compat_size_t lgfr %r5,%r5 # int jg compat_sys_signalfd4 - .globl sys_eventfd2_wrapper -sys_eventfd2_wrapper: +ENTRY(sys_eventfd2_wrapper) llgfr %r2,%r2 # unsigned int lgfr %r3,%r3 # int jg sys_eventfd2 - .globl sys_inotify_init1_wrapper -sys_inotify_init1_wrapper: +ENTRY(sys_inotify_init1_wrapper) lgfr %r2,%r2 # int jg sys_inotify_init1 - .globl sys_pipe2_wrapper -sys_pipe2_wrapper: +ENTRY(sys_pipe2_wrapper) llgtr %r2,%r2 # u32 * lgfr %r3,%r3 # 
int jg sys_pipe2 # branch to system call - .globl sys_dup3_wrapper -sys_dup3_wrapper: +ENTRY(sys_dup3_wrapper) llgfr %r2,%r2 # unsigned int llgfr %r3,%r3 # unsigned int lgfr %r4,%r4 # int jg sys_dup3 # branch to system call - .globl sys_epoll_create1_wrapper -sys_epoll_create1_wrapper: +ENTRY(sys_epoll_create1_wrapper) lgfr %r2,%r2 # int jg sys_epoll_create1 # branch to system call - .globl sys32_readahead_wrapper -sys32_readahead_wrapper: +ENTRY(sys32_readahead_wrapper) lgfr %r2,%r2 # int llgfr %r3,%r3 # u32 llgfr %r4,%r4 # u32 lgfr %r5,%r5 # s32 jg sys32_readahead # branch to system call - .globl sys32_sendfile64_wrapper -sys32_sendfile64_wrapper: +ENTRY(sys32_sendfile64_wrapper) lgfr %r2,%r2 # int lgfr %r3,%r3 # int llgtr %r4,%r4 # compat_loff_t * lgfr %r5,%r5 # s32 jg sys32_sendfile64 # branch to system call - .globl sys_tkill_wrapper -sys_tkill_wrapper: +ENTRY(sys_tkill_wrapper) lgfr %r2,%r2 # pid_t lgfr %r3,%r3 # int jg sys_tkill # branch to system call - .globl sys_tgkill_wrapper -sys_tgkill_wrapper: +ENTRY(sys_tgkill_wrapper) lgfr %r2,%r2 # pid_t lgfr %r3,%r3 # pid_t lgfr %r4,%r4 # int jg sys_tgkill # branch to system call - .globl compat_sys_keyctl_wrapper -compat_sys_keyctl_wrapper: +ENTRY(compat_sys_keyctl_wrapper) llgfr %r2,%r2 # u32 llgfr %r3,%r3 # u32 llgfr %r4,%r4 # u32 @@ -1804,8 +1541,7 @@ compat_sys_keyctl_wrapper: llgfr %r6,%r6 # u32 jg compat_sys_keyctl # branch to system call - .globl compat_sys_preadv_wrapper -compat_sys_preadv_wrapper: +ENTRY(compat_sys_preadv_wrapper) llgfr %r2,%r2 # unsigned long llgtr %r3,%r3 # compat_iovec * llgfr %r4,%r4 # unsigned long @@ -1813,8 +1549,7 @@ compat_sys_preadv_wrapper: llgfr %r6,%r6 # u32 jg compat_sys_preadv # branch to system call - .globl compat_sys_pwritev_wrapper -compat_sys_pwritev_wrapper: +ENTRY(compat_sys_pwritev_wrapper) llgfr %r2,%r2 # unsigned long llgtr %r3,%r3 # compat_iovec * llgfr %r4,%r4 # unsigned long @@ -1822,16 +1557,14 @@ compat_sys_pwritev_wrapper: llgfr %r6,%r6 # u32 jg 
compat_sys_pwritev # branch to system call - .globl compat_sys_rt_tgsigqueueinfo_wrapper -compat_sys_rt_tgsigqueueinfo_wrapper: +ENTRY(compat_sys_rt_tgsigqueueinfo_wrapper) lgfr %r2,%r2 # compat_pid_t lgfr %r3,%r3 # compat_pid_t lgfr %r4,%r4 # int llgtr %r5,%r5 # struct compat_siginfo * jg compat_sys_rt_tgsigqueueinfo # branch to system call - .globl sys_perf_event_open_wrapper -sys_perf_event_open_wrapper: +ENTRY(sys_perf_event_open_wrapper) llgtr %r2,%r2 # const struct perf_event_attr * lgfr %r3,%r3 # pid_t lgfr %r4,%r4 # int @@ -1839,29 +1572,25 @@ sys_perf_event_open_wrapper: llgfr %r6,%r6 # unsigned long jg sys_perf_event_open # branch to system call - .globl sys_clone_wrapper -sys_clone_wrapper: +ENTRY(sys_clone_wrapper) llgfr %r2,%r2 # unsigned long llgfr %r3,%r3 # unsigned long llgtr %r4,%r4 # int * llgtr %r5,%r5 # int * jg sys_clone # branch to system call - .globl sys32_execve_wrapper -sys32_execve_wrapper: +ENTRY(sys32_execve_wrapper) llgtr %r2,%r2 # char * llgtr %r3,%r3 # compat_uptr_t * llgtr %r4,%r4 # compat_uptr_t * jg sys32_execve # branch to system call - .globl sys_fanotify_init_wrapper -sys_fanotify_init_wrapper: +ENTRY(sys_fanotify_init_wrapper) llgfr %r2,%r2 # unsigned int llgfr %r3,%r3 # unsigned int jg sys_fanotify_init # branch to system call - .globl sys_fanotify_mark_wrapper -sys_fanotify_mark_wrapper: +ENTRY(sys_fanotify_mark_wrapper) lgfr %r2,%r2 # int llgfr %r3,%r3 # unsigned int sllg %r4,%r4,32 # get high word of 64bit mask @@ -1870,16 +1599,14 @@ sys_fanotify_mark_wrapper: llgt %r6,164(%r15) # char * jg sys_fanotify_mark # branch to system call - .globl sys_prlimit64_wrapper -sys_prlimit64_wrapper: +ENTRY(sys_prlimit64_wrapper) lgfr %r2,%r2 # pid_t llgfr %r3,%r3 # unsigned int llgtr %r4,%r4 # const struct rlimit64 __user * llgtr %r5,%r5 # struct rlimit64 __user * jg sys_prlimit64 # branch to system call - .globl sys_name_to_handle_at_wrapper -sys_name_to_handle_at_wrapper: +ENTRY(sys_name_to_handle_at_wrapper) lgfr %r2,%r2 # 
int llgtr %r3,%r3 # const char __user * llgtr %r4,%r4 # struct file_handle __user * @@ -1887,21 +1614,18 @@ sys_name_to_handle_at_wrapper: lgfr %r6,%r6 # int jg sys_name_to_handle_at - .globl compat_sys_open_by_handle_at_wrapper -compat_sys_open_by_handle_at_wrapper: +ENTRY(compat_sys_open_by_handle_at_wrapper) lgfr %r2,%r2 # int llgtr %r3,%r3 # struct file_handle __user * lgfr %r4,%r4 # int jg compat_sys_open_by_handle_at - .globl compat_sys_clock_adjtime_wrapper -compat_sys_clock_adjtime_wrapper: +ENTRY(compat_sys_clock_adjtime_wrapper) lgfr %r2,%r2 # clockid_t (int) llgtr %r3,%r3 # struct compat_timex __user * jg compat_sys_clock_adjtime - .globl sys_syncfs_wrapper -sys_syncfs_wrapper: +ENTRY(sys_syncfs_wrapper) lgfr %r2,%r2 # int jg sys_syncfs diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 0476174dfff5..3eab7cfab07c 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -9,8 +9,8 @@ * Heiko Carstens */ -#include #include +#include #include #include #include @@ -197,8 +197,7 @@ STACK_SIZE = 1 << STACK_SHIFT * Returns: * gpr2 = prev */ - .globl __switch_to -__switch_to: +ENTRY(__switch_to) basr %r1,0 0: l %r4,__THREAD_info(%r2) # get thread_info of prev l %r5,__THREAD_info(%r3) # get thread_info of next @@ -224,8 +223,7 @@ __critical_start: * are executed with interrupts enabled. */ - .globl system_call -system_call: +ENTRY(system_call) stpt __LC_SYNC_ENTER_TIMER sysc_saveall: SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA @@ -388,8 +386,7 @@ sysc_tracenogo: # # a new process exits the kernel with ret_from_fork # - .globl ret_from_fork -ret_from_fork: +ENTRY(ret_from_fork) l %r13,__LC_SVC_NEW_PSW+4 l %r12,__LC_THREAD_INFO # load pointer to thread_info struct tm SP_PSW+1(%r15),0x01 # forking a kernel thread ? 
@@ -405,8 +402,7 @@ ret_from_fork: # kernel_execve function needs to deal with pt_regs that is not # at the usual place # - .globl kernel_execve -kernel_execve: +ENTRY(kernel_execve) stm %r12,%r15,48(%r15) lr %r14,%r15 l %r13,__LC_SVC_NEW_PSW+4 @@ -438,8 +434,7 @@ kernel_execve: * Program check handler routine */ - .globl pgm_check_handler -pgm_check_handler: +ENTRY(pgm_check_handler) /* * First we need to check for a special case: * Single stepping an instruction that disables the PER event mask will @@ -565,8 +560,7 @@ kernel_per: * IO interrupt handler routine */ - .globl io_int_handler -io_int_handler: +ENTRY(io_int_handler) stck __LC_INT_CLOCK stpt __LC_ASYNC_ENTER_TIMER SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16 @@ -703,8 +697,7 @@ io_notify_resume: * External interrupt handler routine */ - .globl ext_int_handler -ext_int_handler: +ENTRY(ext_int_handler) stck __LC_INT_CLOCK stpt __LC_ASYNC_ENTER_TIMER SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16 @@ -731,8 +724,7 @@ __critical_end: * Machine check handler routines */ - .globl mcck_int_handler -mcck_int_handler: +ENTRY(mcck_int_handler) stck __LC_MCCK_CLOCK spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs @@ -818,8 +810,7 @@ mcck_return: */ #ifdef CONFIG_SMP __CPUINIT - .globl restart_int_handler -restart_int_handler: +ENTRY(restart_int_handler) basr %r1,0 restart_base: spt restart_vtime-restart_base(%r1) @@ -848,8 +839,7 @@ restart_vtime: /* * If we do not run with SMP enabled, let the new CPU crash ... 
*/ - .globl restart_int_handler -restart_int_handler: +ENTRY(restart_int_handler) basr %r1,0 restart_base: lpsw restart_crash-restart_base(%r1) diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index ab596a865300..7a0fd426ca92 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S @@ -9,8 +9,8 @@ * Heiko Carstens */ -#include #include +#include #include #include #include @@ -219,8 +219,7 @@ _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING) * Returns: * gpr2 = prev */ - .globl __switch_to -__switch_to: +ENTRY(__switch_to) lg %r4,__THREAD_info(%r2) # get thread_info of prev lg %r5,__THREAD_info(%r3) # get thread_info of next tm __TI_flags+7(%r4),_TIF_MCCK_PENDING # machine check pending? @@ -245,8 +244,7 @@ __critical_start: * are executed with interrupts enabled. */ - .globl system_call -system_call: +ENTRY(system_call) stpt __LC_SYNC_ENTER_TIMER sysc_saveall: SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA @@ -408,8 +406,7 @@ sysc_tracenogo: # # a new process exits the kernel with ret_from_fork # - .globl ret_from_fork -ret_from_fork: +ENTRY(ret_from_fork) lg %r13,__LC_SVC_NEW_PSW+8 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct tm SP_PSW+1(%r15),0x01 # forking a kernel thread ? 
@@ -424,8 +421,7 @@ ret_from_fork: # kernel_execve function needs to deal with pt_regs that is not # at the usual place # - .globl kernel_execve -kernel_execve: +ENTRY(kernel_execve) stmg %r12,%r15,96(%r15) lgr %r14,%r15 aghi %r15,-SP_SIZE @@ -455,8 +451,7 @@ kernel_execve: * Program check handler routine */ - .globl pgm_check_handler -pgm_check_handler: +ENTRY(pgm_check_handler) /* * First we need to check for a special case: * Single stepping an instruction that disables the PER event mask will @@ -584,8 +579,7 @@ kernel_per: /* * IO interrupt handler routine */ - .globl io_int_handler -io_int_handler: +ENTRY(io_int_handler) stck __LC_INT_CLOCK stpt __LC_ASYNC_ENTER_TIMER SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+40 @@ -719,8 +713,7 @@ io_notify_resume: /* * External interrupt handler routine */ - .globl ext_int_handler -ext_int_handler: +ENTRY(ext_int_handler) stck __LC_INT_CLOCK stpt __LC_ASYNC_ENTER_TIMER SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+40 @@ -749,8 +742,7 @@ __critical_end: /* * Machine check handler routines */ - .globl mcck_int_handler -mcck_int_handler: +ENTRY(mcck_int_handler) stck __LC_MCCK_CLOCK la %r1,4095 # revalidate r1 spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer @@ -836,8 +828,7 @@ mcck_done: */ #ifdef CONFIG_SMP __CPUINIT - .globl restart_int_handler -restart_int_handler: +ENTRY(restart_int_handler) basr %r1,0 restart_base: spt restart_vtime-restart_base(%r1) @@ -864,8 +855,7 @@ restart_vtime: /* * If we do not run with SMP enabled, let the new CPU crash ... 
*/ - .globl restart_int_handler -restart_int_handler: +ENTRY(restart_int_handler) basr %r1,0 restart_base: lpswe restart_crash-restart_base(%r1) @@ -1055,8 +1045,7 @@ cleanup_io_restore_insn: * %r2 pointer to sie control block * %r3 guest register save area */ - .globl sie64a -sie64a: +ENTRY(sie64a) stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers stg %r2,__SF_EMPTY(%r15) # save control block pointer stg %r3,__SF_EMPTY+8(%r15) # save guest register save area diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index fb317bf2c378..2d781bab37bb 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S @@ -22,6 +22,7 @@ */ #include +#include #include #include #include @@ -383,8 +384,7 @@ iplstart: # doesn't need a builtin ipl record. # .org 0x800 - .globl start -start: +ENTRY(start) stm %r0,%r15,0x07b0 # store registers basr %r12,%r0 .base: @@ -448,8 +448,7 @@ start: # or linload or SALIPL # .org 0x10000 - .globl startup -startup: +ENTRY(startup) basr %r13,0 # get base .LPG0: xc 0x200(256),0x200 # partially clear lowcore diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S index b8f8dc126102..dd0d1e272be9 100644 --- a/arch/s390/kernel/head31.S +++ b/arch/s390/kernel/head31.S @@ -11,13 +11,13 @@ */ #include +#include #include #include #include __HEAD - .globl startup_continue -startup_continue: +ENTRY(startup_continue) basr %r13,0 # get base .LPG1: @@ -78,8 +78,7 @@ startup_continue: .Lbase_cc: .long sched_clock_base_cc - .globl _ehead -_ehead: +ENTRY(_ehead) #ifdef CONFIG_SHARED_KERNEL .org 0x100000 - 0x11000 # head.o ends at 0x11000 @@ -88,8 +87,8 @@ _ehead: # # startup-code, running in absolute addressing mode # - .globl _stext -_stext: basr %r13,0 # get base +ENTRY(_stext) + basr %r13,0 # get base .LPG3: # check control registers stctl %c0,%c15,0(%r15) diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index cdef68717416..188602898c17 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S @@ 
-11,13 +11,13 @@ */ #include +#include #include #include #include __HEAD - .globl startup_continue -startup_continue: +ENTRY(startup_continue) larl %r1,sched_clock_base_cc mvc 0(8,%r1),__LC_LAST_UPDATE_CLOCK larl %r13,.LPG1 # get base @@ -76,8 +76,7 @@ startup_continue: .long 0x80000000,0,0,0 # invalid access-list entries .endr - .globl _ehead -_ehead: +ENTRY(_ehead) #ifdef CONFIG_SHARED_KERNEL .org 0x100000 - 0x11000 # head.o ends at 0x11000 @@ -86,8 +85,8 @@ _ehead: # # startup-code, running in absolute addressing mode # - .globl _stext -_stext: basr %r13,0 # get base +ENTRY(_stext) + basr %r13,0 # get base .LPG3: # check control registers stctg %c0,%c15,0(%r15) diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S index 1e6a55795628..7e2c38ba1373 100644 --- a/arch/s390/kernel/mcount.S +++ b/arch/s390/kernel/mcount.S @@ -5,21 +5,19 @@ * */ +#include #include .section .kprobes.text, "ax" - .globl ftrace_stub -ftrace_stub: +ENTRY(ftrace_stub) br %r14 - .globl _mcount -_mcount: +ENTRY(_mcount) #ifdef CONFIG_DYNAMIC_FTRACE br %r14 - .globl ftrace_caller -ftrace_caller: +ENTRY(ftrace_caller) #endif stm %r2,%r5,16(%r15) bras %r1,2f @@ -41,8 +39,7 @@ ftrace_caller: #ifdef CONFIG_FUNCTION_GRAPH_TRACER l %r2,100(%r15) l %r3,152(%r15) - .globl ftrace_graph_caller -ftrace_graph_caller: +ENTRY(ftrace_graph_caller) # The bras instruction gets runtime patched to call prepare_ftrace_return. # See ftrace_enable_ftrace_graph_caller. 
The patched instruction is: # bras %r14,prepare_ftrace_return @@ -56,8 +53,7 @@ ftrace_graph_caller: #ifdef CONFIG_FUNCTION_GRAPH_TRACER - .globl return_to_handler -return_to_handler: +ENTRY(return_to_handler) stm %r2,%r5,16(%r15) st %r14,56(%r15) lr %r0,%r15 diff --git a/arch/s390/kernel/mcount64.S b/arch/s390/kernel/mcount64.S index e73667286ac0..f70cadec68fc 100644 --- a/arch/s390/kernel/mcount64.S +++ b/arch/s390/kernel/mcount64.S @@ -5,21 +5,19 @@ * */ +#include #include .section .kprobes.text, "ax" - .globl ftrace_stub -ftrace_stub: +ENTRY(ftrace_stub) br %r14 - .globl _mcount -_mcount: +ENTRY(_mcount) #ifdef CONFIG_DYNAMIC_FTRACE br %r14 - .globl ftrace_caller -ftrace_caller: +ENTRY(ftrace_caller) #endif larl %r1,function_trace_stop icm %r1,0xf,0(%r1) @@ -37,8 +35,7 @@ ftrace_caller: #ifdef CONFIG_FUNCTION_GRAPH_TRACER lg %r2,168(%r15) lg %r3,272(%r15) - .globl ftrace_graph_caller -ftrace_graph_caller: +ENTRY(ftrace_graph_caller) # The bras instruction gets runtime patched to call prepare_ftrace_return. # See ftrace_enable_ftrace_graph_caller. 
The patched instruction is: # bras %r14,prepare_ftrace_return @@ -52,8 +49,7 @@ ftrace_graph_caller: #ifdef CONFIG_FUNCTION_GRAPH_TRACER - .globl return_to_handler -return_to_handler: +ENTRY(return_to_handler) stmg %r2,%r5,32(%r15) lgr %r1,%r15 aghi %r15,-160 diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S index cb899d9f8505..303d961c3bb5 100644 --- a/arch/s390/kernel/reipl.S +++ b/arch/s390/kernel/reipl.S @@ -6,14 +6,15 @@ * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com) */ +#include #include # # do_reipl_asm # Parameter: r2 = schid of reipl device # - .globl do_reipl_asm -do_reipl_asm: basr %r13,0 +ENTRY(do_reipl_asm) + basr %r13,0 .Lpg0: lpsw .Lnewpsw-.Lpg0(%r13) .Lpg1: # do store status of all registers diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S index 9eabbc90795d..78eb7cfbd3d1 100644 --- a/arch/s390/kernel/reipl64.S +++ b/arch/s390/kernel/reipl64.S @@ -4,6 +4,7 @@ * Denis Joseph Barrow, */ +#include #include # @@ -11,8 +12,8 @@ # Parameter: r2 = schid of reipl device # - .globl do_reipl_asm -do_reipl_asm: basr %r13,0 +ENTRY(do_reipl_asm) + basr %r13,0 .Lpg0: lpswe .Lnewpsw-.Lpg0(%r13) .Lpg1: # do store status of all registers diff --git a/arch/s390/kernel/relocate_kernel.S b/arch/s390/kernel/relocate_kernel.S index 3b456b80bcee..c91d70aede91 100644 --- a/arch/s390/kernel/relocate_kernel.S +++ b/arch/s390/kernel/relocate_kernel.S @@ -8,6 +8,8 @@ * */ +#include + /* * moves the new kernel to its destination... 
* %r2 = pointer to first kimage_entry_t @@ -22,8 +24,7 @@ */ .text - .globl relocate_kernel - relocate_kernel: +ENTRY(relocate_kernel) basr %r13,0 # base address .base: stnsm sys_msk-.base(%r13),0xfb # disable DAT @@ -112,6 +113,7 @@ .byte 0 .align 8 relocate_kernel_end: + .align 8 .globl relocate_kernel_len relocate_kernel_len: .quad relocate_kernel_end - relocate_kernel diff --git a/arch/s390/kernel/relocate_kernel64.S b/arch/s390/kernel/relocate_kernel64.S index 1f9ea2067b59..7c3ce589a7f0 100644 --- a/arch/s390/kernel/relocate_kernel64.S +++ b/arch/s390/kernel/relocate_kernel64.S @@ -8,6 +8,8 @@ * */ +#include + /* * moves the new kernel to its destination... * %r2 = pointer to first kimage_entry_t @@ -23,8 +25,7 @@ */ .text - .globl relocate_kernel - relocate_kernel: +ENTRY(relocate_kernel) basr %r13,0 # base address .base: stnsm sys_msk-.base(%r13),0xfb # disable DAT @@ -115,6 +116,7 @@ .byte 0 .align 8 relocate_kernel_end: + .align 8 .globl relocate_kernel_len relocate_kernel_len: .quad relocate_kernel_end - relocate_kernel diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S index 2e82fdd89320..95792d846bb6 100644 --- a/arch/s390/kernel/sclp.S +++ b/arch/s390/kernel/sclp.S @@ -8,6 +8,8 @@ * */ +#include + LC_EXT_NEW_PSW = 0x58 # addr of ext int handler LC_EXT_NEW_PSW_64 = 0x1b0 # addr of ext int handler 64 bit LC_EXT_INT_PARAM = 0x80 # addr of ext int parameter @@ -260,8 +262,7 @@ _sclp_print: # R2 = 0 on success, 1 on failure # - .globl _sclp_print_early -_sclp_print_early: +ENTRY(_sclp_print_early) stm %r6,%r15,24(%r15) # save registers ahi %r15,-96 # create stack frame #ifdef CONFIG_64BIT diff --git a/arch/s390/kernel/switch_cpu.S b/arch/s390/kernel/switch_cpu.S index 20530dd2eab1..bfe070bc7659 100644 --- a/arch/s390/kernel/switch_cpu.S +++ b/arch/s390/kernel/switch_cpu.S @@ -5,6 +5,7 @@ * */ +#include #include #include @@ -16,9 +17,7 @@ # %r6 - destination cpu .section .text - .align 4 - .globl smp_switch_to_cpu -smp_switch_to_cpu: 
+ENTRY(smp_switch_to_cpu) stm %r6,%r15,__SF_GPRS(%r15) lr %r1,%r15 ahi %r15,-STACK_FRAME_OVERHEAD @@ -33,8 +32,7 @@ smp_switch_to_cpu: brc 2,2b /* busy, try again */ 3: j 3b - .globl smp_restart_cpu -smp_restart_cpu: +ENTRY(smp_restart_cpu) basr %r13,0 0: la %r1,.gprregs_addr-0b(%r13) l %r1,0(%r1) diff --git a/arch/s390/kernel/switch_cpu64.S b/arch/s390/kernel/switch_cpu64.S index 5be3f43898f9..fcc42d799e41 100644 --- a/arch/s390/kernel/switch_cpu64.S +++ b/arch/s390/kernel/switch_cpu64.S @@ -5,6 +5,7 @@ * */ +#include #include #include @@ -16,9 +17,7 @@ # %r6 - destination cpu .section .text - .align 4 - .globl smp_switch_to_cpu -smp_switch_to_cpu: +ENTRY(smp_switch_to_cpu) stmg %r6,%r15,__SF_GPRS(%r15) lgr %r1,%r15 aghi %r15,-STACK_FRAME_OVERHEAD @@ -31,8 +30,7 @@ smp_switch_to_cpu: brc 2,2b /* busy, try again */ 3: j 3b - .globl smp_restart_cpu -smp_restart_cpu: +ENTRY(smp_restart_cpu) larl %r1,.gprregs lmg %r0,%r15,0(%r1) 1: sigp %r0,%r5,__SIGP_SENSE /* Wait for calling CPU */ diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S index 1f066e46e83e..51bcdb50a230 100644 --- a/arch/s390/kernel/swsusp_asm64.S +++ b/arch/s390/kernel/swsusp_asm64.S @@ -7,6 +7,7 @@ * Michael Holzheu */ +#include #include #include #include @@ -22,9 +23,7 @@ * This function runs with disabled interrupts. */ .section .text - .align 4 - .globl swsusp_arch_suspend -swsusp_arch_suspend: +ENTRY(swsusp_arch_suspend) stmg %r6,%r15,__SF_GPRS(%r15) lgr %r1,%r15 aghi %r15,-STACK_FRAME_OVERHEAD @@ -112,8 +111,7 @@ swsusp_arch_suspend: * Then we return to the function that called swsusp_arch_suspend(). * swsusp_arch_resume() runs with disabled interrupts. 
*/ - .globl swsusp_arch_resume -swsusp_arch_resume: +ENTRY(swsusp_arch_resume) stmg %r6,%r15,__SF_GPRS(%r15) lgr %r1,%r15 aghi %r15,-STACK_FRAME_OVERHEAD diff --git a/arch/s390/lib/qrnnd.S b/arch/s390/lib/qrnnd.S index eb1df632e749..d321329130ec 100644 --- a/arch/s390/lib/qrnnd.S +++ b/arch/s390/lib/qrnnd.S @@ -1,5 +1,7 @@ # S/390 __udiv_qrnnd +#include + # r2 : &__r # r3 : upper half of 64 bit word n # r4 : lower half of 64 bit word n @@ -8,8 +10,7 @@ # the quotient q is to be returned .text - .globl __udiv_qrnnd -__udiv_qrnnd: +ENTRY(__udiv_qrnnd) st %r2,24(%r15) # store pointer to reminder for later lr %r0,%r3 # reload n lr %r1,%r4 -- cgit v1.2.3 From e5992f2e6c3829cd43dbc4438ee13dcd6506f7f3 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Sun, 24 Jul 2011 10:48:20 +0200 Subject: [S390] kvm guest address space mapping Add code that allows KVM to control the virtual memory layout that is seen by a guest. The guest address space uses a second page table that shares the last level pte-tables with the process page table. If a page is unmapped from the process page table it is automatically unmapped from the guest page table as well. The guest address space mapping starts out empty, KVM can map any individual 1MB segments from the process virtual memory to any 1MB aligned location in the guest virtual memory. If a target segment in the process virtual memory does not exist or is unmapped while a guest mapping exists the desired target address is stored as an invalid segment table entry in the guest page table. The population of the guest page table is fault driven. 
Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/lowcore.h | 2 +- arch/s390/include/asm/mmu.h | 4 +- arch/s390/include/asm/pgalloc.h | 7 +- arch/s390/include/asm/pgtable.h | 42 ++++ arch/s390/include/asm/processor.h | 1 + arch/s390/include/asm/tlbflush.h | 2 +- arch/s390/kernel/asm-offsets.c | 2 +- arch/s390/mm/fault.c | 18 +- arch/s390/mm/hugetlbpage.c | 2 +- arch/s390/mm/pgtable.c | 421 +++++++++++++++++++++++++++++++++++--- arch/s390/mm/vmem.c | 8 +- 11 files changed, 473 insertions(+), 36 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h index 228cf0b295db..f26280d9e88d 100644 --- a/arch/s390/include/asm/lowcore.h +++ b/arch/s390/include/asm/lowcore.h @@ -268,7 +268,7 @@ struct _lowcore { __u64 vdso_per_cpu_data; /* 0x0358 */ __u64 machine_flags; /* 0x0360 */ __u64 ftrace_func; /* 0x0368 */ - __u64 sie_hook; /* 0x0370 */ + __u64 gmap; /* 0x0370 */ __u64 cmf_hpp; /* 0x0378 */ /* Interrupt response block. */ diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h index 82d0847896a0..4506791adcd5 100644 --- a/arch/s390/include/asm/mmu.h +++ b/arch/s390/include/asm/mmu.h @@ -6,6 +6,7 @@ typedef struct { unsigned int flush_mm; spinlock_t list_lock; struct list_head pgtable_list; + struct list_head gmap_list; unsigned long asce_bits; unsigned long asce_limit; unsigned long vdso_base; @@ -17,6 +18,7 @@ typedef struct { #define INIT_MM_CONTEXT(name) \ .context.list_lock = __SPIN_LOCK_UNLOCKED(name.context.list_lock), \ - .context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), + .context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), \ + .context.gmap_list = LIST_HEAD_INIT(name.context.gmap_list), #endif diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h index 38e71ebcd3c2..8eef9b5b3cf4 100644 --- a/arch/s390/include/asm/pgalloc.h +++ b/arch/s390/include/asm/pgalloc.h @@ -20,7 +20,7 @@ unsigned long *crst_table_alloc(struct 
mm_struct *); void crst_table_free(struct mm_struct *, unsigned long *); -unsigned long *page_table_alloc(struct mm_struct *); +unsigned long *page_table_alloc(struct mm_struct *, unsigned long); void page_table_free(struct mm_struct *, unsigned long *); #ifdef CONFIG_HAVE_RCU_TABLE_FREE void page_table_free_rcu(struct mmu_gather *, unsigned long *); @@ -115,6 +115,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm) { spin_lock_init(&mm->context.list_lock); INIT_LIST_HEAD(&mm->context.pgtable_list); + INIT_LIST_HEAD(&mm->context.gmap_list); return (pgd_t *) crst_table_alloc(mm); } #define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd) @@ -133,8 +134,8 @@ static inline void pmd_populate(struct mm_struct *mm, /* * page table entry allocation/free routines. */ -#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm)) -#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm)) +#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm, vmaddr)) +#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm, vmaddr)) #define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte) #define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte) diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 801fbe1d837d..519eb5f187ef 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -654,6 +654,48 @@ static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste) #endif } +/** + * struct gmap_struct - guest address space + * @mm: pointer to the parent mm_struct + * @table: pointer to the page directory + * @crst_list: list of all crst tables used in the guest address space + */ +struct gmap { + struct list_head list; + struct mm_struct *mm; + unsigned long *table; + struct list_head crst_list; +}; + +/** + * struct gmap_rmap - reverse mapping for segment table entries + * @next: pointer to the next gmap_rmap structure in the list + * @entry: 
pointer to a segment table entry + */ +struct gmap_rmap { + struct list_head list; + unsigned long *entry; +}; + +/** + * struct gmap_pgtable - gmap information attached to a page table + * @vmaddr: address of the 1MB segment in the process virtual memory + * @mapper: list of segment table entries maping a page table + */ +struct gmap_pgtable { + unsigned long vmaddr; + struct list_head mapper; +}; + +struct gmap *gmap_alloc(struct mm_struct *mm); +void gmap_free(struct gmap *gmap); +void gmap_enable(struct gmap *gmap); +void gmap_disable(struct gmap *gmap); +int gmap_map_segment(struct gmap *gmap, unsigned long from, + unsigned long to, unsigned long length); +int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len); +unsigned long gmap_fault(unsigned long address, struct gmap *); + /* * Certain architectures need to do special things when PTEs * within a page table are directly modified. Thus, the following diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index 1300c3025334..55dfcc8bdc0d 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h @@ -80,6 +80,7 @@ struct thread_struct { mm_segment_t mm_segment; unsigned long prot_addr; /* address of protection-excep. */ unsigned int trap_no; + unsigned long gmap_addr; /* address of last gmap fault. */ struct per_regs per_user; /* User specified PER registers */ struct per_event per_event; /* Cause of the last PER trap */ /* pfault_wait is used to block the process on a pfault event */ diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h index b7a4f2eb0057..304445382382 100644 --- a/arch/s390/include/asm/tlbflush.h +++ b/arch/s390/include/asm/tlbflush.h @@ -80,7 +80,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm) * on all cpus instead of doing a local flush if the mm * only ran on the local cpu. 
*/ - if (MACHINE_HAS_IDTE) + if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list)) __tlb_flush_idte((unsigned long) mm->pgd | mm->context.asce_bits); else diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index edfbd17d7082..05d8f38734ec 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c @@ -151,7 +151,7 @@ int main(void) DEFINE(__LC_FP_CREG_SAVE_AREA, offsetof(struct _lowcore, fpt_creg_save_area)); DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr)); DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data)); - DEFINE(__LC_SIE_HOOK, offsetof(struct _lowcore, sie_hook)); + DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap)); DEFINE(__LC_CMF_HPP, offsetof(struct _lowcore, cmf_hpp)); #endif /* CONFIG_32BIT */ return 0; diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 095f782a5512..9564fc779b27 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -303,9 +303,24 @@ static inline int do_exception(struct pt_regs *regs, int access, flags = FAULT_FLAG_ALLOW_RETRY; if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400) flags |= FAULT_FLAG_WRITE; -retry: down_read(&mm->mmap_sem); +#ifdef CONFIG_PGSTE + if (test_tsk_thread_flag(current, TIF_SIE) && S390_lowcore.gmap) { + address = gmap_fault(address, + (struct gmap *) S390_lowcore.gmap); + if (address == -EFAULT) { + fault = VM_FAULT_BADMAP; + goto out_up; + } + if (address == -ENOMEM) { + fault = VM_FAULT_OOM; + goto out_up; + } + } +#endif + +retry: fault = VM_FAULT_BADMAP; vma = find_vma(mm, address); if (!vma) @@ -356,6 +371,7 @@ retry: /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk * of starvation. 
*/ flags &= ~FAULT_FLAG_ALLOW_RETRY; + down_read(&mm->mmap_sem); goto retry; } } diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c index a4d856db9154..597bb2d27c3c 100644 --- a/arch/s390/mm/hugetlbpage.c +++ b/arch/s390/mm/hugetlbpage.c @@ -35,7 +35,7 @@ int arch_prepare_hugepage(struct page *page) if (MACHINE_HAS_HPAGE) return 0; - ptep = (pte_t *) pte_alloc_one(&init_mm, address); + ptep = (pte_t *) pte_alloc_one(&init_mm, addr); if (!ptep) return -ENOMEM; diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 37a23c223705..2adb23938a7f 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -133,30 +134,374 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit) } #endif -static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits) +#ifdef CONFIG_PGSTE + +/** + * gmap_alloc - allocate a guest address space + * @mm: pointer to the parent mm_struct + * + * Returns a guest address space structure. + */ +struct gmap *gmap_alloc(struct mm_struct *mm) { - unsigned int old, new; + struct gmap *gmap; + struct page *page; + unsigned long *table; - do { - old = atomic_read(v); - new = old ^ bits; - } while (atomic_cmpxchg(v, old, new) != old); - return new; + gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL); + if (!gmap) + goto out; + INIT_LIST_HEAD(&gmap->crst_list); + gmap->mm = mm; + page = alloc_pages(GFP_KERNEL, ALLOC_ORDER); + if (!page) + goto out_free; + list_add(&page->lru, &gmap->crst_list); + table = (unsigned long *) page_to_phys(page); + crst_table_init(table, _REGION1_ENTRY_EMPTY); + gmap->table = table; + list_add(&gmap->list, &mm->context.gmap_list); + return gmap; + +out_free: + kfree(gmap); +out: + return NULL; } +EXPORT_SYMBOL_GPL(gmap_alloc); -/* - * page table entry allocation/free routines. 
+static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table) +{ + struct gmap_pgtable *mp; + struct gmap_rmap *rmap; + struct page *page; + + if (*table & _SEGMENT_ENTRY_INV) + return 0; + page = pfn_to_page(*table >> PAGE_SHIFT); + mp = (struct gmap_pgtable *) page->index; + list_for_each_entry(rmap, &mp->mapper, list) { + if (rmap->entry != table) + continue; + list_del(&rmap->list); + kfree(rmap); + break; + } + *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr; + return 1; +} + +static void gmap_flush_tlb(struct gmap *gmap) +{ + if (MACHINE_HAS_IDTE) + __tlb_flush_idte((unsigned long) gmap->table | + _ASCE_TYPE_REGION1); + else + __tlb_flush_global(); +} + +/** + * gmap_free - free a guest address space + * @gmap: pointer to the guest address space structure */ -#ifdef CONFIG_PGSTE -static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm) +void gmap_free(struct gmap *gmap) +{ + struct page *page, *next; + unsigned long *table; + int i; + + + /* Flush tlb. */ + if (MACHINE_HAS_IDTE) + __tlb_flush_idte((unsigned long) gmap->table | + _ASCE_TYPE_REGION1); + else + __tlb_flush_global(); + + /* Free all segment & region tables. */ + down_read(&gmap->mm->mmap_sem); + list_for_each_entry_safe(page, next, &gmap->crst_list, lru) { + table = (unsigned long *) page_to_phys(page); + if ((*table & _REGION_ENTRY_TYPE_MASK) == 0) + /* Remove gmap rmap structures for segment table. */ + for (i = 0; i < PTRS_PER_PMD; i++, table++) + gmap_unlink_segment(gmap, table); + __free_pages(page, ALLOC_ORDER); + } + up_read(&gmap->mm->mmap_sem); + list_del(&gmap->list); + kfree(gmap); +} +EXPORT_SYMBOL_GPL(gmap_free); + +/** + * gmap_enable - switch primary space to the guest address space + * @gmap: pointer to the guest address space structure + */ +void gmap_enable(struct gmap *gmap) +{ + /* Load primary space page table origin. 
*/ + S390_lowcore.user_asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH | + _ASCE_USER_BITS | __pa(gmap->table); + asm volatile("lctlg 1,1,%0\n" : : "m" (S390_lowcore.user_asce) ); + S390_lowcore.gmap = (unsigned long) gmap; +} +EXPORT_SYMBOL_GPL(gmap_enable); + +/** + * gmap_disable - switch back to the standard primary address space + * @gmap: pointer to the guest address space structure + */ +void gmap_disable(struct gmap *gmap) +{ + /* Load primary space page table origin. */ + S390_lowcore.user_asce = + gmap->mm->context.asce_bits | __pa(gmap->mm->pgd); + asm volatile("lctlg 1,1,%0\n" : : "m" (S390_lowcore.user_asce) ); + S390_lowcore.gmap = 0UL; +} +EXPORT_SYMBOL_GPL(gmap_disable); + +static int gmap_alloc_table(struct gmap *gmap, + unsigned long *table, unsigned long init) +{ + struct page *page; + unsigned long *new; + + page = alloc_pages(GFP_KERNEL, ALLOC_ORDER); + if (!page) + return -ENOMEM; + new = (unsigned long *) page_to_phys(page); + crst_table_init(new, init); + down_read(&gmap->mm->mmap_sem); + if (*table & _REGION_ENTRY_INV) { + list_add(&page->lru, &gmap->crst_list); + *table = (unsigned long) new | _REGION_ENTRY_LENGTH | + (*table & _REGION_ENTRY_TYPE_MASK); + } else + __free_pages(page, ALLOC_ORDER); + up_read(&gmap->mm->mmap_sem); + return 0; +} + +/** + * gmap_unmap_segment - unmap segment from the guest address space + * @gmap: pointer to the guest address space structure + * @addr: address in the guest address space + * @len: length of the memory area to unmap + * + * Returns 0 if the unmap succeded, -EINVAL if not. 
+ */ +int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len) +{ + unsigned long *table; + unsigned long off; + int flush; + + if ((to | len) & (PMD_SIZE - 1)) + return -EINVAL; + if (len == 0 || to + len < to) + return -EINVAL; + + flush = 0; + down_read(&gmap->mm->mmap_sem); + for (off = 0; off < len; off += PMD_SIZE) { + /* Walk the guest addr space page table */ + table = gmap->table + (((to + off) >> 53) & 0x7ff); + if (*table & _REGION_ENTRY_INV) + return 0; + table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); + table = table + (((to + off) >> 42) & 0x7ff); + if (*table & _REGION_ENTRY_INV) + return 0; + table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); + table = table + (((to + off) >> 31) & 0x7ff); + if (*table & _REGION_ENTRY_INV) + return 0; + table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); + table = table + (((to + off) >> 20) & 0x7ff); + + /* Clear segment table entry in guest address space. */ + flush |= gmap_unlink_segment(gmap, table); + *table = _SEGMENT_ENTRY_INV; + } + up_read(&gmap->mm->mmap_sem); + if (flush) + gmap_flush_tlb(gmap); + return 0; +} +EXPORT_SYMBOL_GPL(gmap_unmap_segment); + +/** + * gmap_mmap_segment - map a segment to the guest address space + * @gmap: pointer to the guest address space structure + * @from: source address in the parent address space + * @to: target address in the guest address space + * + * Returns 0 if the mmap succeded, -EINVAL or -ENOMEM if not. 
+ */ +int gmap_map_segment(struct gmap *gmap, unsigned long from, + unsigned long to, unsigned long len) +{ + unsigned long *table; + unsigned long off; + int flush; + + if ((from | to | len) & (PMD_SIZE - 1)) + return -EINVAL; + if (len == 0 || from + len > PGDIR_SIZE || + from + len < from || to + len < to) + return -EINVAL; + + flush = 0; + down_read(&gmap->mm->mmap_sem); + for (off = 0; off < len; off += PMD_SIZE) { + /* Walk the gmap address space page table */ + table = gmap->table + (((to + off) >> 53) & 0x7ff); + if ((*table & _REGION_ENTRY_INV) && + gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY)) + goto out_unmap; + table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); + table = table + (((to + off) >> 42) & 0x7ff); + if ((*table & _REGION_ENTRY_INV) && + gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY)) + goto out_unmap; + table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); + table = table + (((to + off) >> 31) & 0x7ff); + if ((*table & _REGION_ENTRY_INV) && + gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY)) + goto out_unmap; + table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN); + table = table + (((to + off) >> 20) & 0x7ff); + + /* Store 'from' address in an invalid segment table entry. 
*/ + flush |= gmap_unlink_segment(gmap, table); + *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off); + } + up_read(&gmap->mm->mmap_sem); + if (flush) + gmap_flush_tlb(gmap); + return 0; + +out_unmap: + up_read(&gmap->mm->mmap_sem); + gmap_unmap_segment(gmap, to, len); + return -ENOMEM; +} +EXPORT_SYMBOL_GPL(gmap_map_segment); + +unsigned long gmap_fault(unsigned long address, struct gmap *gmap) +{ + unsigned long *table, vmaddr, segment; + struct mm_struct *mm; + struct gmap_pgtable *mp; + struct gmap_rmap *rmap; + struct vm_area_struct *vma; + struct page *page; + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + + current->thread.gmap_addr = address; + mm = gmap->mm; + /* Walk the gmap address space page table */ + table = gmap->table + ((address >> 53) & 0x7ff); + if (unlikely(*table & _REGION_ENTRY_INV)) + return -EFAULT; + table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); + table = table + ((address >> 42) & 0x7ff); + if (unlikely(*table & _REGION_ENTRY_INV)) + return -EFAULT; + table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); + table = table + ((address >> 31) & 0x7ff); + if (unlikely(*table & _REGION_ENTRY_INV)) + return -EFAULT; + table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); + table = table + ((address >> 20) & 0x7ff); + + /* Convert the gmap address to an mm address. 
*/ + segment = *table; + if (likely(!(segment & _SEGMENT_ENTRY_INV))) { + page = pfn_to_page(segment >> PAGE_SHIFT); + mp = (struct gmap_pgtable *) page->index; + return mp->vmaddr | (address & ~PMD_MASK); + } else if (segment & _SEGMENT_ENTRY_RO) { + vmaddr = segment & _SEGMENT_ENTRY_ORIGIN; + vma = find_vma(mm, vmaddr); + if (!vma || vma->vm_start > vmaddr) + return -EFAULT; + + /* Walk the parent mm page table */ + pgd = pgd_offset(mm, vmaddr); + pud = pud_alloc(mm, pgd, vmaddr); + if (!pud) + return -ENOMEM; + pmd = pmd_alloc(mm, pud, vmaddr); + if (!pmd) + return -ENOMEM; + if (!pmd_present(*pmd) && + __pte_alloc(mm, vma, pmd, vmaddr)) + return -ENOMEM; + /* pmd now points to a valid segment table entry. */ + rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT); + if (!rmap) + return -ENOMEM; + /* Link gmap segment table entry location to page table. */ + page = pmd_page(*pmd); + mp = (struct gmap_pgtable *) page->index; + rmap->entry = table; + list_add(&rmap->list, &mp->mapper); + /* Set gmap segment table entry to page table. 
*/ + *table = pmd_val(*pmd) & PAGE_MASK; + return vmaddr | (address & ~PMD_MASK); + } + return -EFAULT; + +} +EXPORT_SYMBOL_GPL(gmap_fault); + +void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table) +{ + struct gmap_rmap *rmap, *next; + struct gmap_pgtable *mp; + struct page *page; + int flush; + + flush = 0; + spin_lock(&mm->page_table_lock); + page = pfn_to_page(__pa(table) >> PAGE_SHIFT); + mp = (struct gmap_pgtable *) page->index; + list_for_each_entry_safe(rmap, next, &mp->mapper, list) { + *rmap->entry = + _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr; + list_del(&rmap->list); + kfree(rmap); + flush = 1; + } + spin_unlock(&mm->page_table_lock); + if (flush) + __tlb_flush_global(); +} + +static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm, + unsigned long vmaddr) { struct page *page; unsigned long *table; + struct gmap_pgtable *mp; page = alloc_page(GFP_KERNEL|__GFP_REPEAT); if (!page) return NULL; + mp = kmalloc(sizeof(*mp), GFP_KERNEL|__GFP_REPEAT); + if (!mp) { + __free_page(page); + return NULL; + } pgtable_page_ctor(page); + mp->vmaddr = vmaddr & PMD_MASK; + INIT_LIST_HEAD(&mp->mapper); + page->index = (unsigned long) mp; atomic_set(&page->_mapcount, 3); table = (unsigned long *) page_to_phys(page); clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2); @@ -167,24 +512,57 @@ static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm) static inline void page_table_free_pgste(unsigned long *table) { struct page *page; + struct gmap_pgtable *mp; page = pfn_to_page(__pa(table) >> PAGE_SHIFT); + mp = (struct gmap_pgtable *) page->index; + BUG_ON(!list_empty(&mp->mapper)); pgtable_page_ctor(page); atomic_set(&page->_mapcount, -1); + kfree(mp); __free_page(page); } -#endif -unsigned long *page_table_alloc(struct mm_struct *mm) +#else /* CONFIG_PGSTE */ + +static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm, + unsigned long vmaddr) +{ +} + +static inline void page_table_free_pgste(unsigned 
long *table) +{ +} + +static inline void gmap_unmap_notifier(struct mm_struct *mm, + unsigned long *table) +{ +} + +#endif /* CONFIG_PGSTE */ + +static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits) +{ + unsigned int old, new; + + do { + old = atomic_read(v); + new = old ^ bits; + } while (atomic_cmpxchg(v, old, new) != old); + return new; +} + +/* + * page table entry allocation/free routines. + */ +unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr) { struct page *page; unsigned long *table; unsigned int mask, bit; -#ifdef CONFIG_PGSTE if (mm_has_pgste(mm)) - return page_table_alloc_pgste(mm); -#endif + return page_table_alloc_pgste(mm, vmaddr); /* Allocate fragments of a 4K page as 1K/2K page table */ spin_lock_bh(&mm->context.list_lock); mask = FRAG_MASK; @@ -222,10 +600,10 @@ void page_table_free(struct mm_struct *mm, unsigned long *table) struct page *page; unsigned int bit, mask; -#ifdef CONFIG_PGSTE - if (mm_has_pgste(mm)) + if (mm_has_pgste(mm)) { + gmap_unmap_notifier(mm, table); return page_table_free_pgste(table); -#endif + } /* Free 1K/2K page table fragment of a 4K page */ page = pfn_to_page(__pa(table) >> PAGE_SHIFT); bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t))); @@ -249,10 +627,8 @@ static void __page_table_free_rcu(void *table, unsigned bit) { struct page *page; -#ifdef CONFIG_PGSTE if (bit == FRAG_MASK) return page_table_free_pgste(table); -#endif /* Free 1K/2K page table fragment of a 4K page */ page = pfn_to_page(__pa(table) >> PAGE_SHIFT); if (atomic_xor_bits(&page->_mapcount, bit) == 0) { @@ -269,13 +645,12 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table) unsigned int bit, mask; mm = tlb->mm; -#ifdef CONFIG_PGSTE if (mm_has_pgste(mm)) { + gmap_unmap_notifier(mm, table); table = (unsigned long *) (__pa(table) | FRAG_MASK); tlb_remove_table(tlb, table); return; } -#endif bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t))); page = 
pfn_to_page(__pa(table) >> PAGE_SHIFT); spin_lock_bh(&mm->context.list_lock); diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c index 8c1970d1dd91..781ff5169560 100644 --- a/arch/s390/mm/vmem.c +++ b/arch/s390/mm/vmem.c @@ -61,12 +61,12 @@ static inline pmd_t *vmem_pmd_alloc(void) return pmd; } -static pte_t __ref *vmem_pte_alloc(void) +static pte_t __ref *vmem_pte_alloc(unsigned long address) { pte_t *pte; if (slab_is_available()) - pte = (pte_t *) page_table_alloc(&init_mm); + pte = (pte_t *) page_table_alloc(&init_mm, address); else pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t)); if (!pte) @@ -120,7 +120,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro) } #endif if (pmd_none(*pm_dir)) { - pt_dir = vmem_pte_alloc(); + pt_dir = vmem_pte_alloc(address); if (!pt_dir) goto out; pmd_populate(&init_mm, pm_dir, pt_dir); @@ -205,7 +205,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node) pm_dir = pmd_offset(pu_dir, address); if (pmd_none(*pm_dir)) { - pt_dir = vmem_pte_alloc(); + pt_dir = vmem_pte_alloc(address); if (!pt_dir) goto out; pmd_populate(&init_mm, pm_dir, pt_dir); -- cgit v1.2.3 From 598841ca9919d008b520114d8a4378c4ce4e40a1 Mon Sep 17 00:00:00 2001 From: Carsten Otte Date: Sun, 24 Jul 2011 10:48:21 +0200 Subject: [S390] use gmap address spaces for kvm guest images This patch switches kvm from using (Qemu's) user address space to Martin's gmap address space. This way QEMU does not have to use a linker script in order to fit large guests at low addresses in its address space. 
Signed-off-by: Carsten Otte Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/kvm_host.h | 2 ++ arch/s390/kvm/intercept.c | 18 +++++++++++++++++- arch/s390/kvm/kvm-s390.c | 32 +++++++++++++++++++++----------- 3 files changed, 40 insertions(+), 12 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index e9bcdca32a32..8264b0b0f1ce 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -227,6 +227,7 @@ struct kvm_vcpu_arch { struct cpuid cpu_id; u64 stidp_data; }; + struct gmap *gmap; }; struct kvm_vm_stat { @@ -237,6 +238,7 @@ struct kvm_arch{ struct sca_block *sca; debug_info_t *dbf; struct kvm_s390_float_interrupt float_int; + struct gmap *gmap; }; extern int sie64a(struct kvm_s390_sie_block *, unsigned long *); diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index b5312050b224..654fc1fa37e7 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c @@ -160,6 +160,7 @@ static int handle_stop(struct kvm_vcpu *vcpu) static int handle_validity(struct kvm_vcpu *vcpu) { + unsigned long vmaddr; int viwhy = vcpu->arch.sie_block->ipb >> 16; int rc; @@ -170,12 +171,27 @@ static int handle_validity(struct kvm_vcpu *vcpu) vcpu->arch.sie_block->gmsor + vcpu->arch.sie_block->prefix, 2*PAGE_SIZE); - if (rc) + if (rc) { /* user will receive sigsegv, exit to user */ rc = -EOPNOTSUPP; + goto out; + } + vmaddr = gmap_fault(vcpu->arch.sie_block->prefix, + vcpu->arch.gmap); + if (IS_ERR_VALUE(vmaddr)) { + rc = -EOPNOTSUPP; + goto out; + } + vmaddr = gmap_fault(vcpu->arch.sie_block->prefix + PAGE_SIZE, + vcpu->arch.gmap); + if (IS_ERR_VALUE(vmaddr)) { + rc = -EOPNOTSUPP; + goto out; + } } else rc = -EOPNOTSUPP; +out: if (rc) VCPU_EVENT(vcpu, 2, "unhandled validity intercept code %d", viwhy); diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 123ebea72282..3ebb4ba83d9d 100644 --- a/arch/s390/kvm/kvm-s390.c +++ 
b/arch/s390/kvm/kvm-s390.c @@ -190,7 +190,13 @@ int kvm_arch_init_vm(struct kvm *kvm) debug_register_view(kvm->arch.dbf, &debug_sprintf_view); VM_EVENT(kvm, 3, "%s", "vm created"); + kvm->arch.gmap = gmap_alloc(current->mm); + if (!kvm->arch.gmap) + goto out_nogmap; + return 0; +out_nogmap: + debug_unregister(kvm->arch.dbf); out_nodbf: free_page((unsigned long)(kvm->arch.sca)); out_err: @@ -235,11 +241,13 @@ void kvm_arch_destroy_vm(struct kvm *kvm) kvm_free_vcpus(kvm); free_page((unsigned long)(kvm->arch.sca)); debug_unregister(kvm->arch.dbf); + gmap_free(kvm->arch.gmap); } /* Section: vcpu related */ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) { + vcpu->arch.gmap = vcpu->kvm->arch.gmap; return 0; } @@ -285,7 +293,7 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu) int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) { - atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH); + atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | CPUSTAT_SM); set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests); vcpu->arch.sie_block->ecb = 6; vcpu->arch.sie_block->eca = 0xC1002001U; @@ -454,6 +462,7 @@ static void __vcpu_run(struct kvm_vcpu *vcpu) local_irq_disable(); kvm_guest_enter(); local_irq_enable(); + gmap_enable(vcpu->arch.gmap); VCPU_EVENT(vcpu, 6, "entering sie flags %x", atomic_read(&vcpu->arch.sie_block->cpuflags)); if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) { @@ -462,6 +471,7 @@ static void __vcpu_run(struct kvm_vcpu *vcpu) } VCPU_EVENT(vcpu, 6, "exit sie icptcode %d", vcpu->arch.sie_block->icptcode); + gmap_disable(vcpu->arch.gmap); local_irq_disable(); kvm_guest_exit(); local_irq_enable(); @@ -479,13 +489,6 @@ rerun_vcpu: if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests)) kvm_s390_vcpu_set_mem(vcpu); - /* verify, that memory has been registered */ - if (!vcpu->arch.sie_block->gmslm) { - vcpu_put(vcpu); - VCPU_EVENT(vcpu, 3, "%s", "no memory registered to run vcpu"); - return -EINVAL; - } - if (vcpu->sigset_active) 
sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); @@ -681,10 +684,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, if (mem->guest_phys_addr) return -EINVAL; - if (mem->userspace_addr & (PAGE_SIZE - 1)) + if (mem->userspace_addr & 0xffffful) return -EINVAL; - if (mem->memory_size & (PAGE_SIZE - 1)) + if (mem->memory_size & 0xffffful) return -EINVAL; if (!user_alloc) @@ -698,15 +701,22 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, struct kvm_memory_slot old, int user_alloc) { - int i; + int i, rc; struct kvm_vcpu *vcpu; + + rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr, + mem->guest_phys_addr, mem->memory_size); + if (rc) + return; + /* request update of sie control block for all available vcpus */ kvm_for_each_vcpu(i, vcpu, kvm) { if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests)) continue; kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP); } + return; } void kvm_arch_flush_shadow(struct kvm *kvm) -- cgit v1.2.3 From 092670cd90eb88c33661de21f1b4ee08d2597171 Mon Sep 17 00:00:00 2001 From: Carsten Otte Date: Sun, 24 Jul 2011 10:48:22 +0200 Subject: [S390] Use gmap translation for accessing guest memory This patch removes kvm-s390 internal assumption of a linear mapping of guest address space to user space. Previously, guest memory was translated to user addresses using a fixed offset (gmsor). The new code uses gmap_fault to resolve guest addresses. 
Signed-off-by: Carsten Otte Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/kvm_host.h | 4 +- arch/s390/kvm/gaccess.h | 243 ++++++++++++++++++++++++++++----------- arch/s390/kvm/intercept.c | 24 ++-- arch/s390/kvm/kvm-s390.c | 4 +- arch/s390/kvm/kvm-s390.h | 23 ---- arch/s390/kvm/sigp.c | 6 +- 6 files changed, 194 insertions(+), 110 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 8264b0b0f1ce..e5d082c4f3aa 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -93,9 +93,7 @@ struct kvm_s390_sie_block { __u32 scaol; /* 0x0064 */ __u8 reserved68[4]; /* 0x0068 */ __u32 todpr; /* 0x006c */ - __u8 reserved70[16]; /* 0x0070 */ - __u64 gmsor; /* 0x0080 */ - __u64 gmslm; /* 0x0088 */ + __u8 reserved70[32]; /* 0x0070 */ psw_t gpsw; /* 0x0090 */ __u64 gg14; /* 0x00a0 */ __u64 gg15; /* 0x00a8 */ diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h index 03c716a0f01f..c86f6ae43f76 100644 --- a/arch/s390/kvm/gaccess.h +++ b/arch/s390/kvm/gaccess.h @@ -1,5 +1,5 @@ /* - * gaccess.h - access guest memory + * access.h - access guest memory * * Copyright IBM Corp. 
2008,2009 * @@ -22,20 +22,13 @@ static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu, unsigned long guestaddr) { unsigned long prefix = vcpu->arch.sie_block->prefix; - unsigned long origin = vcpu->arch.sie_block->gmsor; - unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu); if (guestaddr < 2 * PAGE_SIZE) guestaddr += prefix; else if ((guestaddr >= prefix) && (guestaddr < prefix + 2 * PAGE_SIZE)) guestaddr -= prefix; - if (guestaddr > memsize) - return (void __user __force *) ERR_PTR(-EFAULT); - - guestaddr += origin; - - return (void __user *) guestaddr; + return (void __user *) gmap_fault(guestaddr, vcpu->arch.gmap); } static inline int get_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr, @@ -141,11 +134,11 @@ static inline int put_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr, static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu, unsigned long guestdest, - const void *from, unsigned long n) + void *from, unsigned long n) { int rc; unsigned long i; - const u8 *data = from; + u8 *data = from; for (i = 0; i < n; i++) { rc = put_guest_u8(vcpu, guestdest++, *(data++)); @@ -155,12 +148,95 @@ static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu, return 0; } +static inline int __copy_to_guest_fast(struct kvm_vcpu *vcpu, + unsigned long guestdest, + void *from, unsigned long n) +{ + int r; + void __user *uptr; + unsigned long size; + + if (guestdest + n < guestdest) + return -EFAULT; + + /* simple case: all within one segment table entry? 
*/ + if ((guestdest & PMD_MASK) == ((guestdest+n) & PMD_MASK)) { + uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap); + + if (IS_ERR((void __force *) uptr)) + return PTR_ERR((void __force *) uptr); + + r = copy_to_user(uptr, from, n); + + if (r) + r = -EFAULT; + + goto out; + } + + /* copy first segment */ + uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap); + + if (IS_ERR((void __force *) uptr)) + return PTR_ERR((void __force *) uptr); + + size = PMD_SIZE - (guestdest & ~PMD_MASK); + + r = copy_to_user(uptr, from, size); + + if (r) { + r = -EFAULT; + goto out; + } + from += size; + n -= size; + guestdest += size; + + /* copy full segments */ + while (n >= PMD_SIZE) { + uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap); + + if (IS_ERR((void __force *) uptr)) + return PTR_ERR((void __force *) uptr); + + r = copy_to_user(uptr, from, PMD_SIZE); + + if (r) { + r = -EFAULT; + goto out; + } + from += PMD_SIZE; + n -= PMD_SIZE; + guestdest += PMD_SIZE; + } + + /* copy the tail segment */ + if (n) { + uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap); + + if (IS_ERR((void __force *) uptr)) + return PTR_ERR((void __force *) uptr); + + r = copy_to_user(uptr, from, n); + + if (r) + r = -EFAULT; + } +out: + return r; +} + +static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu, + unsigned long guestdest, + void *from, unsigned long n) +{ + return __copy_to_guest_fast(vcpu, guestdest, from, n); +} + static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest, - const void *from, unsigned long n) + void *from, unsigned long n) { unsigned long prefix = vcpu->arch.sie_block->prefix; - unsigned long origin = vcpu->arch.sie_block->gmsor; - unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu); if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE)) goto slowpath; @@ -177,15 +253,7 @@ static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest, else if ((guestdest >= prefix) && 
(guestdest < prefix + 2 * PAGE_SIZE)) guestdest -= prefix; - if (guestdest + n > memsize) - return -EFAULT; - - if (guestdest + n < guestdest) - return -EFAULT; - - guestdest += origin; - - return copy_to_user((void __user *) guestdest, from, n); + return __copy_to_guest_fast(vcpu, guestdest, from, n); slowpath: return __copy_to_guest_slow(vcpu, guestdest, from, n); } @@ -206,74 +274,113 @@ static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to, return 0; } -static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to, - unsigned long guestsrc, unsigned long n) +static inline int __copy_from_guest_fast(struct kvm_vcpu *vcpu, void *to, + unsigned long guestsrc, + unsigned long n) { - unsigned long prefix = vcpu->arch.sie_block->prefix; - unsigned long origin = vcpu->arch.sie_block->gmsor; - unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu); + int r; + void __user *uptr; + unsigned long size; - if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE)) - goto slowpath; + if (guestsrc + n < guestsrc) + return -EFAULT; - if ((guestsrc < prefix) && (guestsrc + n > prefix)) - goto slowpath; + /* simple case: all within one segment table entry? 
*/ + if ((guestsrc & PMD_MASK) == ((guestsrc+n) & PMD_MASK)) { + uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap); - if ((guestsrc < prefix + 2 * PAGE_SIZE) - && (guestsrc + n > prefix + 2 * PAGE_SIZE)) - goto slowpath; + if (IS_ERR((void __force *) uptr)) + return PTR_ERR((void __force *) uptr); - if (guestsrc < 2 * PAGE_SIZE) - guestsrc += prefix; - else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE)) - guestsrc -= prefix; + r = copy_from_user(to, uptr, n); - if (guestsrc + n > memsize) - return -EFAULT; + if (r) + r = -EFAULT; - if (guestsrc + n < guestsrc) - return -EFAULT; + goto out; + } - guestsrc += origin; + /* copy first segment */ + uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap); - return copy_from_user(to, (void __user *) guestsrc, n); -slowpath: - return __copy_from_guest_slow(vcpu, to, guestsrc, n); -} + if (IS_ERR((void __force *) uptr)) + return PTR_ERR((void __force *) uptr); -static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu, - unsigned long guestdest, - const void *from, unsigned long n) -{ - unsigned long origin = vcpu->arch.sie_block->gmsor; - unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu); + size = PMD_SIZE - (guestsrc & ~PMD_MASK); - if (guestdest + n > memsize) - return -EFAULT; + r = copy_from_user(to, uptr, size); - if (guestdest + n < guestdest) - return -EFAULT; + if (r) { + r = -EFAULT; + goto out; + } + to += size; + n -= size; + guestsrc += size; + + /* copy full segments */ + while (n >= PMD_SIZE) { + uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap); + + if (IS_ERR((void __force *) uptr)) + return PTR_ERR((void __force *) uptr); + + r = copy_from_user(to, uptr, PMD_SIZE); + + if (r) { + r = -EFAULT; + goto out; + } + to += PMD_SIZE; + n -= PMD_SIZE; + guestsrc += PMD_SIZE; + } + + /* copy the tail segment */ + if (n) { + uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap); - guestdest += origin; + if (IS_ERR((void __force *) uptr)) + return 
PTR_ERR((void __force *) uptr); - return copy_to_user((void __user *) guestdest, from, n); + r = copy_from_user(to, uptr, n); + + if (r) + r = -EFAULT; + } +out: + return r; } static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to, unsigned long guestsrc, unsigned long n) { - unsigned long origin = vcpu->arch.sie_block->gmsor; - unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu); + return __copy_from_guest_fast(vcpu, to, guestsrc, n); +} - if (guestsrc + n > memsize) - return -EFAULT; +static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to, + unsigned long guestsrc, unsigned long n) +{ + unsigned long prefix = vcpu->arch.sie_block->prefix; - if (guestsrc + n < guestsrc) - return -EFAULT; + if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE)) + goto slowpath; - guestsrc += origin; + if ((guestsrc < prefix) && (guestsrc + n > prefix)) + goto slowpath; + + if ((guestsrc < prefix + 2 * PAGE_SIZE) + && (guestsrc + n > prefix + 2 * PAGE_SIZE)) + goto slowpath; + + if (guestsrc < 2 * PAGE_SIZE) + guestsrc += prefix; + else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE)) + guestsrc -= prefix; - return copy_from_user(to, (void __user *) guestsrc, n); + return __copy_from_guest_fast(vcpu, to, guestsrc, n); +slowpath: + return __copy_from_guest_slow(vcpu, to, guestsrc, n); } #endif diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index 654fc1fa37e7..c7c51898984e 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c @@ -165,26 +165,30 @@ static int handle_validity(struct kvm_vcpu *vcpu) int rc; vcpu->stat.exit_validity++; - if ((viwhy == 0x37) && (vcpu->arch.sie_block->prefix - <= kvm_s390_vcpu_get_memsize(vcpu) - 2*PAGE_SIZE)) { - rc = fault_in_pages_writeable((char __user *) - vcpu->arch.sie_block->gmsor + - vcpu->arch.sie_block->prefix, - 2*PAGE_SIZE); + if (viwhy == 0x37) { + vmaddr = gmap_fault(vcpu->arch.sie_block->prefix, + vcpu->arch.gmap); + if (IS_ERR_VALUE(vmaddr)) { 
+ rc = -EOPNOTSUPP; + goto out; + } + rc = fault_in_pages_writeable((char __user *) vmaddr, + PAGE_SIZE); if (rc) { /* user will receive sigsegv, exit to user */ rc = -EOPNOTSUPP; goto out; } - vmaddr = gmap_fault(vcpu->arch.sie_block->prefix, + vmaddr = gmap_fault(vcpu->arch.sie_block->prefix + PAGE_SIZE, vcpu->arch.gmap); if (IS_ERR_VALUE(vmaddr)) { rc = -EOPNOTSUPP; goto out; } - vmaddr = gmap_fault(vcpu->arch.sie_block->prefix + PAGE_SIZE, - vcpu->arch.gmap); - if (IS_ERR_VALUE(vmaddr)) { + rc = fault_in_pages_writeable((char __user *) vmaddr, + PAGE_SIZE); + if (rc) { + /* user will receive sigsegv, exit to user */ rc = -EOPNOTSUPP; goto out; } diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 3ebb4ba83d9d..5a99f342fd0b 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -549,7 +549,7 @@ rerun_vcpu: return rc; } -static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from, +static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from, unsigned long n, int prefix) { if (prefix) @@ -566,7 +566,7 @@ static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from, */ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) { - const unsigned char archmode = 1; + unsigned char archmode = 1; int prefix; if (addr == KVM_S390_STORE_STATUS_NOADDR) { diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index 65e2201a555b..63e5190776e9 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -58,31 +58,8 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code); int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action); -static inline long kvm_s390_vcpu_get_memsize(struct kvm_vcpu *vcpu) -{ - return vcpu->arch.sie_block->gmslm - - vcpu->arch.sie_block->gmsor - - VIRTIODESCSPACE + 1ul; -} - static inline void kvm_s390_vcpu_set_mem(struct kvm_vcpu *vcpu) { - int idx; - struct 
kvm_memory_slot *mem; - struct kvm_memslots *memslots; - - idx = srcu_read_lock(&vcpu->kvm->srcu); - memslots = kvm_memslots(vcpu->kvm); - - mem = &memslots->memslots[0]; - - vcpu->arch.sie_block->gmsor = mem->userspace_addr; - vcpu->arch.sie_block->gmslm = - mem->userspace_addr + - (mem->npages << PAGE_SHIFT) + - VIRTIODESCSPACE - 1ul; - - srcu_read_unlock(&vcpu->kvm->srcu, idx); } /* implemented in priv.c */ diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index 702276f5e2fa..d6a50c1fb2e6 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c @@ -189,10 +189,8 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address, /* make sure that the new value is valid memory */ address = address & 0x7fffe000u; - if ((copy_from_user(&tmp, (void __user *) - (address + vcpu->arch.sie_block->gmsor) , 1)) || - (copy_from_user(&tmp, (void __user *)(address + - vcpu->arch.sie_block->gmsor + PAGE_SIZE), 1))) { + if (copy_from_guest_absolute(vcpu, &tmp, address, 1) || + copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)) { *reg |= SIGP_STAT_INVALID_PARAMETER; return 1; /* invalid parameter */ } -- cgit v1.2.3 From f7850c92884b40915001e332a0a33ed4f10158e8 Mon Sep 17 00:00:00 2001 From: Carsten Otte Date: Sun, 24 Jul 2011 10:48:23 +0200 Subject: [S390] remove kvm mmu reload on s390 This patch removes the mmu reload logic for kvm on s390. Via Martin's new gmap interface, we can safely add or remove memory slots while guest CPUs are in-flight. Thus, the mmu reload logic is not needed anymore. 
Signed-off-by: Carsten Otte Signed-off-by: Martin Schwidefsky --- arch/s390/kvm/kvm-s390.c | 17 ++--------------- arch/s390/kvm/kvm-s390.h | 4 ---- 2 files changed, 2 insertions(+), 19 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 5a99f342fd0b..f17296e4fc89 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -294,7 +294,6 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu) int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) { atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | CPUSTAT_SM); - set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests); vcpu->arch.sie_block->ecb = 6; vcpu->arch.sie_block->eca = 0xC1002001U; vcpu->arch.sie_block->fac = (int) (long) facilities; @@ -485,10 +484,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) sigset_t sigsaved; rerun_vcpu: - if (vcpu->requests) - if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests)) - kvm_s390_vcpu_set_mem(vcpu); - if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); @@ -701,21 +696,13 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, struct kvm_memory_slot old, int user_alloc) { - int i, rc; - struct kvm_vcpu *vcpu; + int rc; rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr, mem->guest_phys_addr, mem->memory_size); if (rc) - return; - - /* request update of sie control block for all available vcpus */ - kvm_for_each_vcpu(i, vcpu, kvm) { - if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests)) - continue; - kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP); - } + printk(KERN_WARNING "kvm-s390: failed to commit memory region\n"); return; } diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index 63e5190776e9..99b0b7597115 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -58,10 +58,6 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 
code); int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action); -static inline void kvm_s390_vcpu_set_mem(struct kvm_vcpu *vcpu) -{ -} - /* implemented in priv.c */ int kvm_s390_handle_b2(struct kvm_vcpu *vcpu); int kvm_s390_handle_e5(struct kvm_vcpu *vcpu); -- cgit v1.2.3 From fdb204d1a7746a90b0d8a8665bf59af98c8c366a Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Sun, 24 Jul 2011 10:48:24 +0200 Subject: [S390] cleanup program check handler prototypes Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/entry.h | 7 +++---- arch/s390/kernel/traps.c | 13 ++++--------- 2 files changed, 7 insertions(+), 13 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h index 17a6f83a2d67..66729eb7bbc5 100644 --- a/arch/s390/kernel/entry.h +++ b/arch/s390/kernel/entry.h @@ -5,10 +5,9 @@ #include #include -typedef void pgm_check_handler_t(struct pt_regs *, long, unsigned long); -extern pgm_check_handler_t *pgm_check_table[128]; -pgm_check_handler_t do_protection_exception; -pgm_check_handler_t do_dat_exception; +void do_protection_exception(struct pt_regs *, long, unsigned long); +void do_dat_exception(struct pt_regs *, long, unsigned long); +void do_asce_exception(struct pt_regs *, long, unsigned long); extern int sysctl_userprocess_debug; diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index a63d34c3611e..f386e167277a 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -43,14 +43,10 @@ #include #include "entry.h" -pgm_check_handler_t *pgm_check_table[128]; +void (*pgm_check_table[128])(struct pt_regs *, long, unsigned long); int show_unhandled_signals; -extern pgm_check_handler_t do_protection_exception; -extern pgm_check_handler_t do_dat_exception; -extern pgm_check_handler_t do_asce_exception; - #define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; }) #ifndef CONFIG_64BIT @@ -489,9 +485,8 @@ static void __kprobes illegal_op(struct pt_regs *regs, long 
pgm_int_code, #ifdef CONFIG_MATHEMU -asmlinkage void specification_exception(struct pt_regs *regs, - long pgm_int_code, - unsigned long trans_exc_code) +void specification_exception(struct pt_regs *regs, long pgm_int_code, + unsigned long trans_exc_code) { __u8 opcode[6]; __u16 __user *location = NULL; @@ -648,7 +643,7 @@ static void space_switch_exception(struct pt_regs *regs, long pgm_int_code, do_trap(pgm_int_code, SIGILL, "space switch event", regs, &info); } -asmlinkage void __kprobes kernel_stack_overflow(struct pt_regs * regs) +void __kprobes kernel_stack_overflow(struct pt_regs * regs) { bust_spinlocks(1); printk("Kernel stack overflow.\n"); -- cgit v1.2.3 From 89c9b66b104549a8698e412bf6f4140c1d0786fb Mon Sep 17 00:00:00 2001 From: Jan Glauber Date: Sun, 24 Jul 2011 10:48:27 +0200 Subject: [S390] race safe external interrupt registration The (un-)register_external_interrupt functions are not race safe if more than one interrupt handler is added or deleted for an external interrupt concurrently. Make the registration / unregistration of external interrupts race safe by using RCU and a spinlock. RCU is used to avoid a performance penalty in the external interrupt handler, the register and unregister functions are protected by the spinlock and are not performance critical. call_rcu must be used since the SCLP driver uses the interface with IRQs disabled. Also use the generic list implementation rather than homebrewn list code. Signed-off-by: Jan Glauber Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/irq.c | 83 ++++++++++++++++++++++++++++---------------------- 1 file changed, 46 insertions(+), 37 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c index e3264f6a9720..1f4050d45f78 100644 --- a/arch/s390/kernel/irq.c +++ b/arch/s390/kernel/irq.c @@ -87,15 +87,6 @@ int show_interrupts(struct seq_file *p, void *v) return 0; } -/* - * For compatibilty only. S/390 specific setup of interrupts et al. 
is done - * much later in init_channel_subsystem(). - */ -void __init init_IRQ(void) -{ - /* nothing... */ -} - /* * Switch to the asynchronous interrupt stack for softirq execution. */ @@ -144,28 +135,45 @@ void init_irq_proc(void) #endif /* - * ext_int_hash[index] is the start of the list for all external interrupts - * that hash to this index. With the current set of external interrupts - * (0x1202 external call, 0x1004 cpu timer, 0x2401 hwc console, 0x4000 - * iucv and 0x2603 pfault) this is always the first element. + * ext_int_hash[index] is the list head for all external interrupts that hash + * to this index. */ +static struct list_head ext_int_hash[256]; struct ext_int_info { - struct ext_int_info *next; ext_int_handler_t handler; u16 code; + struct list_head entry; + struct rcu_head rcu; }; -static struct ext_int_info *ext_int_hash[256]; +/* ext_int_hash_lock protects the handler lists for external interrupts */ +DEFINE_SPINLOCK(ext_int_hash_lock); + +static void __init init_external_interrupts(void) +{ + int idx; + + for (idx = 0; idx < ARRAY_SIZE(ext_int_hash); idx++) + INIT_LIST_HEAD(&ext_int_hash[idx]); +} static inline int ext_hash(u16 code) { return (code + (code >> 9)) & 0xff; } +static void ext_int_hash_update(struct rcu_head *head) +{ + struct ext_int_info *p = container_of(head, struct ext_int_info, rcu); + + kfree(p); +} + int register_external_interrupt(u16 code, ext_int_handler_t handler) { struct ext_int_info *p; + unsigned long flags; int index; p = kmalloc(sizeof(*p), GFP_ATOMIC); @@ -174,33 +182,27 @@ int register_external_interrupt(u16 code, ext_int_handler_t handler) p->code = code; p->handler = handler; index = ext_hash(code); - p->next = ext_int_hash[index]; - ext_int_hash[index] = p; + + spin_lock_irqsave(&ext_int_hash_lock, flags); + list_add_rcu(&p->entry, &ext_int_hash[index]); + spin_unlock_irqrestore(&ext_int_hash_lock, flags); return 0; } EXPORT_SYMBOL(register_external_interrupt); int unregister_external_interrupt(u16 code, 
ext_int_handler_t handler) { - struct ext_int_info *p, *q; - int index; + struct ext_int_info *p; + unsigned long flags; + int index = ext_hash(code); - index = ext_hash(code); - q = NULL; - p = ext_int_hash[index]; - while (p) { - if (p->code == code && p->handler == handler) - break; - q = p; - p = p->next; - } - if (!p) - return -ENOENT; - if (q) - q->next = p->next; - else - ext_int_hash[index] = p->next; - kfree(p); + spin_lock_irqsave(&ext_int_hash_lock, flags); + list_for_each_entry_rcu(p, &ext_int_hash[index], entry) + if (p->code == code && p->handler == handler) { + list_del_rcu(&p->entry); + call_rcu(&p->rcu, ext_int_hash_update); + } + spin_unlock_irqrestore(&ext_int_hash_lock, flags); return 0; } EXPORT_SYMBOL(unregister_external_interrupt); @@ -224,15 +226,22 @@ void __irq_entry do_extint(struct pt_regs *regs, unsigned int ext_int_code, kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++; if (code != 0x1004) __get_cpu_var(s390_idle).nohz_delay = 1; + index = ext_hash(code); - for (p = ext_int_hash[index]; p; p = p->next) { + rcu_read_lock(); + list_for_each_entry_rcu(p, &ext_int_hash[index], entry) if (likely(p->code == code)) p->handler(ext_int_code, param32, param64); - } + rcu_read_unlock(); irq_exit(); set_irq_regs(old_regs); } +void __init init_IRQ(void) +{ + init_external_interrupts(); +} + static DEFINE_SPINLOCK(sc_irq_lock); static int sc_irq_refcount; -- cgit v1.2.3 From 5beab99100335936f4d845f9ae2f0e25796f2584 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Sun, 24 Jul 2011 10:48:28 +0200 Subject: [S390] iucv cr0 enablement bit Do not set the cr0 enablement bit for iucv by default in head[31|64].S, move the enablement to iucv_init in the iucv base layer. 
Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/head31.S | 2 +- arch/s390/kernel/head64.S | 2 +- net/iucv/iucv.c | 7 +++++-- 3 files changed, 7 insertions(+), 4 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S index dd0d1e272be9..f21954b44dc1 100644 --- a/arch/s390/kernel/head31.S +++ b/arch/s390/kernel/head31.S @@ -45,7 +45,7 @@ ENTRY(startup_continue) # virtual and never return ... .align 8 .Lentry:.long 0x00080000,0x80000000 + _stext -.Lctl: .long 0x04b50002 # cr0: various things +.Lctl: .long 0x04b50000 # cr0: various things .long 0 # cr1: primary space segment table .long .Lduct # cr2: dispatchable unit control table .long 0 # cr3: instruction authorization diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index 188602898c17..b6d30135e2ec 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S @@ -46,7 +46,7 @@ ENTRY(startup_continue) .align 16 .LPG1: .Lentry:.quad 0x0000000180000000,_stext -.Lctl: .quad 0x04350002 # cr0: various things +.Lctl: .quad 0x04350000 # cr0: various things .quad 0 # cr1: primary space segment table .quad .Lduct # cr2: dispatchable unit control table .quad 0 # cr3: instruction authorization diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index 7f9124914b13..f2b713847b45 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c @@ -1988,12 +1988,13 @@ static int __init iucv_init(void) rc = -EPROTONOSUPPORT; goto out; } + ctl_set_bit(0, 1); rc = iucv_query_maxconn(); if (rc) - goto out; + goto out_ctl; rc = register_external_interrupt(0x4000, iucv_external_interrupt); if (rc) - goto out; + goto out_ctl; iucv_root = root_device_register("iucv"); if (IS_ERR(iucv_root)) { rc = PTR_ERR(iucv_root); @@ -2055,6 +2056,8 @@ out_free: root_device_unregister(iucv_root); out_int: unregister_external_interrupt(0x4000, iucv_external_interrupt); +out_ctl: + ctl_clear_bit(0, 1); out: return rc; } -- cgit v1.2.3 From c76e70d3784e00a89bf8359bcca51ae366b41527 Mon Sep 17 
00:00:00 2001 From: Martin Schwidefsky Date: Sun, 24 Jul 2011 10:48:29 +0200 Subject: [S390] initial cr0 bits Remove outdated bits from the initial cr0 register. Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/head64.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index b6d30135e2ec..ae5d492b069e 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S @@ -46,7 +46,7 @@ ENTRY(startup_continue) .align 16 .LPG1: .Lentry:.quad 0x0000000180000000,_stext -.Lctl: .quad 0x04350000 # cr0: various things +.Lctl: .quad 0x04040000 # cr0: AFP registers & secondary space .quad 0 # cr1: primary space segment table .quad .Lduct # cr2: dispatchable unit control table .quad 0 # cr3: instruction authorization -- cgit v1.2.3 From cadfce72778e9417baff117bb563a1c2f8fef97b Mon Sep 17 00:00:00 2001 From: Jan Glauber Date: Sun, 24 Jul 2011 10:48:30 +0200 Subject: [S390] disable cpu measurement alerts on a dying cpu The cpu measurement alerts that are used for instance by oprofile for hardware sampling are not turned off on a cpu that is going offline. Add the appropriate control register bit that should be disabled to the list. 
Signed-off-by: Jan Glauber Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/smp.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 1d55c95f617c..a6d85c0a7f20 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -654,7 +654,8 @@ int __cpu_disable(void) /* disable all external interrupts */ cr_parms.orvals[0] = 0; cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 11 | - 1 << 10 | 1 << 9 | 1 << 6 | 1 << 4); + 1 << 10 | 1 << 9 | 1 << 6 | 1 << 5 | + 1 << 4); /* disable all I/O interrupts */ cr_parms.orvals[6] = 0; cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 | -- cgit v1.2.3 From 8bb3a2ebcf2a406a60d04f5a8756ea936b7f0bf3 Mon Sep 17 00:00:00 2001 From: Christian Ehrhardt Date: Sun, 24 Jul 2011 10:48:31 +0200 Subject: [S390] kvm: make sigp emerg smp capable SIGP emerg needs to pass the source vcpu address into __LC_CPU_ADDRESS of the target guest. Signed-off-by: Christian Ehrhardt Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/kvm_host.h | 5 +++++ arch/s390/kvm/interrupt.c | 4 ++++ 2 files changed, 9 insertions(+) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index e5d082c4f3aa..00ff00dfb24c 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -174,6 +174,10 @@ struct kvm_s390_prefix_info { __u32 address; }; +struct kvm_s390_emerg_info { + __u16 code; +}; + struct kvm_s390_interrupt_info { struct list_head list; u64 type; @@ -181,6 +185,7 @@ struct kvm_s390_interrupt_info { struct kvm_s390_io_info io; struct kvm_s390_ext_info ext; struct kvm_s390_pgm_info pgm; + struct kvm_s390_emerg_info emerg; struct kvm_s390_prefix_info prefix; }; }; diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 35c21bf910c5..c9aeb4b4d0b8 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -128,6 +128,10 @@ 
static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, if (rc == -EFAULT) exception = 1; + rc = put_guest_u16(vcpu, __LC_CPU_ADDRESS, inti->emerg.code); + if (rc == -EFAULT) + exception = 1; + rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW, &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); if (rc == -EFAULT) -- cgit v1.2.3 From 73b7d40ff1bcd44b4245c2714b88cf872fe44685 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Sun, 24 Jul 2011 10:48:33 +0200 Subject: [S390] use siginfo for sigtrap signals Provide additional information on SIGTRAP by using a sig_info signal. Use TRAP_BRKPT for breakpoints via illegal operation and TRAP_HWBKPT for breakpoints via program event recording. Provide the address of the instruction that caused the breakpoint via si_addr. While we are at it get rid of tracehook_consider_fatal_signal. Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/traps.c | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index f386e167277a..e9372c77cced 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include #include #include @@ -325,10 +325,17 @@ static inline void __user *get_psw_address(struct pt_regs *regs, void __kprobes do_per_trap(struct pt_regs *regs) { + siginfo_t info; + if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP) return; - if (current->ptrace) - force_sig(SIGTRAP, current); + if (!current->ptrace) + return; + info.si_signo = SIGTRAP; + info.si_errno = 0; + info.si_code = TRAP_HWBKPT; + info.si_addr = (void *) current->thread.per_event.address; + force_sig_info(SIGTRAP, &info, current); } static void default_trap_handler(struct pt_regs *regs, long pgm_int_code, @@ -421,9 +428,13 @@ static void __kprobes illegal_op(struct pt_regs *regs, long pgm_int_code, if (get_user(*((__u16 *) opcode), (__u16 __user *) location)) return; 
if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) { - if (current->ptrace) - force_sig(SIGTRAP, current); - else + if (current->ptrace) { + info.si_signo = SIGTRAP; + info.si_errno = 0; + info.si_code = TRAP_BRKPT; + info.si_addr = location; + force_sig_info(SIGTRAP, &info, current); + } else signal = SIGILL; #ifdef CONFIG_MATHEMU } else if (opcode[0] == 0xb3) { -- cgit v1.2.3