From 63b852a6b67d0820d388b0ecd0da83ccb4048b8d Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 13 May 2009 22:56:24 +0000 Subject: asm-generic: rename termios.h, signal.h and mman.h The existing asm-generic versions are incomplete and included by some architectures. New architectures should be able to use a generic version, so rename the existing files and change all users, which lets us add the new files. Signed-off-by: Remis Lima Baima Signed-off-by: Arnd Bergmann --- arch/s390/include/asm/mman.h | 2 +- arch/s390/include/asm/signal.h | 2 +- arch/s390/include/asm/termios.h | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/s390/include/asm') diff --git a/arch/s390/include/asm/mman.h b/arch/s390/include/asm/mman.h index da01432e8f44..f63fe7b431ed 100644 --- a/arch/s390/include/asm/mman.h +++ b/arch/s390/include/asm/mman.h @@ -9,7 +9,7 @@ #ifndef __S390_MMAN_H__ #define __S390_MMAN_H__ -#include +#include #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ #define MAP_DENYWRITE 0x0800 /* ETXTBSY */ diff --git a/arch/s390/include/asm/signal.h b/arch/s390/include/asm/signal.h index f6cfddb278cb..cdf5cb2fe03f 100644 --- a/arch/s390/include/asm/signal.h +++ b/arch/s390/include/asm/signal.h @@ -115,7 +115,7 @@ typedef unsigned long sigset_t; #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 -#include +#include #ifdef __KERNEL__ struct old_sigaction { diff --git a/arch/s390/include/asm/termios.h b/arch/s390/include/asm/termios.h index 67f66278f533..bc3a35cefc96 100644 --- a/arch/s390/include/asm/termios.h +++ b/arch/s390/include/asm/termios.h @@ -60,7 +60,7 @@ struct termio { #define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2)) #define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2)) -#include +#include #endif /* __KERNEL__ */ -- cgit v1.2.3 From c31ae4bb4a9fa4606a74c0a4fb61b74f804e861e Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 13 May 2009 22:56:25 +0000 Subject: asm-generic: introduce asm/bitsperlong.h This provides a reliable way for asm-generic/types.h and other files to find out if it is running on a 32 or 64 bit platform. We cannot use CONFIG_64BIT for this in headers that are included from user space because CONFIG symbols are not available there. We also cannot do it inside of asm/types.h because some headers need the word size but cannot include types.h. The solution is to introduce a new header that defines both __BITS_PER_LONG for user space and BITS_PER_LONG for usage in the kernel. The asm-generic version falls back to 32 bit unless the architecture overrides it, which I did for all 64 bit platforms. 
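For illustration only (not part of this patch), user-space code that needs the word size would branch on __BITS_PER_LONG instead of the unavailable CONFIG_64BIT; a minimal C sketch:

    #include <asm/bitsperlong.h>    /* arch override, or the asm-generic 32-bit fallback */

    #if __BITS_PER_LONG == 64
    # define FMT_LONG "%016lx"      /* 64-bit user space */
    #else
    # define FMT_LONG "%08lx"       /* 32-bit user space, including compat tasks */
    #endif

Kernel-side code keeps using BITS_PER_LONG, which the generic header derives from CONFIG_64BIT.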
Signed-off-by: Remis Lima Baima Signed-off-by: Arnd Bergmann --- arch/alpha/include/asm/bitsperlong.h | 8 ++++++++ arch/alpha/include/asm/types.h | 3 --- arch/arm/include/asm/bitsperlong.h | 1 + arch/avr32/include/asm/bitsperlong.h | 1 + arch/blackfin/include/asm/bitsperlong.h | 1 + arch/cris/include/asm/bitsperlong.h | 1 + arch/frv/include/asm/bitsperlong.h | 1 + arch/h8300/include/asm/bitsperlong.h | 1 + arch/ia64/include/asm/bitsperlong.h | 8 ++++++++ arch/ia64/include/asm/types.h | 7 ------- arch/m32r/include/asm/bitsperlong.h | 1 + arch/m68k/include/asm/bitsperlong.h | 1 + arch/microblaze/include/asm/bitsperlong.h | 1 + arch/mips/include/asm/bitsperlong.h | 8 ++++++++ arch/mips/include/asm/types.h | 3 --- arch/mn10300/include/asm/bitsperlong.h | 1 + arch/parisc/include/asm/bitsperlong.h | 20 +++++++++++++++++++ arch/parisc/include/asm/types.h | 8 -------- arch/powerpc/include/asm/bitsperlong.h | 12 ++++++++++++ arch/powerpc/include/asm/types.h | 9 --------- arch/s390/include/asm/bitsperlong.h | 13 +++++++++++++ arch/s390/include/asm/types.h | 6 ------ arch/sh/include/asm/bitsperlong.h | 1 + arch/sparc/include/asm/bitsperlong.h | 13 +++++++++++++ arch/sparc/include/asm/types.h | 4 ---- arch/x86/include/asm/bitsperlong.h | 13 +++++++++++++ arch/x86/include/asm/types.h | 6 ------ arch/xtensa/include/asm/bitsperlong.h | 1 + include/asm-generic/Kbuild | 1 + include/asm-generic/Kbuild.asm | 1 + include/asm-generic/bitsperlong.h | 32 +++++++++++++++++++++++++++++++ include/asm-generic/int-l64.h | 2 ++ include/asm-generic/int-ll64.h | 2 ++ 33 files changed, 145 insertions(+), 46 deletions(-) create mode 100644 arch/alpha/include/asm/bitsperlong.h create mode 100644 arch/arm/include/asm/bitsperlong.h create mode 100644 arch/avr32/include/asm/bitsperlong.h create mode 100644 arch/blackfin/include/asm/bitsperlong.h create mode 100644 arch/cris/include/asm/bitsperlong.h create mode 100644 arch/frv/include/asm/bitsperlong.h create mode 100644 arch/h8300/include/asm/bitsperlong.h create mode 100644 arch/ia64/include/asm/bitsperlong.h create mode 100644 arch/m32r/include/asm/bitsperlong.h create mode 100644 arch/m68k/include/asm/bitsperlong.h create mode 100644 arch/microblaze/include/asm/bitsperlong.h create mode 100644 arch/mips/include/asm/bitsperlong.h create mode 100644 arch/mn10300/include/asm/bitsperlong.h create mode 100644 arch/parisc/include/asm/bitsperlong.h create mode 100644 arch/powerpc/include/asm/bitsperlong.h create mode 100644 arch/s390/include/asm/bitsperlong.h create mode 100644 arch/sh/include/asm/bitsperlong.h create mode 100644 arch/sparc/include/asm/bitsperlong.h create mode 100644 arch/x86/include/asm/bitsperlong.h create mode 100644 arch/xtensa/include/asm/bitsperlong.h create mode 100644 include/asm-generic/bitsperlong.h (limited to 'arch/s390/include/asm') diff --git a/arch/alpha/include/asm/bitsperlong.h b/arch/alpha/include/asm/bitsperlong.h new file mode 100644 index 000000000000..ad57f7868203 --- /dev/null +++ b/arch/alpha/include/asm/bitsperlong.h @@ -0,0 +1,8 @@ +#ifndef __ASM_ALPHA_BITSPERLONG_H +#define __ASM_ALPHA_BITSPERLONG_H + +#define __BITS_PER_LONG 64 + +#include + +#endif /* __ASM_ALPHA_BITSPERLONG_H */ diff --git a/arch/alpha/include/asm/types.h b/arch/alpha/include/asm/types.h index f072f344497e..bd621ecd1eb3 100644 --- a/arch/alpha/include/asm/types.h +++ b/arch/alpha/include/asm/types.h @@ -25,9 +25,6 @@ typedef unsigned int umode_t; * These aren't exported outside the kernel to avoid name space clashes */ #ifdef __KERNEL__ - -#define BITS_PER_LONG 64 
- #ifndef __ASSEMBLY__ typedef u64 dma_addr_t; diff --git a/arch/arm/include/asm/bitsperlong.h b/arch/arm/include/asm/bitsperlong.h new file mode 100644 index 000000000000..6dc0bb0c13b2 --- /dev/null +++ b/arch/arm/include/asm/bitsperlong.h @@ -0,0 +1 @@ +#include diff --git a/arch/avr32/include/asm/bitsperlong.h b/arch/avr32/include/asm/bitsperlong.h new file mode 100644 index 000000000000..6dc0bb0c13b2 --- /dev/null +++ b/arch/avr32/include/asm/bitsperlong.h @@ -0,0 +1 @@ +#include diff --git a/arch/blackfin/include/asm/bitsperlong.h b/arch/blackfin/include/asm/bitsperlong.h new file mode 100644 index 000000000000..6dc0bb0c13b2 --- /dev/null +++ b/arch/blackfin/include/asm/bitsperlong.h @@ -0,0 +1 @@ +#include diff --git a/arch/cris/include/asm/bitsperlong.h b/arch/cris/include/asm/bitsperlong.h new file mode 100644 index 000000000000..6dc0bb0c13b2 --- /dev/null +++ b/arch/cris/include/asm/bitsperlong.h @@ -0,0 +1 @@ +#include diff --git a/arch/frv/include/asm/bitsperlong.h b/arch/frv/include/asm/bitsperlong.h new file mode 100644 index 000000000000..6dc0bb0c13b2 --- /dev/null +++ b/arch/frv/include/asm/bitsperlong.h @@ -0,0 +1 @@ +#include diff --git a/arch/h8300/include/asm/bitsperlong.h b/arch/h8300/include/asm/bitsperlong.h new file mode 100644 index 000000000000..6dc0bb0c13b2 --- /dev/null +++ b/arch/h8300/include/asm/bitsperlong.h @@ -0,0 +1 @@ +#include diff --git a/arch/ia64/include/asm/bitsperlong.h b/arch/ia64/include/asm/bitsperlong.h new file mode 100644 index 000000000000..ec4db3c970b7 --- /dev/null +++ b/arch/ia64/include/asm/bitsperlong.h @@ -0,0 +1,8 @@ +#ifndef __ASM_IA64_BITSPERLONG_H +#define __ASM_IA64_BITSPERLONG_H + +#define __BITS_PER_LONG 64 + +#include + +#endif /* __ASM_IA64_BITSPERLONG_H */ diff --git a/arch/ia64/include/asm/types.h b/arch/ia64/include/asm/types.h index e36b3716e718..fbf1ed3b44ce 100644 --- a/arch/ia64/include/asm/types.h +++ b/arch/ia64/include/asm/types.h @@ -19,10 +19,6 @@ # define __IA64_UL(x) (x) # define __IA64_UL_CONST(x) x -# ifdef __KERNEL__ -# define BITS_PER_LONG 64 -# endif - #else # define __IA64_UL(x) ((unsigned long)(x)) # define __IA64_UL_CONST(x) x##UL @@ -34,10 +30,7 @@ typedef unsigned int umode_t; */ # ifdef __KERNEL__ -#define BITS_PER_LONG 64 - /* DMA addresses are 64-bits wide, in general. 
*/ - typedef u64 dma_addr_t; # endif /* __KERNEL__ */ diff --git a/arch/m32r/include/asm/bitsperlong.h b/arch/m32r/include/asm/bitsperlong.h new file mode 100644 index 000000000000..6dc0bb0c13b2 --- /dev/null +++ b/arch/m32r/include/asm/bitsperlong.h @@ -0,0 +1 @@ +#include diff --git a/arch/m68k/include/asm/bitsperlong.h b/arch/m68k/include/asm/bitsperlong.h new file mode 100644 index 000000000000..6dc0bb0c13b2 --- /dev/null +++ b/arch/m68k/include/asm/bitsperlong.h @@ -0,0 +1 @@ +#include diff --git a/arch/microblaze/include/asm/bitsperlong.h b/arch/microblaze/include/asm/bitsperlong.h new file mode 100644 index 000000000000..6dc0bb0c13b2 --- /dev/null +++ b/arch/microblaze/include/asm/bitsperlong.h @@ -0,0 +1 @@ +#include diff --git a/arch/mips/include/asm/bitsperlong.h b/arch/mips/include/asm/bitsperlong.h new file mode 100644 index 000000000000..3e4c10a8e787 --- /dev/null +++ b/arch/mips/include/asm/bitsperlong.h @@ -0,0 +1,8 @@ +#ifndef __ASM_MIPS_BITSPERLONG_H +#define __ASM_MIPS_BITSPERLONG_H + +#define __BITS_PER_LONG _MIPS_SZLONG + +#include + +#endif /* __ASM_MIPS_BITSPERLONG_H */ diff --git a/arch/mips/include/asm/types.h b/arch/mips/include/asm/types.h index 7956e69a3bd5..544a2854598f 100644 --- a/arch/mips/include/asm/types.h +++ b/arch/mips/include/asm/types.h @@ -31,9 +31,6 @@ typedef unsigned short umode_t; * These aren't exported outside the kernel to avoid name space clashes */ #ifdef __KERNEL__ - -#define BITS_PER_LONG _MIPS_SZLONG - #ifndef __ASSEMBLY__ #if (defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR)) \ diff --git a/arch/mn10300/include/asm/bitsperlong.h b/arch/mn10300/include/asm/bitsperlong.h new file mode 100644 index 000000000000..6dc0bb0c13b2 --- /dev/null +++ b/arch/mn10300/include/asm/bitsperlong.h @@ -0,0 +1 @@ +#include diff --git a/arch/parisc/include/asm/bitsperlong.h b/arch/parisc/include/asm/bitsperlong.h new file mode 100644 index 000000000000..75196b415d3f --- /dev/null +++ b/arch/parisc/include/asm/bitsperlong.h @@ -0,0 +1,20 @@ +#ifndef __ASM_PARISC_BITSPERLONG_H +#define __ASM_PARISC_BITSPERLONG_H + +/* + * using CONFIG_* outside of __KERNEL__ is wrong, + * __LP64__ was also removed from headers, so what + * is the right approach on parisc? + * -arnd + */ +#if (defined(__KERNEL__) && defined(CONFIG_64BIT)) || defined (__LP64__) +#define __BITS_PER_LONG 64 +#define SHIFT_PER_LONG 6 +#else +#define __BITS_PER_LONG 32 +#define SHIFT_PER_LONG 5 +#endif + +#include + +#endif /* __ASM_PARISC_BITSPERLONG_H */ diff --git a/arch/parisc/include/asm/types.h b/arch/parisc/include/asm/types.h index 7f5a39bfb4ce..20135cc80039 100644 --- a/arch/parisc/include/asm/types.h +++ b/arch/parisc/include/asm/types.h @@ -14,14 +14,6 @@ typedef unsigned short umode_t; */ #ifdef __KERNEL__ -#ifdef CONFIG_64BIT -#define BITS_PER_LONG 64 -#define SHIFT_PER_LONG 6 -#else -#define BITS_PER_LONG 32 -#define SHIFT_PER_LONG 5 -#endif - #ifndef __ASSEMBLY__ /* Dma addresses are 32-bits wide. 
*/ diff --git a/arch/powerpc/include/asm/bitsperlong.h b/arch/powerpc/include/asm/bitsperlong.h new file mode 100644 index 000000000000..5f1659032c40 --- /dev/null +++ b/arch/powerpc/include/asm/bitsperlong.h @@ -0,0 +1,12 @@ +#ifndef __ASM_POWERPC_BITSPERLONG_H +#define __ASM_POWERPC_BITSPERLONG_H + +#if defined(__powerpc64__) +# define __BITS_PER_LONG 64 +#else +# define __BITS_PER_LONG 32 +#endif + +#include + +#endif /* __ASM_POWERPC_BITSPERLONG_H */ diff --git a/arch/powerpc/include/asm/types.h b/arch/powerpc/include/asm/types.h index 7ce27a52bb34..a5aea0ca34e9 100644 --- a/arch/powerpc/include/asm/types.h +++ b/arch/powerpc/include/asm/types.h @@ -40,15 +40,6 @@ typedef struct { #endif /* __ASSEMBLY__ */ #ifdef __KERNEL__ -/* - * These aren't exported outside the kernel to avoid name space clashes - */ -#ifdef __powerpc64__ -#define BITS_PER_LONG 64 -#else -#define BITS_PER_LONG 32 -#endif - #ifndef __ASSEMBLY__ typedef __vector128 vector128; diff --git a/arch/s390/include/asm/bitsperlong.h b/arch/s390/include/asm/bitsperlong.h new file mode 100644 index 000000000000..6b235aea9c66 --- /dev/null +++ b/arch/s390/include/asm/bitsperlong.h @@ -0,0 +1,13 @@ +#ifndef __ASM_S390_BITSPERLONG_H +#define __ASM_S390_BITSPERLONG_H + +#ifndef __s390x__ +#define __BITS_PER_LONG 32 +#else +#define __BITS_PER_LONG 64 +#endif + +#include + +#endif /* __ASM_S390_BITSPERLONG_H */ + diff --git a/arch/s390/include/asm/types.h b/arch/s390/include/asm/types.h index 3dc3fc228812..04d6b95a89c6 100644 --- a/arch/s390/include/asm/types.h +++ b/arch/s390/include/asm/types.h @@ -28,12 +28,6 @@ typedef __signed__ long saddr_t; */ #ifdef __KERNEL__ -#ifndef __s390x__ -#define BITS_PER_LONG 32 -#else -#define BITS_PER_LONG 64 -#endif - #ifndef __ASSEMBLY__ typedef u64 dma64_addr_t; diff --git a/arch/sh/include/asm/bitsperlong.h b/arch/sh/include/asm/bitsperlong.h new file mode 100644 index 000000000000..6dc0bb0c13b2 --- /dev/null +++ b/arch/sh/include/asm/bitsperlong.h @@ -0,0 +1 @@ +#include diff --git a/arch/sparc/include/asm/bitsperlong.h b/arch/sparc/include/asm/bitsperlong.h new file mode 100644 index 000000000000..40dcaa3aaa56 --- /dev/null +++ b/arch/sparc/include/asm/bitsperlong.h @@ -0,0 +1,13 @@ +#ifndef __ASM_ALPHA_BITSPERLONG_H +#define __ASM_ALPHA_BITSPERLONG_H + +#if defined(__sparc__) && defined(__arch64__) +#define __BITS_PER_LONG 64 +#else +#define __BITS_PER_LONG 32 +#endif + +#include + +#endif /* __ASM_ALPHA_BITSPERLONG_H */ + diff --git a/arch/sparc/include/asm/types.h b/arch/sparc/include/asm/types.h index 2237118825d0..de671d73baed 100644 --- a/arch/sparc/include/asm/types.h +++ b/arch/sparc/include/asm/types.h @@ -21,8 +21,6 @@ typedef unsigned short umode_t; #ifdef __KERNEL__ -#define BITS_PER_LONG 64 - #ifndef __ASSEMBLY__ /* Dma addresses come in generic and 64-bit flavours. 
*/ @@ -46,8 +44,6 @@ typedef unsigned short umode_t; #ifdef __KERNEL__ -#define BITS_PER_LONG 32 - #ifndef __ASSEMBLY__ typedef u32 dma_addr_t; diff --git a/arch/x86/include/asm/bitsperlong.h b/arch/x86/include/asm/bitsperlong.h new file mode 100644 index 000000000000..b0ae1c4dc791 --- /dev/null +++ b/arch/x86/include/asm/bitsperlong.h @@ -0,0 +1,13 @@ +#ifndef __ASM_X86_BITSPERLONG_H +#define __ASM_X86_BITSPERLONG_H + +#ifdef __x86_64__ +# define __BITS_PER_LONG 64 +#else +# define __BITS_PER_LONG 32 +#endif + +#include + +#endif /* __ASM_X86_BITSPERLONG_H */ + diff --git a/arch/x86/include/asm/types.h b/arch/x86/include/asm/types.h index e6f736320077..09b97745772f 100644 --- a/arch/x86/include/asm/types.h +++ b/arch/x86/include/asm/types.h @@ -14,12 +14,6 @@ typedef unsigned short umode_t; */ #ifdef __KERNEL__ -#ifdef CONFIG_X86_32 -# define BITS_PER_LONG 32 -#else -# define BITS_PER_LONG 64 -#endif - #ifndef __ASSEMBLY__ typedef u64 dma64_addr_t; diff --git a/arch/xtensa/include/asm/bitsperlong.h b/arch/xtensa/include/asm/bitsperlong.h new file mode 100644 index 000000000000..6dc0bb0c13b2 --- /dev/null +++ b/arch/xtensa/include/asm/bitsperlong.h @@ -0,0 +1 @@ +#include diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild index 460b08d51e2e..cbb437875f5c 100644 --- a/include/asm-generic/Kbuild +++ b/include/asm-generic/Kbuild @@ -1,3 +1,4 @@ +header-y += bitsperlong.h header-y += errno-base.h header-y += errno.h header-y += fcntl.h diff --git a/include/asm-generic/Kbuild.asm b/include/asm-generic/Kbuild.asm index 70d185534b9d..290910e4ede4 100644 --- a/include/asm-generic/Kbuild.asm +++ b/include/asm-generic/Kbuild.asm @@ -9,6 +9,7 @@ unifdef-y += a.out.h endif unifdef-y += auxvec.h unifdef-y += byteorder.h +unifdef-y += bitsperlong.h unifdef-y += errno.h unifdef-y += fcntl.h unifdef-y += ioctl.h diff --git a/include/asm-generic/bitsperlong.h b/include/asm-generic/bitsperlong.h new file mode 100644 index 000000000000..4ae54e07de83 --- /dev/null +++ b/include/asm-generic/bitsperlong.h @@ -0,0 +1,32 @@ +#ifndef __ASM_GENERIC_BITS_PER_LONG +#define __ASM_GENERIC_BITS_PER_LONG + +/* + * There seems to be no way of detecting this automatically from user + * space, so 64 bit architectures should override this in their + * bitsperlong.h. In particular, an architecture that supports + * both 32 and 64 bit user space must not rely on CONFIG_64BIT + * to decide it, but rather check a compiler provided macro. + */ +#ifndef __BITS_PER_LONG +#define __BITS_PER_LONG 32 +#endif + +#ifdef __KERNEL__ + +#ifdef CONFIG_64BIT +#define BITS_PER_LONG 64 +#else +#define BITS_PER_LONG 32 +#endif /* CONFIG_64BIT */ + +/* + * FIXME: The check currently breaks x86-64 build, so it's + * temporarily disabled. Please fix x86-64 and reenable + */ +#if 0 && BITS_PER_LONG != __BITS_PER_LONG +#error Inconsistent word size. Check asm/bitsperlong.h +#endif + +#endif /* __KERNEL__ */ +#endif /* __ASM_GENERIC_BITS_PER_LONG */ diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h index 2af9b75d77db..1ca3efc976cc 100644 --- a/include/asm-generic/int-l64.h +++ b/include/asm-generic/int-l64.h @@ -8,6 +8,8 @@ #ifndef _ASM_GENERIC_INT_L64_H #define _ASM_GENERIC_INT_L64_H +#include + #ifndef __ASSEMBLY__ /* * __xx is ok: it doesn't pollute the POSIX namespace. 
Use these in the diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h index f9bc9ac29b36..f394147c0739 100644 --- a/include/asm-generic/int-ll64.h +++ b/include/asm-generic/int-ll64.h @@ -8,6 +8,8 @@ #ifndef _ASM_GENERIC_INT_LL64_H #define _ASM_GENERIC_INT_LL64_H +#include + #ifndef __ASSEMBLY__ /* * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the -- cgit v1.2.3 From 72099ed2719fc5829bd79c6ca9d1783ed026eb37 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 13 May 2009 22:56:29 +0000 Subject: asm-generic: rename atomic.h to atomic-long.h The existing asm-generic/atomic.h only defines the atomic_long type. This renames it to atomic-long.h so we have a place to add a truly generic atomic.h that can be used on all non-SMP systems. Signed-off-by: Remis Lima Baima Signed-off-by: Arnd Bergmann Acked-by: Ingo Molnar --- arch/alpha/include/asm/atomic.h | 2 +- arch/arm/include/asm/atomic.h | 2 +- arch/avr32/include/asm/atomic.h | 2 +- arch/blackfin/include/asm/atomic.h | 2 +- arch/cris/include/asm/atomic.h | 2 +- arch/frv/include/asm/atomic.h | 2 +- arch/h8300/include/asm/atomic.h | 2 +- arch/ia64/include/asm/atomic.h | 2 +- arch/m32r/include/asm/atomic.h | 2 +- arch/m68k/include/asm/atomic_mm.h | 2 +- arch/m68k/include/asm/atomic_no.h | 2 +- arch/microblaze/include/asm/atomic.h | 2 +- arch/mips/include/asm/atomic.h | 2 +- arch/mn10300/include/asm/atomic.h | 2 +- arch/parisc/include/asm/atomic.h | 2 +- arch/powerpc/include/asm/atomic.h | 2 +- arch/s390/include/asm/atomic.h | 2 +- arch/sh/include/asm/atomic.h | 2 +- arch/sparc/include/asm/atomic_32.h | 2 +- arch/sparc/include/asm/atomic_64.h | 2 +- arch/x86/include/asm/atomic_32.h | 2 +- arch/x86/include/asm/atomic_64.h | 2 +- arch/xtensa/include/asm/atomic.h | 2 +- include/asm-generic/atomic-long.h | 258 +++++++++++++++++++++++++++++++++++ include/asm-generic/atomic.h | 258 ----------------------------------- include/asm-generic/bitops/atomic.h | 1 + 26 files changed, 282 insertions(+), 281 deletions(-) create mode 100644 include/asm-generic/atomic-long.h delete mode 100644 include/asm-generic/atomic.h (limited to 'arch/s390/include/asm') diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h index 62b363584b2b..610dff44d94b 100644 --- a/arch/alpha/include/asm/atomic.h +++ b/arch/alpha/include/asm/atomic.h @@ -256,5 +256,5 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) #define smp_mb__before_atomic_inc() smp_mb() #define smp_mb__after_atomic_inc() smp_mb() -#include +#include #endif /* _ALPHA_ATOMIC_H */ diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h index 16b52f397983..9e07fe507029 100644 --- a/arch/arm/include/asm/atomic.h +++ b/arch/arm/include/asm/atomic.h @@ -249,6 +249,6 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) #define smp_mb__before_atomic_inc() smp_mb() #define smp_mb__after_atomic_inc() smp_mb() -#include +#include #endif #endif diff --git a/arch/avr32/include/asm/atomic.h b/arch/avr32/include/asm/atomic.h index 318815107748..b131c27ddf57 100644 --- a/arch/avr32/include/asm/atomic.h +++ b/arch/avr32/include/asm/atomic.h @@ -196,6 +196,6 @@ static inline int atomic_sub_if_positive(int i, atomic_t *v) #define smp_mb__before_atomic_inc() barrier() #define smp_mb__after_atomic_inc() barrier() -#include +#include #endif /* __ASM_AVR32_ATOMIC_H */ diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h index 94b2a9b19451..7bbf44e4ddf9 100644 --- 
a/arch/blackfin/include/asm/atomic.h +++ b/arch/blackfin/include/asm/atomic.h @@ -208,6 +208,6 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v) #define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0) #define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0) -#include +#include #endif /* __ARCH_BLACKFIN_ATOMIC __ */ diff --git a/arch/cris/include/asm/atomic.h b/arch/cris/include/asm/atomic.h index 5718dd8902a1..a6aca819e9f3 100644 --- a/arch/cris/include/asm/atomic.h +++ b/arch/cris/include/asm/atomic.h @@ -158,5 +158,5 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) #define smp_mb__before_atomic_inc() barrier() #define smp_mb__after_atomic_inc() barrier() -#include +#include #endif diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h index 296c35cfb207..0409d981fd39 100644 --- a/arch/frv/include/asm/atomic.h +++ b/arch/frv/include/asm/atomic.h @@ -194,5 +194,5 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) -#include +#include #endif /* _ASM_ATOMIC_H */ diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h index 833186c8dc3b..33c8c0fa9583 100644 --- a/arch/h8300/include/asm/atomic.h +++ b/arch/h8300/include/asm/atomic.h @@ -141,5 +141,5 @@ static __inline__ void atomic_set_mask(unsigned long mask, unsigned long *v) #define smp_mb__before_atomic_inc() barrier() #define smp_mb__after_atomic_inc() barrier() -#include +#include #endif /* __ARCH_H8300_ATOMIC __ */ diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h index d37292bd9875..88405cb0832a 100644 --- a/arch/ia64/include/asm/atomic.h +++ b/arch/ia64/include/asm/atomic.h @@ -216,5 +216,5 @@ atomic64_add_negative (__s64 i, atomic64_t *v) #define smp_mb__before_atomic_inc() barrier() #define smp_mb__after_atomic_inc() barrier() -#include +#include #endif /* _ASM_IA64_ATOMIC_H */ diff --git a/arch/m32r/include/asm/atomic.h b/arch/m32r/include/asm/atomic.h index 2eed30f84080..63f0cf0f50dd 100644 --- a/arch/m32r/include/asm/atomic.h +++ b/arch/m32r/include/asm/atomic.h @@ -314,5 +314,5 @@ static __inline__ void atomic_set_mask(unsigned long mask, atomic_t *addr) #define smp_mb__before_atomic_inc() barrier() #define smp_mb__after_atomic_inc() barrier() -#include +#include #endif /* _ASM_M32R_ATOMIC_H */ diff --git a/arch/m68k/include/asm/atomic_mm.h b/arch/m68k/include/asm/atomic_mm.h index eb0ab9d4ee77..88b7af20a996 100644 --- a/arch/m68k/include/asm/atomic_mm.h +++ b/arch/m68k/include/asm/atomic_mm.h @@ -192,5 +192,5 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) #define smp_mb__before_atomic_inc() barrier() #define smp_mb__after_atomic_inc() barrier() -#include +#include #endif /* __ARCH_M68K_ATOMIC __ */ diff --git a/arch/m68k/include/asm/atomic_no.h b/arch/m68k/include/asm/atomic_no.h index 6bb674855a3f..5674cb9449bd 100644 --- a/arch/m68k/include/asm/atomic_no.h +++ b/arch/m68k/include/asm/atomic_no.h @@ -151,5 +151,5 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) #define atomic_dec_return(v) atomic_sub_return(1,(v)) #define atomic_inc_return(v) atomic_add_return(1,(v)) -#include +#include #endif /* __ARCH_M68KNOMMU_ATOMIC __ */ diff --git a/arch/microblaze/include/asm/atomic.h b/arch/microblaze/include/asm/atomic.h index a448d94ab721..0de612ad7cb2 100644 --- a/arch/microblaze/include/asm/atomic.h +++ b/arch/microblaze/include/asm/atomic.h @@ -118,6 +118,6 @@ static inline int 
atomic_dec_if_positive(atomic_t *v) #define smp_mb__before_atomic_inc() barrier() #define smp_mb__after_atomic_inc() barrier() -#include +#include #endif /* _ASM_MICROBLAZE_ATOMIC_H */ diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h index 1b332e15ab52..eb7f01cfd1ac 100644 --- a/arch/mips/include/asm/atomic.h +++ b/arch/mips/include/asm/atomic.h @@ -793,6 +793,6 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) #define smp_mb__before_atomic_inc() smp_llsc_mb() #define smp_mb__after_atomic_inc() smp_llsc_mb() -#include +#include #endif /* _ASM_ATOMIC_H */ diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h index bc064825f9b1..5bf5be9566de 100644 --- a/arch/mn10300/include/asm/atomic.h +++ b/arch/mn10300/include/asm/atomic.h @@ -151,7 +151,7 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) #define smp_mb__before_atomic_inc() barrier() #define smp_mb__after_atomic_inc() barrier() -#include +#include #endif /* __KERNEL__ */ #endif /* _ASM_ATOMIC_H */ diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h index ada3e5364d82..7eeaff944360 100644 --- a/arch/parisc/include/asm/atomic.h +++ b/arch/parisc/include/asm/atomic.h @@ -338,6 +338,6 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) #endif /* CONFIG_64BIT */ -#include +#include #endif /* _ASM_PARISC_ATOMIC_H_ */ diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h index b401950f5259..b7d2d07b6f96 100644 --- a/arch/powerpc/include/asm/atomic.h +++ b/arch/powerpc/include/asm/atomic.h @@ -472,6 +472,6 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) #endif /* __powerpc64__ */ -#include +#include #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_ATOMIC_H_ */ diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h index de432f2de2d2..fca9dffcc669 100644 --- a/arch/s390/include/asm/atomic.h +++ b/arch/s390/include/asm/atomic.h @@ -275,6 +275,6 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, #define smp_mb__before_atomic_inc() smp_mb() #define smp_mb__after_atomic_inc() smp_mb() -#include +#include #endif /* __KERNEL__ */ #endif /* __ARCH_S390_ATOMIC__ */ diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h index 6327ffbb1992..a5647d0cd179 100644 --- a/arch/sh/include/asm/atomic.h +++ b/arch/sh/include/asm/atomic.h @@ -84,5 +84,5 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) #define smp_mb__before_atomic_inc() barrier() #define smp_mb__after_atomic_inc() barrier() -#include +#include #endif /* __ASM_SH_ATOMIC_H */ diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h index bb91b1248cd1..f0d343c3b956 100644 --- a/arch/sparc/include/asm/atomic_32.h +++ b/arch/sparc/include/asm/atomic_32.h @@ -161,5 +161,5 @@ static inline int __atomic24_sub(int i, atomic24_t *v) #endif /* !(__KERNEL__) */ -#include +#include #endif /* !(__ARCH_SPARC_ATOMIC__) */ diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h index a0a706492696..f2e48009989e 100644 --- a/arch/sparc/include/asm/atomic_64.h +++ b/arch/sparc/include/asm/atomic_64.h @@ -114,5 +114,5 @@ static inline int atomic64_add_unless(atomic64_t *v, long a, long u) #define smp_mb__before_atomic_inc() barrier() #define smp_mb__after_atomic_inc() barrier() -#include +#include #endif /* !(__ARCH_SPARC64_ATOMIC__) */ diff --git a/arch/x86/include/asm/atomic_32.h 
b/arch/x86/include/asm/atomic_32.h index 85b46fba4229..c83d31486081 100644 --- a/arch/x86/include/asm/atomic_32.h +++ b/arch/x86/include/asm/atomic_32.h @@ -247,5 +247,5 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) #define smp_mb__before_atomic_inc() barrier() #define smp_mb__after_atomic_inc() barrier() -#include +#include #endif /* _ASM_X86_ATOMIC_32_H */ diff --git a/arch/x86/include/asm/atomic_64.h b/arch/x86/include/asm/atomic_64.h index 8c21731984da..0d6360220007 100644 --- a/arch/x86/include/asm/atomic_64.h +++ b/arch/x86/include/asm/atomic_64.h @@ -455,5 +455,5 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2) #define smp_mb__before_atomic_inc() barrier() #define smp_mb__after_atomic_inc() barrier() -#include +#include #endif /* _ASM_X86_ATOMIC_64_H */ diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h index 67ad67bed8c1..22d6dde42619 100644 --- a/arch/xtensa/include/asm/atomic.h +++ b/arch/xtensa/include/asm/atomic.h @@ -292,7 +292,7 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v) #define smp_mb__before_atomic_inc() barrier() #define smp_mb__after_atomic_inc() barrier() -#include +#include #endif /* __KERNEL__ */ #endif /* _XTENSA_ATOMIC_H */ diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h new file mode 100644 index 000000000000..76e27d66c055 --- /dev/null +++ b/include/asm-generic/atomic-long.h @@ -0,0 +1,258 @@ +#ifndef _ASM_GENERIC_ATOMIC_LONG_H +#define _ASM_GENERIC_ATOMIC_LONG_H +/* + * Copyright (C) 2005 Silicon Graphics, Inc. + * Christoph Lameter + * + * Allows to provide arch independent atomic definitions without the need to + * edit all arch specific atomic.h files. + */ + +#include + +/* + * Suppport for atomic_long_t + * + * Casts for parameters are avoided for existing atomic functions in order to + * avoid issues with cast-as-lval under gcc 4.x and other limitations that the + * macros of a platform may have. 
+ */ + +#if BITS_PER_LONG == 64 + +typedef atomic64_t atomic_long_t; + +#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i) + +static inline long atomic_long_read(atomic_long_t *l) +{ + atomic64_t *v = (atomic64_t *)l; + + return (long)atomic64_read(v); +} + +static inline void atomic_long_set(atomic_long_t *l, long i) +{ + atomic64_t *v = (atomic64_t *)l; + + atomic64_set(v, i); +} + +static inline void atomic_long_inc(atomic_long_t *l) +{ + atomic64_t *v = (atomic64_t *)l; + + atomic64_inc(v); +} + +static inline void atomic_long_dec(atomic_long_t *l) +{ + atomic64_t *v = (atomic64_t *)l; + + atomic64_dec(v); +} + +static inline void atomic_long_add(long i, atomic_long_t *l) +{ + atomic64_t *v = (atomic64_t *)l; + + atomic64_add(i, v); +} + +static inline void atomic_long_sub(long i, atomic_long_t *l) +{ + atomic64_t *v = (atomic64_t *)l; + + atomic64_sub(i, v); +} + +static inline int atomic_long_sub_and_test(long i, atomic_long_t *l) +{ + atomic64_t *v = (atomic64_t *)l; + + return atomic64_sub_and_test(i, v); +} + +static inline int atomic_long_dec_and_test(atomic_long_t *l) +{ + atomic64_t *v = (atomic64_t *)l; + + return atomic64_dec_and_test(v); +} + +static inline int atomic_long_inc_and_test(atomic_long_t *l) +{ + atomic64_t *v = (atomic64_t *)l; + + return atomic64_inc_and_test(v); +} + +static inline int atomic_long_add_negative(long i, atomic_long_t *l) +{ + atomic64_t *v = (atomic64_t *)l; + + return atomic64_add_negative(i, v); +} + +static inline long atomic_long_add_return(long i, atomic_long_t *l) +{ + atomic64_t *v = (atomic64_t *)l; + + return (long)atomic64_add_return(i, v); +} + +static inline long atomic_long_sub_return(long i, atomic_long_t *l) +{ + atomic64_t *v = (atomic64_t *)l; + + return (long)atomic64_sub_return(i, v); +} + +static inline long atomic_long_inc_return(atomic_long_t *l) +{ + atomic64_t *v = (atomic64_t *)l; + + return (long)atomic64_inc_return(v); +} + +static inline long atomic_long_dec_return(atomic_long_t *l) +{ + atomic64_t *v = (atomic64_t *)l; + + return (long)atomic64_dec_return(v); +} + +static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u) +{ + atomic64_t *v = (atomic64_t *)l; + + return (long)atomic64_add_unless(v, a, u); +} + +#define atomic_long_inc_not_zero(l) atomic64_inc_not_zero((atomic64_t *)(l)) + +#define atomic_long_cmpxchg(l, old, new) \ + (atomic64_cmpxchg((atomic64_t *)(l), (old), (new))) +#define atomic_long_xchg(v, new) \ + (atomic64_xchg((atomic64_t *)(l), (new))) + +#else /* BITS_PER_LONG == 64 */ + +typedef atomic_t atomic_long_t; + +#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i) +static inline long atomic_long_read(atomic_long_t *l) +{ + atomic_t *v = (atomic_t *)l; + + return (long)atomic_read(v); +} + +static inline void atomic_long_set(atomic_long_t *l, long i) +{ + atomic_t *v = (atomic_t *)l; + + atomic_set(v, i); +} + +static inline void atomic_long_inc(atomic_long_t *l) +{ + atomic_t *v = (atomic_t *)l; + + atomic_inc(v); +} + +static inline void atomic_long_dec(atomic_long_t *l) +{ + atomic_t *v = (atomic_t *)l; + + atomic_dec(v); +} + +static inline void atomic_long_add(long i, atomic_long_t *l) +{ + atomic_t *v = (atomic_t *)l; + + atomic_add(i, v); +} + +static inline void atomic_long_sub(long i, atomic_long_t *l) +{ + atomic_t *v = (atomic_t *)l; + + atomic_sub(i, v); +} + +static inline int atomic_long_sub_and_test(long i, atomic_long_t *l) +{ + atomic_t *v = (atomic_t *)l; + + return atomic_sub_and_test(i, v); +} + +static inline int atomic_long_dec_and_test(atomic_long_t *l) +{ + atomic_t *v 
= (atomic_t *)l; + + return atomic_dec_and_test(v); +} + +static inline int atomic_long_inc_and_test(atomic_long_t *l) +{ + atomic_t *v = (atomic_t *)l; + + return atomic_inc_and_test(v); +} + +static inline int atomic_long_add_negative(long i, atomic_long_t *l) +{ + atomic_t *v = (atomic_t *)l; + + return atomic_add_negative(i, v); +} + +static inline long atomic_long_add_return(long i, atomic_long_t *l) +{ + atomic_t *v = (atomic_t *)l; + + return (long)atomic_add_return(i, v); +} + +static inline long atomic_long_sub_return(long i, atomic_long_t *l) +{ + atomic_t *v = (atomic_t *)l; + + return (long)atomic_sub_return(i, v); +} + +static inline long atomic_long_inc_return(atomic_long_t *l) +{ + atomic_t *v = (atomic_t *)l; + + return (long)atomic_inc_return(v); +} + +static inline long atomic_long_dec_return(atomic_long_t *l) +{ + atomic_t *v = (atomic_t *)l; + + return (long)atomic_dec_return(v); +} + +static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u) +{ + atomic_t *v = (atomic_t *)l; + + return (long)atomic_add_unless(v, a, u); +} + +#define atomic_long_inc_not_zero(l) atomic_inc_not_zero((atomic_t *)(l)) + +#define atomic_long_cmpxchg(l, old, new) \ + (atomic_cmpxchg((atomic_t *)(l), (old), (new))) +#define atomic_long_xchg(v, new) \ + (atomic_xchg((atomic_t *)(v), (new))) + +#endif /* BITS_PER_LONG == 64 */ + +#endif /* _ASM_GENERIC_ATOMIC_LONG_H */ diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h deleted file mode 100644 index 3673a13b6703..000000000000 --- a/include/asm-generic/atomic.h +++ /dev/null @@ -1,258 +0,0 @@ -#ifndef _ASM_GENERIC_ATOMIC_H -#define _ASM_GENERIC_ATOMIC_H -/* - * Copyright (C) 2005 Silicon Graphics, Inc. - * Christoph Lameter - * - * Allows to provide arch independent atomic definitions without the need to - * edit all arch specific atomic.h files. - */ - -#include - -/* - * Suppport for atomic_long_t - * - * Casts for parameters are avoided for existing atomic functions in order to - * avoid issues with cast-as-lval under gcc 4.x and other limitations that the - * macros of a platform may have. 
- */ - -#if BITS_PER_LONG == 64 - -typedef atomic64_t atomic_long_t; - -#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i) - -static inline long atomic_long_read(atomic_long_t *l) -{ - atomic64_t *v = (atomic64_t *)l; - - return (long)atomic64_read(v); -} - -static inline void atomic_long_set(atomic_long_t *l, long i) -{ - atomic64_t *v = (atomic64_t *)l; - - atomic64_set(v, i); -} - -static inline void atomic_long_inc(atomic_long_t *l) -{ - atomic64_t *v = (atomic64_t *)l; - - atomic64_inc(v); -} - -static inline void atomic_long_dec(atomic_long_t *l) -{ - atomic64_t *v = (atomic64_t *)l; - - atomic64_dec(v); -} - -static inline void atomic_long_add(long i, atomic_long_t *l) -{ - atomic64_t *v = (atomic64_t *)l; - - atomic64_add(i, v); -} - -static inline void atomic_long_sub(long i, atomic_long_t *l) -{ - atomic64_t *v = (atomic64_t *)l; - - atomic64_sub(i, v); -} - -static inline int atomic_long_sub_and_test(long i, atomic_long_t *l) -{ - atomic64_t *v = (atomic64_t *)l; - - return atomic64_sub_and_test(i, v); -} - -static inline int atomic_long_dec_and_test(atomic_long_t *l) -{ - atomic64_t *v = (atomic64_t *)l; - - return atomic64_dec_and_test(v); -} - -static inline int atomic_long_inc_and_test(atomic_long_t *l) -{ - atomic64_t *v = (atomic64_t *)l; - - return atomic64_inc_and_test(v); -} - -static inline int atomic_long_add_negative(long i, atomic_long_t *l) -{ - atomic64_t *v = (atomic64_t *)l; - - return atomic64_add_negative(i, v); -} - -static inline long atomic_long_add_return(long i, atomic_long_t *l) -{ - atomic64_t *v = (atomic64_t *)l; - - return (long)atomic64_add_return(i, v); -} - -static inline long atomic_long_sub_return(long i, atomic_long_t *l) -{ - atomic64_t *v = (atomic64_t *)l; - - return (long)atomic64_sub_return(i, v); -} - -static inline long atomic_long_inc_return(atomic_long_t *l) -{ - atomic64_t *v = (atomic64_t *)l; - - return (long)atomic64_inc_return(v); -} - -static inline long atomic_long_dec_return(atomic_long_t *l) -{ - atomic64_t *v = (atomic64_t *)l; - - return (long)atomic64_dec_return(v); -} - -static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u) -{ - atomic64_t *v = (atomic64_t *)l; - - return (long)atomic64_add_unless(v, a, u); -} - -#define atomic_long_inc_not_zero(l) atomic64_inc_not_zero((atomic64_t *)(l)) - -#define atomic_long_cmpxchg(l, old, new) \ - (atomic64_cmpxchg((atomic64_t *)(l), (old), (new))) -#define atomic_long_xchg(v, new) \ - (atomic64_xchg((atomic64_t *)(l), (new))) - -#else /* BITS_PER_LONG == 64 */ - -typedef atomic_t atomic_long_t; - -#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i) -static inline long atomic_long_read(atomic_long_t *l) -{ - atomic_t *v = (atomic_t *)l; - - return (long)atomic_read(v); -} - -static inline void atomic_long_set(atomic_long_t *l, long i) -{ - atomic_t *v = (atomic_t *)l; - - atomic_set(v, i); -} - -static inline void atomic_long_inc(atomic_long_t *l) -{ - atomic_t *v = (atomic_t *)l; - - atomic_inc(v); -} - -static inline void atomic_long_dec(atomic_long_t *l) -{ - atomic_t *v = (atomic_t *)l; - - atomic_dec(v); -} - -static inline void atomic_long_add(long i, atomic_long_t *l) -{ - atomic_t *v = (atomic_t *)l; - - atomic_add(i, v); -} - -static inline void atomic_long_sub(long i, atomic_long_t *l) -{ - atomic_t *v = (atomic_t *)l; - - atomic_sub(i, v); -} - -static inline int atomic_long_sub_and_test(long i, atomic_long_t *l) -{ - atomic_t *v = (atomic_t *)l; - - return atomic_sub_and_test(i, v); -} - -static inline int atomic_long_dec_and_test(atomic_long_t *l) -{ - atomic_t *v 
= (atomic_t *)l; - - return atomic_dec_and_test(v); -} - -static inline int atomic_long_inc_and_test(atomic_long_t *l) -{ - atomic_t *v = (atomic_t *)l; - - return atomic_inc_and_test(v); -} - -static inline int atomic_long_add_negative(long i, atomic_long_t *l) -{ - atomic_t *v = (atomic_t *)l; - - return atomic_add_negative(i, v); -} - -static inline long atomic_long_add_return(long i, atomic_long_t *l) -{ - atomic_t *v = (atomic_t *)l; - - return (long)atomic_add_return(i, v); -} - -static inline long atomic_long_sub_return(long i, atomic_long_t *l) -{ - atomic_t *v = (atomic_t *)l; - - return (long)atomic_sub_return(i, v); -} - -static inline long atomic_long_inc_return(atomic_long_t *l) -{ - atomic_t *v = (atomic_t *)l; - - return (long)atomic_inc_return(v); -} - -static inline long atomic_long_dec_return(atomic_long_t *l) -{ - atomic_t *v = (atomic_t *)l; - - return (long)atomic_dec_return(v); -} - -static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u) -{ - atomic_t *v = (atomic_t *)l; - - return (long)atomic_add_unless(v, a, u); -} - -#define atomic_long_inc_not_zero(l) atomic_inc_not_zero((atomic_t *)(l)) - -#define atomic_long_cmpxchg(l, old, new) \ - (atomic_cmpxchg((atomic_t *)(l), (old), (new))) -#define atomic_long_xchg(v, new) \ - (atomic_xchg((atomic_t *)(v), (new))) - -#endif /* BITS_PER_LONG == 64 */ - -#endif /* _ASM_GENERIC_ATOMIC_H */ diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h index 4657f3e410fc..c8946465e63a 100644 --- a/include/asm-generic/bitops/atomic.h +++ b/include/asm-generic/bitops/atomic.h @@ -2,6 +2,7 @@ #define _ASM_GENERIC_BITOPS_ATOMIC_H_ #include +#include #ifdef CONFIG_SMP #include -- cgit v1.2.3 From 5b17e1cd8928ae65932758ce6478ac6d3e9a86b2 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 13 May 2009 22:56:30 +0000 Subject: asm-generic: rename page.h and uaccess.h The current asm-generic/page.h only contains the get_order function, and asm-generic/uaccess.h only implements unaligned accesses. This renames the file to getorder.h and uaccess-unaligned.h to make room for new page.h and uaccess.h file that will be usable by all simple (e.g. nommu) architectures. 
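As a quick illustration (not part of the patch), get_order() returns the smallest page order whose span covers the requested size; with 4 KiB pages (PAGE_SHIFT == 12) it behaves roughly as:

    get_order(4096);    /* -> 0: one page          */
    get_order(4097);    /* -> 1: two pages, 8 KiB  */
    get_order(65536);   /* -> 4: sixteen pages     */

The rename does not change this behaviour; only the header providing it moves from page.h to getorder.h.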
Signed-off-by: Remis Lima Baima Signed-off-by: Arnd Bergmann --- arch/alpha/include/asm/page.h | 2 +- arch/arm/include/asm/page.h | 2 +- arch/blackfin/include/asm/page.h | 2 +- arch/cris/include/asm/page.h | 2 +- arch/frv/include/asm/page.h | 2 +- arch/h8300/include/asm/page.h | 2 +- arch/m32r/include/asm/page.h | 2 +- arch/m68k/include/asm/page_mm.h | 2 +- arch/m68k/include/asm/page_no.h | 2 +- arch/microblaze/include/asm/page.h | 2 +- arch/mips/include/asm/page.h | 2 +- arch/parisc/include/asm/page.h | 2 +- arch/parisc/include/asm/uaccess.h | 2 +- arch/powerpc/include/asm/page_32.h | 2 +- arch/powerpc/include/asm/page_64.h | 2 +- arch/s390/include/asm/page.h | 2 +- arch/sh/include/asm/page.h | 2 +- arch/sparc/include/asm/page_32.h | 2 +- arch/sparc/include/asm/page_64.h | 2 +- arch/sparc/include/asm/uaccess_64.h | 2 +- arch/um/include/asm/page.h | 2 +- arch/x86/include/asm/page.h | 2 +- arch/xtensa/include/asm/page.h | 2 +- include/asm-generic/getorder.h | 24 ++++++++++++++++++++++++ include/asm-generic/page.h | 24 ------------------------ include/asm-generic/uaccess-unaligned.h | 26 ++++++++++++++++++++++++++ include/asm-generic/uaccess.h | 26 -------------------------- 27 files changed, 73 insertions(+), 73 deletions(-) create mode 100644 include/asm-generic/getorder.h delete mode 100644 include/asm-generic/page.h create mode 100644 include/asm-generic/uaccess-unaligned.h delete mode 100644 include/asm-generic/uaccess.h (limited to 'arch/s390/include/asm') diff --git a/arch/alpha/include/asm/page.h b/arch/alpha/include/asm/page.h index 0995f9d13417..07af062544fb 100644 --- a/arch/alpha/include/asm/page.h +++ b/arch/alpha/include/asm/page.h @@ -93,6 +93,6 @@ typedef struct page *pgtable_t; VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) #include -#include +#include #endif /* _ALPHA_PAGE_H */ diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h index 7b522770f29d..be962c1349c4 100644 --- a/arch/arm/include/asm/page.h +++ b/arch/arm/include/asm/page.h @@ -202,6 +202,6 @@ typedef struct page *pgtable_t; (((current->personality & READ_IMPLIES_EXEC) ? 
VM_EXEC : 0) | \ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) -#include +#include #endif diff --git a/arch/blackfin/include/asm/page.h b/arch/blackfin/include/asm/page.h index 344f6a8c1f22..3ea2016a1d4a 100644 --- a/arch/blackfin/include/asm/page.h +++ b/arch/blackfin/include/asm/page.h @@ -81,7 +81,7 @@ extern unsigned long memory_end; #define virt_addr_valid(kaddr) (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \ ((void *)(kaddr) < (void *)memory_end)) -#include +#include #endif /* __ASSEMBLY__ */ diff --git a/arch/cris/include/asm/page.h b/arch/cris/include/asm/page.h index f3fdbd09c34c..be45ee366be9 100644 --- a/arch/cris/include/asm/page.h +++ b/arch/cris/include/asm/page.h @@ -68,7 +68,7 @@ typedef struct page *pgtable_t; VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) #include -#include +#include #endif /* _CRIS_PAGE_H */ diff --git a/arch/frv/include/asm/page.h b/arch/frv/include/asm/page.h index bd9c220094c7..25c6a5002355 100644 --- a/arch/frv/include/asm/page.h +++ b/arch/frv/include/asm/page.h @@ -73,6 +73,6 @@ extern unsigned long max_pfn; #endif /* __ASSEMBLY__ */ #include -#include +#include #endif /* _ASM_PAGE_H */ diff --git a/arch/h8300/include/asm/page.h b/arch/h8300/include/asm/page.h index 0b6acf0b03aa..837381a2df46 100644 --- a/arch/h8300/include/asm/page.h +++ b/arch/h8300/include/asm/page.h @@ -73,6 +73,6 @@ extern unsigned long memory_end; #endif /* __ASSEMBLY__ */ #include -#include +#include #endif /* _H8300_PAGE_H */ diff --git a/arch/m32r/include/asm/page.h b/arch/m32r/include/asm/page.h index c9333089fe11..11777f7a5628 100644 --- a/arch/m32r/include/asm/page.h +++ b/arch/m32r/include/asm/page.h @@ -82,6 +82,6 @@ typedef struct page *pgtable_t; #define devmem_is_allowed(x) 1 #include -#include +#include #endif /* _ASM_M32R_PAGE_H */ diff --git a/arch/m68k/include/asm/page_mm.h b/arch/m68k/include/asm/page_mm.h index a34b8bad7847..d009f3ea39ab 100644 --- a/arch/m68k/include/asm/page_mm.h +++ b/arch/m68k/include/asm/page_mm.h @@ -223,6 +223,6 @@ static inline __attribute_const__ int __virt_to_node_shift(void) #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) -#include +#include #endif /* _M68K_PAGE_H */ diff --git a/arch/m68k/include/asm/page_no.h b/arch/m68k/include/asm/page_no.h index 3a1ede4544cb..9aa3f90f4855 100644 --- a/arch/m68k/include/asm/page_no.h +++ b/arch/m68k/include/asm/page_no.h @@ -72,6 +72,6 @@ extern unsigned long memory_end; #endif /* __ASSEMBLY__ */ -#include +#include #endif /* _M68KNOMMU_PAGE_H */ diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h index 7238dcfcc517..962c210e5b9a 100644 --- a/arch/microblaze/include/asm/page.h +++ b/arch/microblaze/include/asm/page.h @@ -135,6 +135,6 @@ extern unsigned int memory_size; #endif /* __KERNEL__ */ #include -#include +#include #endif /* _ASM_MICROBLAZE_PAGE_H */ diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h index 9f946e4ca057..72c80d2034c2 100644 --- a/arch/mips/include/asm/page.h +++ b/arch/mips/include/asm/page.h @@ -189,6 +189,6 @@ typedef struct { unsigned long pgprot; } pgprot_t; #define CAC_ADDR(addr) ((addr) - UNCAC_BASE + PAGE_OFFSET) #include -#include +#include #endif /* _ASM_PAGE_H */ diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h index 7bc5125d7d4c..a84cc1f925f6 100644 --- a/arch/parisc/include/asm/page.h +++ b/arch/parisc/include/asm/page.h @@ -159,6 +159,6 @@ extern int npmem_ranges; VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) #include 
-#include +#include #endif /* _PARISC_PAGE_H */ diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h index cd4c0b2a8e70..7cf799d70b4c 100644 --- a/arch/parisc/include/asm/uaccess.h +++ b/arch/parisc/include/asm/uaccess.h @@ -7,7 +7,7 @@ #include #include #include -#include +#include #define VERIFY_READ 0 #define VERIFY_WRITE 1 diff --git a/arch/powerpc/include/asm/page_32.h b/arch/powerpc/include/asm/page_32.h index a0e3f6e6b4ee..bd0849dbcaaa 100644 --- a/arch/powerpc/include/asm/page_32.h +++ b/arch/powerpc/include/asm/page_32.h @@ -41,7 +41,7 @@ extern void clear_pages(void *page, int order); static inline void clear_page(void *page) { clear_pages(page, 0); } extern void copy_page(void *to, void *from); -#include +#include #define PGD_T_LOG2 (__builtin_ffs(sizeof(pgd_t)) - 1) #define PTE_T_LOG2 (__builtin_ffs(sizeof(pte_t)) - 1) diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h index 043bfdfe4f73..5817a3b747e5 100644 --- a/arch/powerpc/include/asm/page_64.h +++ b/arch/powerpc/include/asm/page_64.h @@ -180,6 +180,6 @@ do { \ (test_thread_flag(TIF_32BIT) ? \ VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64) -#include +#include #endif /* _ASM_POWERPC_PAGE_64_H */ diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h index 32e8f6aa4384..3e3594d01f83 100644 --- a/arch/s390/include/asm/page.h +++ b/arch/s390/include/asm/page.h @@ -150,7 +150,7 @@ void arch_alloc_page(struct page *page, int order); VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) #include -#include +#include #define __HAVE_ARCH_GATE_AREA 1 diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h index 9c6d21ec0240..49592c780a6e 100644 --- a/arch/sh/include/asm/page.h +++ b/arch/sh/include/asm/page.h @@ -163,7 +163,7 @@ typedef struct page *pgtable_t; VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) #include -#include +#include /* vDSO support */ #ifdef CONFIG_VSYSCALL diff --git a/arch/sparc/include/asm/page_32.h b/arch/sparc/include/asm/page_32.h index d1806edc0958..f72080bdda94 100644 --- a/arch/sparc/include/asm/page_32.h +++ b/arch/sparc/include/asm/page_32.h @@ -152,6 +152,6 @@ extern unsigned long pfn_base; VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) #include -#include +#include #endif /* _SPARC_PAGE_H */ diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h index 4274ed13ddb2..f0d09b401036 100644 --- a/arch/sparc/include/asm/page_64.h +++ b/arch/sparc/include/asm/page_64.h @@ -132,6 +132,6 @@ typedef struct page *pgtable_t; #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) -#include +#include #endif /* _SPARC64_PAGE_H */ diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h index c64e767a3e4b..a38c03238918 100644 --- a/arch/sparc/include/asm/uaccess_64.h +++ b/arch/sparc/include/asm/uaccess_64.h @@ -12,7 +12,7 @@ #include #include #include -#include +#include #endif #ifndef __ASSEMBLY__ diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h index 55f28a0bae6d..4cc9b6cf480a 100644 --- a/arch/um/include/asm/page.h +++ b/arch/um/include/asm/page.h @@ -116,7 +116,7 @@ extern unsigned long uml_physmem; #define virt_addr_valid(v) pfn_valid(phys_to_pfn(__pa(v))) #include -#include +#include #endif /* __ASSEMBLY__ */ #endif /* __UM_PAGE_H */ diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h index 89ed9d70b0aa..625c3f0e741a 100644 --- a/arch/x86/include/asm/page.h +++ b/arch/x86/include/asm/page.h @@ -56,7 +56,7 
@@ extern bool __virt_addr_valid(unsigned long kaddr); #endif /* __ASSEMBLY__ */ #include -#include +#include #define __HAVE_ARCH_GATE_AREA 1 diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h index 17e0c5383b10..161bb89e98c8 100644 --- a/arch/xtensa/include/asm/page.h +++ b/arch/xtensa/include/asm/page.h @@ -129,7 +129,7 @@ static inline __attribute_const__ int get_order(unsigned long size) #else -# include +# include #endif diff --git a/include/asm-generic/getorder.h b/include/asm-generic/getorder.h new file mode 100644 index 000000000000..67e7245dc9b3 --- /dev/null +++ b/include/asm-generic/getorder.h @@ -0,0 +1,24 @@ +#ifndef __ASM_GENERIC_GETORDER_H +#define __ASM_GENERIC_GETORDER_H + +#ifndef __ASSEMBLY__ + +#include + +/* Pure 2^n version of get_order */ +static inline __attribute_const__ int get_order(unsigned long size) +{ + int order; + + size = (size - 1) >> (PAGE_SHIFT - 1); + order = -1; + do { + size >>= 1; + order++; + } while (size); + return order; +} + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_GENERIC_GETORDER_H */ diff --git a/include/asm-generic/page.h b/include/asm-generic/page.h deleted file mode 100644 index 14db733b8e68..000000000000 --- a/include/asm-generic/page.h +++ /dev/null @@ -1,24 +0,0 @@ -#ifndef _ASM_GENERIC_PAGE_H -#define _ASM_GENERIC_PAGE_H - -#ifndef __ASSEMBLY__ - -#include - -/* Pure 2^n version of get_order */ -static __inline__ __attribute_const__ int get_order(unsigned long size) -{ - int order; - - size = (size - 1) >> (PAGE_SHIFT - 1); - order = -1; - do { - size >>= 1; - order++; - } while (size); - return order; -} - -#endif /* __ASSEMBLY__ */ - -#endif /* _ASM_GENERIC_PAGE_H */ diff --git a/include/asm-generic/uaccess-unaligned.h b/include/asm-generic/uaccess-unaligned.h new file mode 100644 index 000000000000..67deb898f0c5 --- /dev/null +++ b/include/asm-generic/uaccess-unaligned.h @@ -0,0 +1,26 @@ +#ifndef __ASM_GENERIC_UACCESS_UNALIGNED_H +#define __ASM_GENERIC_UACCESS_UNALIGNED_H + +/* + * This macro should be used instead of __get_user() when accessing + * values at locations that are not known to be aligned. + */ +#define __get_user_unaligned(x, ptr) \ +({ \ + __typeof__ (*(ptr)) __x; \ + __copy_from_user(&__x, (ptr), sizeof(*(ptr))) ? -EFAULT : 0; \ + (x) = __x; \ +}) + + +/* + * This macro should be used instead of __put_user() when accessing + * values at locations that are not known to be aligned. + */ +#define __put_user_unaligned(x, ptr) \ +({ \ + __typeof__ (*(ptr)) __x = (x); \ + __copy_to_user((ptr), &__x, sizeof(*(ptr))) ? -EFAULT : 0; \ +}) + +#endif /* __ASM_GENERIC_UACCESS_UNALIGNED_H */ diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h deleted file mode 100644 index 549cb3a1640a..000000000000 --- a/include/asm-generic/uaccess.h +++ /dev/null @@ -1,26 +0,0 @@ -#ifndef _ASM_GENERIC_UACCESS_H_ -#define _ASM_GENERIC_UACCESS_H_ - -/* - * This macro should be used instead of __get_user() when accessing - * values at locations that are not known to be aligned. - */ -#define __get_user_unaligned(x, ptr) \ -({ \ - __typeof__ (*(ptr)) __x; \ - __copy_from_user(&__x, (ptr), sizeof(*(ptr))) ? -EFAULT : 0; \ - (x) = __x; \ -}) - - -/* - * This macro should be used instead of __put_user() when accessing - * values at locations that are not known to be aligned. - */ -#define __put_user_unaligned(x, ptr) \ -({ \ - __typeof__ (*(ptr)) __x = (x); \ - __copy_to_user((ptr), &__x, sizeof(*(ptr))) ? 
-EFAULT : 0; \ -}) - -#endif /* _ASM_GENERIC_UACCESS_H */ -- cgit v1.2.3 From 76d4e00a05d06c1d1552adea24fcf6182c9d8999 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Fri, 12 Jun 2009 10:26:21 +0200 Subject: [S390] merge cpu.h into cputime.h All definition in cpu.h have to do with cputime accounting. Move them to cputime.h and remove the header file. Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/cpu.h | 32 -------------------------------- arch/s390/include/asm/cputime.h | 19 +++++++++++++++++++ arch/s390/kernel/nmi.c | 2 +- arch/s390/kernel/s390_ext.c | 2 +- arch/s390/kernel/smp.c | 2 +- arch/s390/kernel/vtime.c | 2 +- drivers/s390/cio/cio.c | 2 +- 7 files changed, 24 insertions(+), 37 deletions(-) delete mode 100644 arch/s390/include/asm/cpu.h (limited to 'arch/s390/include/asm') diff --git a/arch/s390/include/asm/cpu.h b/arch/s390/include/asm/cpu.h deleted file mode 100644 index d60a2eefb17b..000000000000 --- a/arch/s390/include/asm/cpu.h +++ /dev/null @@ -1,32 +0,0 @@ -/* - * include/asm-s390/cpu.h - * - * Copyright IBM Corp. 2007 - * Author(s): Heiko Carstens - */ - -#ifndef _ASM_S390_CPU_H_ -#define _ASM_S390_CPU_H_ - -#include -#include -#include - -struct s390_idle_data { - spinlock_t lock; - unsigned long long idle_count; - unsigned long long idle_enter; - unsigned long long idle_time; -}; - -DECLARE_PER_CPU(struct s390_idle_data, s390_idle); - -void vtime_start_cpu(void); - -static inline void s390_idle_check(void) -{ - if ((&__get_cpu_var(s390_idle))->idle_enter != 0ULL) - vtime_start_cpu(); -} - -#endif /* _ASM_S390_CPU_H_ */ diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h index 941384fbd39c..ec917d42ee6d 100644 --- a/arch/s390/include/asm/cputime.h +++ b/arch/s390/include/asm/cputime.h @@ -9,6 +9,9 @@ #ifndef _S390_CPUTIME_H #define _S390_CPUTIME_H +#include +#include +#include #include /* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. 
*/ @@ -174,8 +177,24 @@ cputime64_to_clock_t(cputime64_t cputime) return __div(cputime, 4096000000ULL / USER_HZ); } +struct s390_idle_data { + spinlock_t lock; + unsigned long long idle_count; + unsigned long long idle_enter; + unsigned long long idle_time; +}; + +DECLARE_PER_CPU(struct s390_idle_data, s390_idle); + +void vtime_start_cpu(void); cputime64_t s390_get_idle_time(int cpu); #define arch_idle_time(cpu) s390_get_idle_time(cpu) +static inline void s390_idle_check(void) +{ + if ((&__get_cpu_var(s390_idle))->idle_enter != 0ULL) + vtime_start_cpu(); +} + #endif /* _S390_CPUTIME_H */ diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c index 28cf196ba775..015e27da40eb 100644 --- a/arch/s390/kernel/nmi.c +++ b/arch/s390/kernel/nmi.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c index a0d2d55d7fb3..6b0686d78fc7 100644 --- a/arch/s390/kernel/s390_ext.c +++ b/arch/s390/kernel/s390_ext.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index a985a3ba4401..0af302ceac2d 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -47,7 +47,7 @@ #include #include #include -#include +#include #include #include "entry.h" diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index c87f59bd8246..c8eb7255332b 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include static ext_int_info_t ext_int_info_timer; diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 2aebb9823044..9889f188c7c5 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -28,7 +28,7 @@ #include #include #include -#include +#include #include #include #include -- cgit v1.2.3 From ce58ae6f7f6bdd48c87472ff830a6d586ff076b2 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 12 Jun 2009 10:26:22 +0200 Subject: [S390] implement interrupt-enabling rwlocks arch backend for f5f7eac41db827a47b2163330eecd7bb55ae9f12 "Allow rwlocks to re-enable interrupts". 
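Illustrative sketch only (hypothetical names, not the literal s390 code): the new *_flags wait helpers spin with interrupts enabled and only disable them around the actual acquisition attempt, so the caller still ends up holding the lock with interrupts off:

    void __lock_wait_flags(lock_t *lp, unsigned long flags)
    {
            local_irq_restore(flags);        /* spin with interrupts enabled */
            while (1) {
                    if (!__can_lock(lp))
                            continue;        /* cheap read-only poll         */
                    local_irq_disable();     /* close the irq window         */
                    if (__try_acquire(lp))
                            return;          /* acquired, irqs are disabled  */
                    /* lost the race: loop and retry */
            }
    }

This is why the old __raw_read_lock_flags/__raw_write_lock_flags defines that simply aliased the non-flags variants can be dropped from spinlock.h below.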
Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/spinlock.h | 19 ++++++++++++++++--- arch/s390/lib/spinlock.c | 40 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 56 insertions(+), 3 deletions(-) (limited to 'arch/s390/include/asm') diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h index f3861b09ebb0..c9af0d19c7ab 100644 --- a/arch/s390/include/asm/spinlock.h +++ b/arch/s390/include/asm/spinlock.h @@ -122,8 +122,10 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lp) #define __raw_write_can_lock(x) ((x)->lock == 0) extern void _raw_read_lock_wait(raw_rwlock_t *lp); +extern void _raw_read_lock_wait_flags(raw_rwlock_t *lp, unsigned long flags); extern int _raw_read_trylock_retry(raw_rwlock_t *lp); extern void _raw_write_lock_wait(raw_rwlock_t *lp); +extern void _raw_write_lock_wait_flags(raw_rwlock_t *lp, unsigned long flags); extern int _raw_write_trylock_retry(raw_rwlock_t *lp); static inline void __raw_read_lock(raw_rwlock_t *rw) @@ -134,6 +136,14 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) _raw_read_lock_wait(rw); } +static inline void __raw_read_lock_flags(raw_rwlock_t *rw, unsigned long flags) +{ + unsigned int old; + old = rw->lock & 0x7fffffffU; + if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old) + _raw_read_lock_wait_flags(rw, flags); +} + static inline void __raw_read_unlock(raw_rwlock_t *rw) { unsigned int old, cmp; @@ -151,6 +161,12 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) _raw_write_lock_wait(rw); } +static inline void __raw_write_lock_flags(raw_rwlock_t *rw, unsigned long flags) +{ + if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0)) + _raw_write_lock_wait_flags(rw, flags); +} + static inline void __raw_write_unlock(raw_rwlock_t *rw) { _raw_compare_and_swap(&rw->lock, 0x80000000, 0); @@ -172,9 +188,6 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) return _raw_write_trylock_retry(rw); } -#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) -#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) - #define _raw_read_relax(lock) cpu_relax() #define _raw_write_relax(lock) cpu_relax() diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c index e41f4008afc5..f7e0d30250b7 100644 --- a/arch/s390/lib/spinlock.c +++ b/arch/s390/lib/spinlock.c @@ -124,6 +124,27 @@ void _raw_read_lock_wait(raw_rwlock_t *rw) } EXPORT_SYMBOL(_raw_read_lock_wait); +void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags) +{ + unsigned int old; + int count = spin_retry; + + local_irq_restore(flags); + while (1) { + if (count-- <= 0) { + _raw_yield(); + count = spin_retry; + } + if (!__raw_read_can_lock(rw)) + continue; + old = rw->lock & 0x7fffffffU; + local_irq_disable(); + if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old) + return; + } +} +EXPORT_SYMBOL(_raw_read_lock_wait_flags); + int _raw_read_trylock_retry(raw_rwlock_t *rw) { unsigned int old; @@ -157,6 +178,25 @@ void _raw_write_lock_wait(raw_rwlock_t *rw) } EXPORT_SYMBOL(_raw_write_lock_wait); +void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags) +{ + int count = spin_retry; + + local_irq_restore(flags); + while (1) { + if (count-- <= 0) { + _raw_yield(); + count = spin_retry; + } + if (!__raw_write_can_lock(rw)) + continue; + local_irq_disable(); + if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) + return; + } +} +EXPORT_SYMBOL(_raw_write_lock_wait_flags); + int _raw_write_trylock_retry(raw_rwlock_t *rw) { int count = 
spin_retry; -- cgit v1.2.3 From 8c4caa4fbfc18aa50d9d682f502303a8e0d72726 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Fri, 12 Jun 2009 10:26:23 +0200 Subject: [S390] use facility list for cpu type safety check Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/lowcore.h | 1 + arch/s390/kernel/head.S | 49 ++++++++++++++++++++++++++++------------- 2 files changed, 35 insertions(+), 15 deletions(-) (limited to 'arch/s390/include/asm') diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h index 3aeca492b147..713fe9fb1fcc 100644 --- a/arch/s390/include/asm/lowcore.h +++ b/arch/s390/include/asm/lowcore.h @@ -30,6 +30,7 @@ #define __LC_SUBCHANNEL_NR 0x00ba #define __LC_IO_INT_PARM 0x00bc #define __LC_IO_INT_WORD 0x00c0 +#define __LC_STFL_FAC_LIST 0x00c8 #define __LC_MCCK_CODE 0x00e8 #define __LC_DUMP_REIPL 0x0e00 diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index 22596d70fc2e..bf8cf1caeffc 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S @@ -479,27 +479,46 @@ startup:basr %r13,0 # get base mvc __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13) mvc __LC_EXIT_TIMER(8),5f-.LPG0(%r13) #ifndef CONFIG_MARCH_G5 - # check processor version against MARCH_{G5,Z900,Z990,Z9_109,Z10} - stidp __LC_CPUID # store cpuid - lhi %r0,(3f-2f) / 2 - la %r1,2f-.LPG0(%r13) -0: clc __LC_CPUID+4(2),0(%r1) - jne 3f - lpsw 1f-.LPG0(13) # machine type not good enough, crash + # check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10} + xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST + stfl __LC_STFL_FAC_LIST # store facility list + tm __LC_STFL_FAC_LIST,0x01 # stfle available ? + jz 0f + la %r0,0 + .insn s,0xb2b00000,__LC_STFL_FAC_LIST # store facility list extended +0: l %r0,__LC_STFL_FAC_LIST + n %r0,2f+8-.LPG0(%r13) + cl %r0,2f+8-.LPG0(%r13) + jne 1f + l %r0,__LC_STFL_FAC_LIST+4 + n %r0,2f+12-.LPG0(%r13) + cl %r0,2f+12-.LPG0(%r13) + je 3f +1: lpsw 2f-.LPG0(13) # machine type not good enough, crash .align 16 -1: .long 0x000a0000,0x00000000 -2: +2: .long 0x000a0000,0x8badcccc +#if defined(CONFIG_64BIT) +#if defined(CONFIG_MARCH_Z10) + .long 0xc100efe3, 0xf0680000 +#elif defined(CONFIG_MARCH_Z9_109) + .long 0xc100efc3, 0x00000000 +#elif defined(CONFIG_MARCH_Z990) + .long 0xc0002000, 0x00000000 +#elif defined(CONFIG_MARCH_Z900) + .long 0xc0000000, 0x00000000 +#endif +#else #if defined(CONFIG_MARCH_Z10) - .short 0x9672, 0x2064, 0x2066, 0x2084, 0x2086, 0x2094, 0x2096 + .long 0x8100c880, 0x00000000 #elif defined(CONFIG_MARCH_Z9_109) - .short 0x9672, 0x2064, 0x2066, 0x2084, 0x2086 + .long 0x8100c880, 0x00000000 #elif defined(CONFIG_MARCH_Z990) - .short 0x9672, 0x2064, 0x2066 + .long 0x80002000, 0x00000000 #elif defined(CONFIG_MARCH_Z900) - .short 0x9672 + .long 0x80000000, 0x00000000 +#endif #endif -3: la %r1,2(%r1) - brct %r0,0b +3: #endif l %r13,4f-.LPG0(%r13) -- cgit v1.2.3 From 7757591ab4a36314a258e181dbf0994415c288c2 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 12 Jun 2009 10:26:25 +0200 Subject: [S390] implement is_compat_task Implement is_compat_task and use it all over the place. 
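A hedged usage sketch (not part of the diff) of what call sites now look like; the address clamp mirrors compat_alloc_user_space() below:

    if (is_compat_task())
        addr &= 0x7fffffffUL;   /* restrict to the 31-bit address space */

With CONFIG_COMPAT disabled the helper is a constant 0, so such branches compile away.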
Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/compat.h | 19 ++++++++++++++++++- arch/s390/kernel/process.c | 3 ++- arch/s390/kernel/ptrace.c | 12 +++++------- arch/s390/kernel/signal.c | 3 ++- arch/s390/kernel/vdso.c | 4 ++-- arch/s390/mm/fault.c | 3 ++- arch/s390/mm/mmap.c | 11 +++++------ 7 files changed, 36 insertions(+), 19 deletions(-) (limited to 'arch/s390/include/asm') diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h index de065b32381a..01a08020bc0e 100644 --- a/arch/s390/include/asm/compat.h +++ b/arch/s390/include/asm/compat.h @@ -5,6 +5,7 @@ */ #include #include +#include #define PSW32_MASK_PER 0x40000000UL #define PSW32_MASK_DAT 0x04000000UL @@ -163,12 +164,28 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr) return (u32)(unsigned long)uptr; } +#ifdef CONFIG_COMPAT + +static inline int is_compat_task(void) +{ + return test_thread_flag(TIF_31BIT); +} + +#else + +static inline int is_compat_task(void) +{ + return 0; +} + +#endif + static inline void __user *compat_alloc_user_space(long len) { unsigned long stack; stack = KSTK_ESP(current); - if (test_thread_flag(TIF_31BIT)) + if (is_compat_task()) stack &= 0x7fffffffUL; return (void __user *) (stack - len); } diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index a3acd8e60aff..355f7a30c3f1 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include #include @@ -204,7 +205,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp, save_fp_regs(&p->thread.fp_regs); /* Set a new TLS ? */ if (clone_flags & CLONE_SETTLS) { - if (test_thread_flag(TIF_31BIT)) { + if (is_compat_task()) { p->thread.acrs[0] = (unsigned int) regs->gprs[6]; } else { p->thread.acrs[0] = (unsigned int)(regs->gprs[6] >> 32); diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 75c496f4f16d..99eef179e903 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -36,7 +36,7 @@ #include #include #include - +#include #include #include #include @@ -69,7 +69,7 @@ FixPerRegisters(struct task_struct *task) if (per_info->single_step) { per_info->control_regs.bits.starting_addr = 0; #ifdef CONFIG_COMPAT - if (test_thread_flag(TIF_31BIT)) + if (is_compat_task()) per_info->control_regs.bits.ending_addr = 0x7fffffffUL; else #endif @@ -482,8 +482,7 @@ static int peek_user_compat(struct task_struct *child, { __u32 tmp; - if (!test_thread_flag(TIF_31BIT) || - (addr & 3) || addr > sizeof(struct user) - 3) + if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user) - 3) return -EIO; tmp = __peek_user_compat(child, addr); @@ -584,8 +583,7 @@ static int __poke_user_compat(struct task_struct *child, static int poke_user_compat(struct task_struct *child, addr_t addr, addr_t data) { - if (!test_thread_flag(TIF_31BIT) || - (addr & 3) || addr > sizeof(struct user32) - 3) + if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user32) - 3) return -EIO; return __poke_user_compat(child, addr, data); @@ -660,7 +658,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) } if (unlikely(current->audit_context)) - audit_syscall_entry(test_thread_flag(TIF_31BIT) ? + audit_syscall_entry(is_compat_task() ? 
AUDIT_ARCH_S390 : AUDIT_ARCH_S390X, regs->gprs[2], regs->orig_gpr2, regs->gprs[3], regs->gprs[4], diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index 3cf74c3ccb69..062bd64e65fa 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -482,7 +483,7 @@ void do_signal(struct pt_regs *regs) /* Whee! Actually deliver the signal. */ int ret; #ifdef CONFIG_COMPAT - if (test_thread_flag(TIF_31BIT)) { + if (is_compat_task()) { ret = handle_signal32(signr, &ka, &info, oldset, regs); } else diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c index 7b76ee4fb16c..45e1708b70fd 100644 --- a/arch/s390/kernel/vdso.c +++ b/arch/s390/kernel/vdso.c @@ -22,7 +22,7 @@ #include #include #include - +#include #include #include #include @@ -214,7 +214,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) vdso_pagelist = vdso64_pagelist; vdso_pages = vdso64_pages; #ifdef CONFIG_COMPAT - if (test_thread_flag(TIF_31BIT)) { + if (is_compat_task()) { vdso_pagelist = vdso32_pagelist; vdso_pages = vdso32_pages; } diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 833e8366c351..220a152c836c 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -239,7 +240,7 @@ static int signal_return(struct mm_struct *mm, struct pt_regs *regs, up_read(&mm->mmap_sem); clear_tsk_thread_flag(current, TIF_SINGLE_STEP); #ifdef CONFIG_COMPAT - compat = test_tsk_thread_flag(current, TIF_31BIT); + compat = is_compat_task(); if (compat && instruction == 0x0a77) sys32_sigreturn(); else if (compat && instruction == 0x0aad) diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index e008d236cc15..f4558ccf02b9 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c @@ -28,6 +28,7 @@ #include #include #include +#include /* * Top of mmap area (just below the process stack). @@ -55,7 +56,7 @@ static inline int mmap_is_legacy(void) /* * Force standard allocation for 64 bit programs. */ - if (!test_thread_flag(TIF_31BIT)) + if (!is_compat_task()) return 1; #endif return sysctl_legacy_va_layout || @@ -91,7 +92,7 @@ EXPORT_SYMBOL_GPL(arch_pick_mmap_layout); int s390_mmap_check(unsigned long addr, unsigned long len) { - if (!test_thread_flag(TIF_31BIT) && + if (!is_compat_task() && len >= TASK_SIZE && TASK_SIZE < (1UL << 53)) return crst_table_upgrade(current->mm, 1UL << 53); return 0; @@ -108,8 +109,7 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr, area = arch_get_unmapped_area(filp, addr, len, pgoff, flags); if (!(area & ~PAGE_MASK)) return area; - if (area == -ENOMEM && - !test_thread_flag(TIF_31BIT) && TASK_SIZE < (1UL << 53)) { + if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) { /* Upgrade the page table to 4 levels and retry. */ rc = crst_table_upgrade(mm, 1UL << 53); if (rc) @@ -131,8 +131,7 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr, area = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags); if (!(area & ~PAGE_MASK)) return area; - if (area == -ENOMEM && - !test_thread_flag(TIF_31BIT) && TASK_SIZE < (1UL << 53)) { + if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) { /* Upgrade the page table to 4 levels and retry. 
*/ rc = crst_table_upgrade(mm, 1UL << 53); if (rc) -- cgit v1.2.3 From bcf5cef7db869dd3b0ec55ad99641e66b2f5cf02 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 12 Jun 2009 10:26:26 +0200 Subject: [S390] secure computing arch backend Enable secure computing on s390 as well. Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/Kconfig | 18 ++++++++++++++++++ arch/s390/include/asm/seccomp.h | 16 ++++++++++++++++ arch/s390/include/asm/thread_info.h | 10 ++++++---- arch/s390/kernel/entry.S | 5 +++-- arch/s390/kernel/entry64.S | 5 +++-- arch/s390/kernel/ptrace.c | 6 +++++- 6 files changed, 51 insertions(+), 9 deletions(-) create mode 100644 arch/s390/include/asm/seccomp.h (limited to 'arch/s390/include/asm') diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 2eca5fe0e75b..1094787e97e5 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -567,6 +567,24 @@ bool "s390 guest support for KVM (EXPERIMENTAL)" the KVM hypervisor. This will add detection for KVM as well as a virtio transport. If KVM is detected, the virtio console will be the default console. + +config SECCOMP + bool "Enable seccomp to safely compute untrusted bytecode" + depends on PROC_FS + default y + help + This kernel feature is useful for number crunching applications + that may need to compute untrusted bytecode during their + execution. By using pipes or other transports made available to + the process as file descriptors supporting the read/write + syscalls, it's possible to isolate those applications in + their own address space using seccomp. Once seccomp is + enabled via /proc//seccomp, it cannot be disabled + and the task is only allowed to execute a few safe syscalls + defined by each seccomp mode. + + If unsure, say Y. + endmenu source "net/Kconfig" diff --git a/arch/s390/include/asm/seccomp.h b/arch/s390/include/asm/seccomp.h new file mode 100644 index 000000000000..781a9cf9b002 --- /dev/null +++ b/arch/s390/include/asm/seccomp.h @@ -0,0 +1,16 @@ +#ifndef _ASM_S390_SECCOMP_H +#define _ASM_S390_SECCOMP_H + +#include + +#define __NR_seccomp_read __NR_read +#define __NR_seccomp_write __NR_write +#define __NR_seccomp_exit __NR_exit +#define __NR_seccomp_sigreturn __NR_sigreturn + +#define __NR_seccomp_read_32 __NR_read +#define __NR_seccomp_write_32 __NR_write +#define __NR_seccomp_exit_32 __NR_exit +#define __NR_seccomp_sigreturn_32 __NR_sigreturn + +#endif /* _ASM_S390_SECCOMP_H */ diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h index 461f2abd2e6f..2f86653dda69 100644 --- a/arch/s390/include/asm/thread_info.h +++ b/arch/s390/include/asm/thread_info.h @@ -83,14 +83,15 @@ static inline struct thread_info *current_thread_info(void) /* * thread information flags bit numbers */ -#define TIF_SYSCALL_TRACE 0 /* syscall trace active */ #define TIF_NOTIFY_RESUME 1 /* callback before returning to user */ #define TIF_SIGPENDING 2 /* signal pending */ #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ #define TIF_RESTART_SVC 4 /* restart svc with new svc number */ -#define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */ #define TIF_SINGLE_STEP 6 /* deliver sigtrap on return to user */ #define TIF_MCCK_PENDING 7 /* machine check handling is pending */ +#define TIF_SYSCALL_TRACE 8 /* syscall trace active */ +#define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */ +#define TIF_SECCOMP 10 /* secure computing */ #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling 
TIF_NEED_RESCHED */ @@ -99,15 +100,16 @@ static inline struct thread_info *current_thread_info(void) #define TIF_RESTORE_SIGMASK 20 /* restore signal mask in do_signal() */ #define TIF_FREEZE 21 /* thread is freezing for suspend */ -#define _TIF_SYSCALL_TRACE (1<>8 | _TIF_SYSCALL_AUDIT>>8 | _TIF_SECCOMP>>8) STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER STACK_SIZE = 1 << STACK_SHIFT @@ -265,7 +266,7 @@ sysc_do_restart: sth %r7,SP_SVCNR(%r15) sll %r7,2 # svc number *4 l %r8,BASED(.Lsysc_table) - tm __TI_flags+3(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT) + tm __TI_flags+2(%r9),_TIF_SYSCALL l %r8,0(%r7,%r8) # get system call addr. bnz BASED(sysc_tracesys) basr %r14,%r8 # call sys_xxxx @@ -405,7 +406,7 @@ sysc_tracego: basr %r14,%r8 # call sys_xxx st %r2,SP_R2(%r15) # store return value sysc_tracenogo: - tm __TI_flags+3(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT) + tm __TI_flags+2(%r9),_TIF_SYSCALL bz BASED(sysc_return) l %r1,BASED(.Ltrace_exit) la %r2,SP_PTREGS(%r15) # load pt_regs diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index 84a105838e03..3cec9b504f5f 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S @@ -56,6 +56,7 @@ _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_SINGLE_STEP ) _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ _TIF_MCCK_PENDING) +_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | _TIF_SECCOMP>>8) #define BASED(name) name-system_call(%r13) @@ -260,7 +261,7 @@ sysc_do_restart: larl %r10,sys_call_table_emu # use 31 bit emulation system calls sysc_noemu: #endif - tm __TI_flags+7(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT) + tm __TI_flags+6(%r9),_TIF_SYSCALL lgf %r8,0(%r7,%r10) # load address of system call routine jnz sysc_tracesys basr %r14,%r8 # call sys_xxxx @@ -391,7 +392,7 @@ sysc_tracego: basr %r14,%r8 # call sys_xxx stg %r2,SP_R2(%r15) # store return value sysc_tracenogo: - tm __TI_flags+7(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT) + tm __TI_flags+6(%r9),_TIF_SYSCALL jz sysc_return la %r2,SP_PTREGS(%r15) # load pt_regs larl %r14,sysc_return # return point is sysc_return diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 99eef179e903..b6fc1ae2ffcb 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -36,7 +36,8 @@ #include #include #include -#include +#include +#include #include #include #include @@ -640,6 +641,9 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) { long ret; + /* Do the secure computing check first. */ + secure_computing(regs->gprs[2]); + /* * The sysc_tracesys code in entry.S stored the system * call number to gprs[2]. -- cgit v1.2.3 From dab4079d5b5ac421208499d5e554a07f9beb16e4 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 12 Jun 2009 10:26:32 +0200 Subject: [S390] uaccess: use might_fault() instead of might_sleep() Adds more checking in case lockdep is turned on. 
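For context, an editor's sketch (simplified, from the generic code of that era, not part of this patch) of why might_fault() is the better annotation here: besides the might_sleep() debug check it can feed lockdep, roughly

    static void might_fault(void)           /* simplified sketch */
    {
            might_sleep();
            if (current->mm)                /* CONFIG_PROVE_LOCKING variant */
                    might_lock_read(&current->mm->mmap_sem);
    }

so every uaccess call site also gets mmap_sem lock-ordering coverage when lockdep is on.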
Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/uaccess.h | 16 ++++++++-------- arch/s390/kvm/kvm-s390.c | 2 +- 2 files changed, 9 insertions(+), 9 deletions(-) (limited to 'arch/s390/include/asm') diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index 0235970278f0..8377e91533d2 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -131,7 +131,7 @@ static inline int __get_user_fn(size_t size, const void __user *ptr, void *x) #define put_user(x, ptr) \ ({ \ - might_sleep(); \ + might_fault(); \ __put_user(x, ptr); \ }) @@ -180,7 +180,7 @@ extern int __put_user_bad(void) __attribute__((noreturn)); #define get_user(x, ptr) \ ({ \ - might_sleep(); \ + might_fault(); \ __get_user(x, ptr); \ }) @@ -231,7 +231,7 @@ __copy_to_user(void __user *to, const void *from, unsigned long n) static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n) { - might_sleep(); + might_fault(); if (access_ok(VERIFY_WRITE, to, n)) n = __copy_to_user(to, from, n); return n; @@ -282,7 +282,7 @@ __copy_from_user(void *to, const void __user *from, unsigned long n) static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) { - might_sleep(); + might_fault(); if (access_ok(VERIFY_READ, from, n)) n = __copy_from_user(to, from, n); else @@ -299,7 +299,7 @@ __copy_in_user(void __user *to, const void __user *from, unsigned long n) static inline unsigned long __must_check copy_in_user(void __user *to, const void __user *from, unsigned long n) { - might_sleep(); + might_fault(); if (__access_ok(from,n) && __access_ok(to,n)) n = __copy_in_user(to, from, n); return n; @@ -312,7 +312,7 @@ static inline long __must_check strncpy_from_user(char *dst, const char __user *src, long count) { long res = -EFAULT; - might_sleep(); + might_fault(); if (access_ok(VERIFY_READ, src, 1)) res = uaccess.strncpy_from_user(count, src, dst); return res; @@ -321,7 +321,7 @@ strncpy_from_user(char *dst, const char __user *src, long count) static inline unsigned long strnlen_user(const char __user * src, unsigned long n) { - might_sleep(); + might_fault(); return uaccess.strnlen_user(n, src); } @@ -354,7 +354,7 @@ __clear_user(void __user *to, unsigned long n) static inline unsigned long __must_check clear_user(void __user *to, unsigned long n) { - might_sleep(); + might_fault(); if (access_ok(VERIFY_WRITE, to, n)) n = uaccess.clear_user(n, to); return n; diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 10bccd1f8aee..c18b21d6991c 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -512,7 +512,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) BUG(); } - might_sleep(); + might_fault(); do { __vcpu_run(vcpu); -- cgit v1.2.3 From 239a64255fae8933d95273b5b92545949ca4e743 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 12 Jun 2009 10:26:33 +0200 Subject: [S390] vmalloc: add vmalloc kernel parameter support With the kernel parameter 'vmalloc=' the size of the vmalloc area can be specified. This can be used to increase or decrease the size of the area. Works in the same way as on some other architectures. This can be useful for features which make excessive use of vmalloc and wouldn't work otherwise. The default sizes remain unchanged: 96MB for 31 bit kernels and 1GB for 64 bit kernels. 
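A usage example (the size is illustrative only): booting with

    vmalloc=512M

on the kernel command line makes parse_vmalloc() below set VMALLOC_START to VMALLOC_END - 512MB, page aligned, i.e. the area simply grows or shrinks relative to the fixed VMALLOC_END.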
Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/pgtable.h | 7 +++++-- arch/s390/mm/pgtable.c | 16 +++++++++++++--- 2 files changed, 18 insertions(+), 5 deletions(-) (limited to 'arch/s390/include/asm') diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 5caddd4f7bed..60a7b1a1702f 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -112,12 +112,15 @@ extern char empty_zero_page[PAGE_SIZE]; * effect, this also makes sure that 64 bit module code cannot be used * as system call address. */ + +extern unsigned long VMALLOC_START; + #ifndef __s390x__ -#define VMALLOC_START 0x78000000UL +#define VMALLOC_SIZE (96UL << 20) #define VMALLOC_END 0x7e000000UL #define VMEM_MAP_END 0x80000000UL #else /* __s390x__ */ -#define VMALLOC_START 0x3e000000000UL +#define VMALLOC_SIZE (1UL << 30) #define VMALLOC_END 0x3e040000000UL #define VMEM_MAP_END 0x40000000000UL #endif /* __s390x__ */ diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index be6c1cf4ad5a..4ca8e826bf30 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -1,7 +1,5 @@ /* - * arch/s390/mm/pgtable.c - * - * Copyright IBM Corp. 2007 + * Copyright IBM Corp. 2007,2009 * Author(s): Martin Schwidefsky */ @@ -53,6 +51,18 @@ void clear_table_pgstes(unsigned long *table) #endif +unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE; +EXPORT_SYMBOL(VMALLOC_START); + +static int __init parse_vmalloc(char *arg) +{ + if (!arg) + return -EINVAL; + VMALLOC_START = (VMALLOC_END - memparse(arg, &arg)) & PAGE_MASK; + return 0; +} +early_param("vmalloc", parse_vmalloc); + unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec) { struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER); -- cgit v1.2.3 From dfd9f7abc0fb67b5781f340d982384cea53b2884 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 12 Jun 2009 10:26:44 +0200 Subject: [S390] ftrace: add dynamic ftrace support Dynamic ftrace support for s390. 
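For illustration, assuming the usual debugfs tracing interface (not part of this patch): once the mcount call sites have been patched to nops at boot, individual functions can be re-enabled at run time, e.g.

    echo 'schedule*' > /sys/kernel/debug/tracing/set_ftrace_filter
    echo function > /sys/kernel/debug/tracing/current_tracer

The filter pattern is just an example; what this patch adds is the s390 side of the code patching (ftrace_make_nop/ftrace_make_call) that the interface relies on.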
Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/Kconfig | 2 + arch/s390/include/asm/ftrace.h | 19 ++++++ arch/s390/include/asm/lowcore.h | 8 ++- arch/s390/kernel/Makefile | 5 ++ arch/s390/kernel/early.c | 4 ++ arch/s390/kernel/ftrace.c | 132 ++++++++++++++++++++++++++++++++++++++++ arch/s390/kernel/mcount.S | 119 ++++++++++++++++++++++++++++-------- arch/s390/kernel/setup.c | 2 + arch/s390/kernel/smp.c | 1 + scripts/recordmcount.pl | 13 ++++ 10 files changed, 276 insertions(+), 29 deletions(-) create mode 100644 arch/s390/kernel/ftrace.c (limited to 'arch/s390/include/asm') diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 1094787e97e5..b674e79044a0 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -82,6 +82,8 @@ config S390 select USE_GENERIC_SMP_HELPERS if SMP select HAVE_SYSCALL_WRAPPERS select HAVE_FUNCTION_TRACER + select HAVE_FTRACE_MCOUNT_RECORD + select HAVE_DYNAMIC_FTRACE select HAVE_DEFAULT_NO_SPIN_MUTEXES select HAVE_OPROFILE select HAVE_KPROBES diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h index 5a5bc75e19d4..ba23d8f97d07 100644 --- a/arch/s390/include/asm/ftrace.h +++ b/arch/s390/include/asm/ftrace.h @@ -2,7 +2,26 @@ #define _ASM_S390_FTRACE_H #ifndef __ASSEMBLY__ + extern void _mcount(void); +extern unsigned long ftrace_dyn_func; + +struct dyn_arch_ftrace { }; + +#define MCOUNT_ADDR ((long)_mcount) + +#ifdef CONFIG_64BIT +#define MCOUNT_INSN_SIZE 24 +#define MCOUNT_OFFSET 14 +#else +#define MCOUNT_INSN_SIZE 30 +#define MCOUNT_OFFSET 8 #endif +static inline unsigned long ftrace_call_adjust(unsigned long addr) +{ + return addr - MCOUNT_OFFSET; +} + +#endif /* __ASSEMBLY__ */ #endif /* _ASM_S390_FTRACE_H */ diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h index 713fe9fb1fcc..5046ad6b7a63 100644 --- a/arch/s390/include/asm/lowcore.h +++ b/arch/s390/include/asm/lowcore.h @@ -68,6 +68,7 @@ #define __LC_CPUID 0x02b0 #define __LC_INT_CLOCK 0x02c8 #define __LC_MACHINE_FLAGS 0x02d8 +#define __LC_FTRACE_FUNC 0x02dc #define __LC_IRB 0x0300 #define __LC_PFAULT_INTPARM 0x0080 #define __LC_CPU_TIMER_SAVE_AREA 0x00d8 @@ -113,6 +114,7 @@ #define __LC_INT_CLOCK 0x0340 #define __LC_VDSO_PER_CPU 0x0350 #define __LC_MACHINE_FLAGS 0x0358 +#define __LC_FTRACE_FUNC 0x0360 #define __LC_IRB 0x0380 #define __LC_PASTE 0x03c0 #define __LC_PFAULT_INTPARM 0x11b8 @@ -281,7 +283,8 @@ struct _lowcore __u64 int_clock; /* 0x02c8 */ __u64 clock_comparator; /* 0x02d0 */ __u32 machine_flags; /* 0x02d8 */ - __u8 pad_0x02dc[0x0300-0x02dc]; /* 0x02dc */ + __u32 ftrace_func; /* 0x02dc */ + __u8 pad_0x02f0[0x0300-0x02f0]; /* 0x02f0 */ /* Interrupt response block */ __u8 irb[64]; /* 0x0300 */ @@ -386,7 +389,8 @@ struct _lowcore __u64 clock_comparator; /* 0x0348 */ __u64 vdso_per_cpu_data; /* 0x0350 */ __u64 machine_flags; /* 0x0358 */ - __u8 pad_0x0360[0x0380-0x0360]; /* 0x0360 */ + __u64 ftrace_func; /* 0x0360 */ + __u8 pad_0x0368[0x0380-0x0368]; /* 0x0368 */ /* Interrupt response block. */ __u8 irb[64]; /* 0x0380 */ diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index 0657de7944fe..ce172bfaab8a 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -7,6 +7,10 @@ ifdef CONFIG_FUNCTION_TRACER CFLAGS_REMOVE_early.o = -pg endif +ifdef CONFIG_DYNAMIC_FTRACE +CFLAGS_REMOVE_ftrace.o = -pg +endif + # # Passing null pointers is ok for smp code, since we access the lowcore here. 
# @@ -41,6 +45,7 @@ obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \ obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_FUNCTION_TRACER) += mcount.o +obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o # Kexec part S390_KEXEC_OBJS := machine_kexec.o crash.o diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index cf09948faad6..fb263736826c 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -410,5 +411,8 @@ void __init startup_init(void) sclp_facilities_detect(); detect_memory_layout(memory_chunk); S390_lowcore.machine_flags = machine_flags; +#ifdef CONFIG_DYNAMIC_FTRACE + S390_lowcore.ftrace_func = (unsigned long)ftrace_caller; +#endif lockdep_on(); } diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c new file mode 100644 index 000000000000..0b81a784e039 --- /dev/null +++ b/arch/s390/kernel/ftrace.c @@ -0,0 +1,132 @@ +/* + * Dynamic function tracer architecture backend. + * + * Copyright IBM Corp. 2009 + * + * Author(s): Heiko Carstens , + * + */ + +#include +#include +#include +#include +#include + +void ftrace_disable_code(void); +void ftrace_call_code(void); +void ftrace_nop_code(void); + +#define FTRACE_INSN_SIZE 4 + +#ifdef CONFIG_64BIT + +asm( + " .align 4\n" + "ftrace_disable_code:\n" + " j 0f\n" + " .word 0x0024\n" + " lg %r1,"__stringify(__LC_FTRACE_FUNC)"\n" + " basr %r14,%r1\n" + " lg %r14,8(15)\n" + " lgr %r0,%r0\n" + "0:\n"); + +asm( + " .align 4\n" + "ftrace_nop_code:\n" + " j .+"__stringify(MCOUNT_INSN_SIZE)"\n"); + +asm( + " .align 4\n" + "ftrace_call_code:\n" + " stg %r14,8(%r15)\n"); + +#else /* CONFIG_64BIT */ + +asm( + " .align 4\n" + "ftrace_disable_code:\n" + " j 0f\n" + " l %r1,"__stringify(__LC_FTRACE_FUNC)"\n" + " basr %r14,%r1\n" + " l %r14,4(%r15)\n" + " j 0f\n" + " bcr 0,%r7\n" + " bcr 0,%r7\n" + " bcr 0,%r7\n" + " bcr 0,%r7\n" + " bcr 0,%r7\n" + " bcr 0,%r7\n" + "0:\n"); + +asm( + " .align 4\n" + "ftrace_nop_code:\n" + " j .+"__stringify(MCOUNT_INSN_SIZE)"\n"); + +asm( + " .align 4\n" + "ftrace_call_code:\n" + " st %r14,4(%r15)\n"); + +#endif /* CONFIG_64BIT */ + +static int ftrace_modify_code(unsigned long ip, + void *old_code, int old_size, + void *new_code, int new_size) +{ + unsigned char replaced[MCOUNT_INSN_SIZE]; + + /* + * Note: Due to modules code can disappear and change. + * We need to protect against faulting as well as code + * changing. We do this by using the probe_kernel_* + * functions. + * This however is just a simple sanity check. 
+ */ + if (probe_kernel_read(replaced, (void *)ip, old_size)) + return -EFAULT; + if (memcmp(replaced, old_code, old_size) != 0) + return -EINVAL; + if (probe_kernel_write((void *)ip, new_code, new_size)) + return -EPERM; + return 0; +} + +static int ftrace_make_initial_nop(struct module *mod, struct dyn_ftrace *rec, + unsigned long addr) +{ + return ftrace_modify_code(rec->ip, + ftrace_call_code, FTRACE_INSN_SIZE, + ftrace_disable_code, MCOUNT_INSN_SIZE); +} + +int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, + unsigned long addr) +{ + if (addr == MCOUNT_ADDR) + return ftrace_make_initial_nop(mod, rec, addr); + return ftrace_modify_code(rec->ip, + ftrace_call_code, FTRACE_INSN_SIZE, + ftrace_nop_code, FTRACE_INSN_SIZE); +} + +int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) +{ + return ftrace_modify_code(rec->ip, + ftrace_nop_code, FTRACE_INSN_SIZE, + ftrace_call_code, FTRACE_INSN_SIZE); +} + +int ftrace_update_ftrace_func(ftrace_func_t func) +{ + ftrace_dyn_func = (unsigned long)func; + return 0; +} + +int __init ftrace_dyn_arch_init(void *data) +{ + *(unsigned long *)data = 0; + return 0; +} diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S index 80641224a095..de274996f640 100644 --- a/arch/s390/kernel/mcount.S +++ b/arch/s390/kernel/mcount.S @@ -1,5 +1,5 @@ /* - * Copyright IBM Corp. 2008 + * Copyright IBM Corp. 2008,2009 * * Author(s): Heiko Carstens , * @@ -7,36 +7,46 @@ #include -#ifndef CONFIG_64BIT -.globl _mcount + .globl ftrace_stub +ftrace_stub: + br %r14 + +#ifdef CONFIG_64BIT + +#ifdef CONFIG_DYNAMIC_FTRACE + + .globl _mcount _mcount: - stm %r0,%r5,8(%r15) - st %r14,56(%r15) - lr %r1,%r15 - ahi %r15,-96 - l %r3,100(%r15) - la %r2,0(%r14) - st %r1,__SF_BACKCHAIN(%r15) - la %r3,0(%r3) - bras %r14,0f - .long ftrace_trace_function -0: l %r14,0(%r14) - l %r14,0(%r14) - basr %r14,%r14 - ahi %r15,96 - lm %r0,%r5,8(%r15) - l %r14,56(%r15) br %r14 -.globl ftrace_stub -ftrace_stub: + .globl ftrace_caller +ftrace_caller: + stmg %r2,%r5,32(%r15) + stg %r14,112(%r15) + lgr %r1,%r15 + aghi %r15,-160 + stg %r1,__SF_BACKCHAIN(%r15) + lgr %r2,%r14 + lg %r3,168(%r15) + larl %r14,ftrace_dyn_func + lg %r14,0(%r14) + basr %r14,%r14 + aghi %r15,160 + lmg %r2,%r5,32(%r15) + lg %r14,112(%r15) br %r14 -#else /* CONFIG_64BIT */ + .data + .globl ftrace_dyn_func +ftrace_dyn_func: + .quad ftrace_stub + .previous + +#else /* CONFIG_DYNAMIC_FTRACE */ -.globl _mcount + .globl _mcount _mcount: - stmg %r0,%r5,16(%r15) + stmg %r2,%r5,32(%r15) stg %r14,112(%r15) lgr %r1,%r15 aghi %r15,-160 @@ -47,12 +57,67 @@ _mcount: lg %r14,0(%r14) basr %r14,%r14 aghi %r15,160 - lmg %r0,%r5,16(%r15) + lmg %r2,%r5,32(%r15) lg %r14,112(%r15) br %r14 -.globl ftrace_stub -ftrace_stub: +#endif /* CONFIG_DYNAMIC_FTRACE */ + +#else /* CONFIG_64BIT */ + +#ifdef CONFIG_DYNAMIC_FTRACE + + .globl _mcount +_mcount: + br %r14 + + .globl ftrace_caller +ftrace_caller: + stm %r2,%r5,16(%r15) + st %r14,56(%r15) + lr %r1,%r15 + ahi %r15,-96 + l %r3,100(%r15) + la %r2,0(%r14) + st %r1,__SF_BACKCHAIN(%r15) + la %r3,0(%r3) + bras %r14,0f + .long ftrace_dyn_func +0: l %r14,0(%r14) + l %r14,0(%r14) + basr %r14,%r14 + ahi %r15,96 + lm %r2,%r5,16(%r15) + l %r14,56(%r15) + br %r14 + + .data + .globl ftrace_dyn_func +ftrace_dyn_func: + .long ftrace_stub + .previous + +#else /* CONFIG_DYNAMIC_FTRACE */ + + .globl _mcount +_mcount: + stm %r2,%r5,16(%r15) + st %r14,56(%r15) + lr %r1,%r15 + ahi %r15,-96 + l %r3,100(%r15) + la %r2,0(%r14) + st %r1,__SF_BACKCHAIN(%r15) + la %r3,0(%r3) + bras %r14,0f + .long 
ftrace_trace_function +0: l %r14,0(%r14) + l %r14,0(%r14) + basr %r14,%r14 + ahi %r15,96 + lm %r2,%r5,16(%r15) + l %r14,56(%r15) br %r14 +#endif /* CONFIG_DYNAMIC_FTRACE */ #endif /* CONFIG_64BIT */ diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 7402b6a39ead..9717717c6fea 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -42,6 +42,7 @@ #include #include #include +#include #include #include @@ -442,6 +443,7 @@ setup_lowcore(void) lc->steal_timer = S390_lowcore.steal_timer; lc->last_update_timer = S390_lowcore.last_update_timer; lc->last_update_clock = S390_lowcore.last_update_clock; + lc->ftrace_func = S390_lowcore.ftrace_func; set_prefix((u32)(unsigned long) lc); lowcore_ptr[0] = lc; } diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 0af302ceac2d..cc8c484984e3 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -572,6 +572,7 @@ int __cpuinit __cpu_up(unsigned int cpu) cpu_lowcore->cpu_nr = cpu; cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce; cpu_lowcore->machine_flags = S390_lowcore.machine_flags; + cpu_lowcore->ftrace_func = S390_lowcore.ftrace_func; eieio(); while (signal_processor(cpu, sigp_restart) == sigp_busy) diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl index 0fae7da0529c..91033e67321e 100755 --- a/scripts/recordmcount.pl +++ b/scripts/recordmcount.pl @@ -185,6 +185,19 @@ if ($arch eq "x86_64") { $objcopy .= " -O elf32-i386"; $cc .= " -m32"; +} elsif ($arch eq "s390" && $bits == 32) { + $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_390_32\\s+_mcount\$"; + $alignment = 4; + $ld .= " -m elf_s390"; + $cc .= " -m31"; + +} elsif ($arch eq "s390" && $bits == 64) { + $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_390_(PC|PLT)32DBL\\s+_mcount\\+0x2\$"; + $alignment = 8; + $type = ".quad"; + $ld .= " -m elf64_s390"; + $cc .= " -m64"; + } elsif ($arch eq "sh") { $alignment = 2; -- cgit v1.2.3 From 88dbd2037229bd2ed7543ffd0d8f2d9dec9d31d2 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 12 Jun 2009 10:26:46 +0200 Subject: [S390] ftrace: add function graph tracer support Function graph tracer support for s390. 
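Usage illustration via the generic tracing interface (not part of this diff):

    echo function_graph > /sys/kernel/debug/tracing/current_tracer
    cat /sys/kernel/debug/tracing/trace

This depends on the prepare_ftrace_return()/return_to_handler hooks added below, which swap the return address so that function exit can be timed.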
Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/Kconfig | 1 + arch/s390/include/asm/ftrace.h | 10 +++--- arch/s390/kernel/Makefile | 6 ++-- arch/s390/kernel/ftrace.c | 72 ++++++++++++++++++++++++++++++++++++++ arch/s390/kernel/mcount.S | 79 ++++++++++++++++++++++++++++++++++++++++++ arch/s390/kernel/s390_ext.c | 3 +- arch/s390/kernel/time.c | 2 +- arch/s390/kernel/vmlinux.lds.S | 1 + drivers/s390/cio/cio.c | 4 +-- 9 files changed, 166 insertions(+), 12 deletions(-) (limited to 'arch/s390/include/asm') diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 480590f21570..9023cc900bda 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -85,6 +85,7 @@ config S390 select HAVE_FUNCTION_TRACE_MCOUNT_TEST select HAVE_FTRACE_MCOUNT_RECORD select HAVE_DYNAMIC_FTRACE + select HAVE_FUNCTION_GRAPH_TRACER select HAVE_DEFAULT_NO_SPIN_MUTEXES select HAVE_OPROFILE select HAVE_KPROBES diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h index ba23d8f97d07..96c14a9102b8 100644 --- a/arch/s390/include/asm/ftrace.h +++ b/arch/s390/include/asm/ftrace.h @@ -11,11 +11,13 @@ struct dyn_arch_ftrace { }; #define MCOUNT_ADDR ((long)_mcount) #ifdef CONFIG_64BIT -#define MCOUNT_INSN_SIZE 24 -#define MCOUNT_OFFSET 14 +#define MCOUNT_OFFSET_RET 18 +#define MCOUNT_INSN_SIZE 24 +#define MCOUNT_OFFSET 14 #else -#define MCOUNT_INSN_SIZE 30 -#define MCOUNT_OFFSET 8 +#define MCOUNT_OFFSET_RET 26 +#define MCOUNT_INSN_SIZE 30 +#define MCOUNT_OFFSET 8 #endif static inline unsigned long ftrace_call_adjust(unsigned long addr) diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index ce172bfaab8a..c75ed43b1a18 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -3,11 +3,8 @@ # ifdef CONFIG_FUNCTION_TRACER -# Do not trace early boot code +# Don't trace early setup code and tracing code CFLAGS_REMOVE_early.o = -pg -endif - -ifdef CONFIG_DYNAMIC_FTRACE CFLAGS_REMOVE_ftrace.o = -pg endif @@ -46,6 +43,7 @@ obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_FUNCTION_TRACER) += mcount.o obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o +obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o # Kexec part S390_KEXEC_OBJS := machine_kexec.o crash.o diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c index 0b81a784e039..c92a10953279 100644 --- a/arch/s390/kernel/ftrace.c +++ b/arch/s390/kernel/ftrace.c @@ -7,13 +7,17 @@ * */ +#include #include #include #include #include #include +#ifdef CONFIG_DYNAMIC_FTRACE + void ftrace_disable_code(void); +void ftrace_disable_return(void); void ftrace_call_code(void); void ftrace_nop_code(void); @@ -28,6 +32,7 @@ asm( " .word 0x0024\n" " lg %r1,"__stringify(__LC_FTRACE_FUNC)"\n" " basr %r14,%r1\n" + "ftrace_disable_return:\n" " lg %r14,8(15)\n" " lgr %r0,%r0\n" "0:\n"); @@ -50,6 +55,7 @@ asm( " j 0f\n" " l %r1,"__stringify(__LC_FTRACE_FUNC)"\n" " basr %r14,%r1\n" + "ftrace_disable_return:\n" " l %r14,4(%r15)\n" " j 0f\n" " bcr 0,%r7\n" @@ -130,3 +136,69 @@ int __init ftrace_dyn_arch_init(void *data) *(unsigned long *)data = 0; return 0; } + +#endif /* CONFIG_DYNAMIC_FTRACE */ + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +#ifdef CONFIG_DYNAMIC_FTRACE +/* + * Patch the kernel code at ftrace_graph_caller location: + * The instruction there is branch relative on condition. The condition mask + * is either all ones (always branch aka disable ftrace_graph_caller) or all + * zeroes (nop aka enable ftrace_graph_caller). 
+ * Instruction format for brc is a7m4xxxx where m is the condition mask. + */ +int ftrace_enable_ftrace_graph_caller(void) +{ + unsigned short opcode = 0xa704; + + return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode)); +} + +int ftrace_disable_ftrace_graph_caller(void) +{ + unsigned short opcode = 0xa7f4; + + return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode)); +} + +static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr) +{ + return addr - (ftrace_disable_return - ftrace_disable_code); +} + +#else /* CONFIG_DYNAMIC_FTRACE */ + +static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr) +{ + return addr - MCOUNT_OFFSET_RET; +} + +#endif /* CONFIG_DYNAMIC_FTRACE */ + +/* + * Hook the return address and push it in the stack of return addresses + * in current thread info. + */ +unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent) +{ + struct ftrace_graph_ent trace; + + /* Nmi's are currently unsupported. */ + if (unlikely(in_nmi())) + goto out; + if (unlikely(atomic_read(¤t->tracing_graph_pause))) + goto out; + if (ftrace_push_return_trace(parent, ip, &trace.depth) == -EBUSY) + goto out; + trace.func = ftrace_mcount_call_adjust(ip) & PSW_ADDR_INSN; + /* Only trace if the calling function expects to. */ + if (!ftrace_graph_entry(&trace)) { + current->curr_ret_stack--; + goto out; + } + parent = (unsigned long)return_to_handler; +out: + return parent; +} +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S index 0aa85ec94d08..2a0a5e97ba8c 100644 --- a/arch/s390/kernel/mcount.S +++ b/arch/s390/kernel/mcount.S @@ -34,6 +34,18 @@ ftrace_caller: larl %r14,ftrace_dyn_func lg %r14,0(%r14) basr %r14,%r14 +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + .globl ftrace_graph_caller +ftrace_graph_caller: + # This unconditional branch gets runtime patched. Change only if + # you know what you are doing. See ftrace_enable_graph_caller(). + j 0f + lg %r2,272(%r15) + lg %r3,168(%r15) + brasl %r14,prepare_ftrace_return + stg %r2,168(%r15) +0: +#endif aghi %r15,160 lmg %r2,%r5,32(%r15) lg %r14,112(%r15) @@ -62,6 +74,12 @@ _mcount: larl %r14,ftrace_trace_function lg %r14,0(%r14) basr %r14,%r14 +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + lg %r2,272(%r15) + lg %r3,168(%r15) + brasl %r14,prepare_ftrace_return + stg %r2,168(%r15) +#endif aghi %r15,160 lmg %r2,%r5,32(%r15) lg %r14,112(%r15) @@ -69,6 +87,22 @@ _mcount: #endif /* CONFIG_DYNAMIC_FTRACE */ +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + + .globl return_to_handler +return_to_handler: + stmg %r2,%r5,32(%r15) + lgr %r1,%r15 + aghi %r15,-160 + stg %r1,__SF_BACKCHAIN(%r15) + brasl %r14,ftrace_return_to_handler + aghi %r15,160 + lgr %r14,%r2 + lmg %r2,%r5,32(%r15) + br %r14 + +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ + #else /* CONFIG_64BIT */ #ifdef CONFIG_DYNAMIC_FTRACE @@ -96,6 +130,21 @@ ftrace_caller: l %r14,0b-0b(%r1) l %r14,0(%r14) basr %r14,%r14 +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + .globl ftrace_graph_caller +ftrace_graph_caller: + # This unconditional branch gets runtime patched. Change only if + # you know what you are doing. See ftrace_enable_graph_caller(). 
+ j 1f + bras %r1,0f + .long prepare_ftrace_return +0: l %r2,152(%r15) + l %r4,0(%r1) + l %r3,100(%r15) + basr %r14,%r4 + st %r2,100(%r15) +1: +#endif ahi %r15,96 l %r14,56(%r15) 3: lm %r2,%r5,16(%r15) @@ -128,10 +177,40 @@ _mcount: l %r14,0b-0b(%r1) l %r14,0(%r14) basr %r14,%r14 +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + bras %r1,0f + .long prepare_ftrace_return +0: l %r2,152(%r15) + l %r4,0(%r1) + l %r3,100(%r15) + basr %r14,%r4 + st %r2,100(%r15) +#endif ahi %r15,96 l %r14,56(%r15) 3: lm %r2,%r5,16(%r15) br %r14 #endif /* CONFIG_DYNAMIC_FTRACE */ + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + + .globl return_to_handler +return_to_handler: + stm %r2,%r5,16(%r15) + st %r14,56(%r15) + lr %r0,%r15 + ahi %r15,-96 + st %r0,__SF_BACKCHAIN(%r15) + bras %r1,0f + .long ftrace_return_to_handler +0: l %r2,0b-0b(%r1) + basr %r14,%r2 + lr %r14,%r2 + ahi %r15,96 + lm %r2,%r5,16(%r15) + br %r14 + +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ + #endif /* CONFIG_64BIT */ diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c index 6b0686d78fc7..0de305b598ce 100644 --- a/arch/s390/kernel/s390_ext.c +++ b/arch/s390/kernel/s390_ext.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -112,7 +113,7 @@ int unregister_early_external_interrupt(__u16 code, ext_int_handler_t handler, return 0; } -void do_extint(struct pt_regs *regs, unsigned short code) +void __irq_entry do_extint(struct pt_regs *regs, unsigned short code) { ext_int_info_t *p; int index; diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index ad9a999aaa92..215330a2c128 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -70,7 +70,7 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators); /* * Scheduler clock - returns current time in nanosec units. */ -unsigned long long sched_clock(void) +unsigned long long notrace sched_clock(void) { return ((get_clock_xt() - sched_clock_base_cc) * 125) >> 9; } diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index 89399b8756c2..a53db23ee092 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -34,6 +34,7 @@ SECTIONS SCHED_TEXT LOCK_TEXT KPROBES_TEXT + IRQENTRY_TEXT *(.fixup) *(.gnu.warning) } :text = 0x0700 diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 9889f188c7c5..5ec7789bd9d8 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -12,6 +12,7 @@ #define KMSG_COMPONENT "cio" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#include #include #include #include @@ -626,8 +627,7 @@ out: * handlers). * */ -void -do_IRQ (struct pt_regs *regs) +void __irq_entry do_IRQ(struct pt_regs *regs) { struct tpi_info *tpi_info; struct subchannel *sch; -- cgit v1.2.3 From 9bf1226b33dc2f94fc37ffd70ee161e2bda1ff5c Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 12 Jun 2009 10:26:47 +0200 Subject: [S390] ftrace: add system call tracer support System call tracer support for s390. 
Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/Kconfig | 1 + arch/s390/include/asm/syscall.h | 1 + arch/s390/include/asm/thread_info.h | 2 ++ arch/s390/kernel/entry.S | 4 ++- arch/s390/kernel/entry64.S | 4 ++- arch/s390/kernel/ftrace.c | 56 +++++++++++++++++++++++++++++++++++++ arch/s390/kernel/ptrace.c | 7 +++++ 7 files changed, 73 insertions(+), 2 deletions(-) (limited to 'arch/s390/include/asm') diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 9023cc900bda..99dc3ded6b49 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -84,6 +84,7 @@ config S390 select HAVE_FUNCTION_TRACER select HAVE_FUNCTION_TRACE_MCOUNT_TEST select HAVE_FTRACE_MCOUNT_RECORD + select HAVE_FTRACE_SYSCALLS select HAVE_DYNAMIC_FTRACE select HAVE_FUNCTION_GRAPH_TRACER select HAVE_DEFAULT_NO_SPIN_MUTEXES diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h index 2429b87eb28d..e0a73d3eb837 100644 --- a/arch/s390/include/asm/syscall.h +++ b/arch/s390/include/asm/syscall.h @@ -12,6 +12,7 @@ #ifndef _ASM_SYSCALL_H #define _ASM_SYSCALL_H 1 +#include #include static inline long syscall_get_nr(struct task_struct *task, diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h index 2f86653dda69..925bcc649035 100644 --- a/arch/s390/include/asm/thread_info.h +++ b/arch/s390/include/asm/thread_info.h @@ -92,6 +92,7 @@ static inline struct thread_info *current_thread_info(void) #define TIF_SYSCALL_TRACE 8 /* syscall trace active */ #define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */ #define TIF_SECCOMP 10 /* secure computing */ +#define TIF_SYSCALL_FTRACE 11 /* ftrace syscall instrumentation */ #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */ @@ -110,6 +111,7 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_SYSCALL_TRACE (1<>8 | _TIF_SYSCALL_AUDIT>>8 | _TIF_SECCOMP>>8) +_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \ + _TIF_SECCOMP>>8 | _TIF_SYSCALL_FTRACE>>8) STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER STACK_SIZE = 1 << STACK_SHIFT @@ -1108,6 +1109,7 @@ cleanup_io_leave_insn: .section .rodata, "a" #define SYSCALL(esa,esame,emu) .long esa + .globl sys_call_table sys_call_table: #include "syscalls.S" #undef SYSCALL diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index 3cec9b504f5f..f6618e9e15ef 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S @@ -56,7 +56,8 @@ _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_SINGLE_STEP ) _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ _TIF_MCCK_PENDING) -_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | _TIF_SECCOMP>>8) +_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \ + _TIF_SECCOMP>>8 | _TIF_SYSCALL_FTRACE>>8) #define BASED(name) name-system_call(%r13) @@ -1059,6 +1060,7 @@ cleanup_io_leave_insn: .section .rodata, "a" #define SYSCALL(esa,esame,emu) .long esame + .globl sys_call_table sys_call_table: #include "syscalls.S" #undef SYSCALL diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c index c92a10953279..82ddfd3a75af 100644 --- a/arch/s390/kernel/ftrace.c +++ b/arch/s390/kernel/ftrace.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #ifdef CONFIG_DYNAMIC_FTRACE @@ -202,3 +203,58 @@ out: return parent; } #endif /* 
CONFIG_FUNCTION_GRAPH_TRACER */ + +#ifdef CONFIG_FTRACE_SYSCALLS + +extern unsigned long __start_syscalls_metadata[]; +extern unsigned long __stop_syscalls_metadata[]; +extern unsigned int sys_call_table[]; + +static struct syscall_metadata **syscalls_metadata; + +struct syscall_metadata *syscall_nr_to_meta(int nr) +{ + if (!syscalls_metadata || nr >= NR_syscalls || nr < 0) + return NULL; + + return syscalls_metadata[nr]; +} + +static struct syscall_metadata *find_syscall_meta(unsigned long syscall) +{ + struct syscall_metadata *start; + struct syscall_metadata *stop; + char str[KSYM_SYMBOL_LEN]; + + start = (struct syscall_metadata *)__start_syscalls_metadata; + stop = (struct syscall_metadata *)__stop_syscalls_metadata; + kallsyms_lookup(syscall, NULL, NULL, NULL, str); + + for ( ; start < stop; start++) { + if (start->name && !strcmp(start->name + 3, str + 3)) + return start; + } + return NULL; +} + +void arch_init_ftrace_syscalls(void) +{ + struct syscall_metadata *meta; + int i; + static atomic_t refs; + + if (atomic_inc_return(&refs) != 1) + goto out; + syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * NR_syscalls, + GFP_KERNEL); + if (!syscalls_metadata) + goto out; + for (i = 0; i < NR_syscalls; i++) { + meta = find_syscall_meta((unsigned long)sys_call_table[i]); + syscalls_metadata[i] = meta; + } + return; +out: + atomic_dec(&refs); +} +#endif diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index b6fc1ae2ffcb..490b39934d65 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include #include @@ -661,6 +662,9 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) ret = -1; } + if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE))) + ftrace_syscall_enter(regs); + if (unlikely(current->audit_context)) audit_syscall_entry(is_compat_task() ? 
AUDIT_ARCH_S390 : AUDIT_ARCH_S390X, @@ -676,6 +680,9 @@ asmlinkage void do_syscall_trace_exit(struct pt_regs *regs) audit_syscall_exit(AUDITSC_RESULT(regs->gprs[2]), regs->gprs[2]); + if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE))) + ftrace_syscall_exit(regs); + if (test_thread_flag(TIF_SYSCALL_TRACE)) tracehook_report_syscall_exit(regs, 0); } -- cgit v1.2.3 From fc39453debac927a3ba477bafbc6c18c7166ae78 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 12 Jun 2009 10:26:48 +0200 Subject: [S390] wire up sys_rt_tgsigqueueinfo Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/unistd.h | 3 ++- arch/s390/kernel/compat_wrapper.S | 8 ++++++++ arch/s390/kernel/syscalls.S | 1 + 3 files changed, 11 insertions(+), 1 deletion(-) (limited to 'arch/s390/include/asm') diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h index f0f19e6ace6c..90557237172b 100644 --- a/arch/s390/include/asm/unistd.h +++ b/arch/s390/include/asm/unistd.h @@ -267,7 +267,8 @@ #define __NR_epoll_create1 327 #define __NR_preadv 328 #define __NR_pwritev 329 -#define NR_syscalls 330 +#define __NR_rt_tgsigqueueinfo 330 +#define NR_syscalls 331 /* * There are some system calls that are not present on 64 bit, some diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S index fb38af6316bb..d2e7fbf5d65d 100644 --- a/arch/s390/kernel/compat_wrapper.S +++ b/arch/s390/kernel/compat_wrapper.S @@ -1823,3 +1823,11 @@ compat_sys_pwritev_wrapper: llgfr %r5,%r5 # u32 llgfr %r6,%r6 # u32 jg compat_sys_pwritev # branch to system call + + .globl compat_sys_rt_tgsigqueueinfo_wrapper +compat_sys_rt_tgsigqueueinfo_wrapper: + lgfr %r2,%r2 # compat_pid_t + lgfr %r3,%r3 # compat_pid_t + lgfr %r4,%r4 # int + llgtr %r5,%r5 # struct compat_siginfo * + jg compat_sys_rt_tgsigqueueinfo_wrapper # branch to system call diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S index 2c7739fe70b1..82f89ef87d2e 100644 --- a/arch/s390/kernel/syscalls.S +++ b/arch/s390/kernel/syscalls.S @@ -338,3 +338,4 @@ SYSCALL(sys_dup3,sys_dup3,sys_dup3_wrapper) SYSCALL(sys_epoll_create1,sys_epoll_create1,sys_epoll_create1_wrapper) SYSCALL(sys_preadv,sys_preadv,compat_sys_preadv_wrapper) SYSCALL(sys_pwritev,sys_pwritev,compat_sys_pwritev_wrapper) +SYSCALL(sys_rt_tgsigqueueinfo,sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo_wrapper) /* 330 */ -- cgit v1.2.3 From 310d6b671588dd7695cbc0d09d02e41d94a42bed Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 12 Jun 2009 10:26:49 +0200 Subject: [S390] wire up sys_perf_counter_open Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/unistd.h | 3 ++- arch/s390/kernel/compat_wrapper.S | 9 +++++++++ arch/s390/kernel/syscalls.S | 1 + 3 files changed, 12 insertions(+), 1 deletion(-) (limited to 'arch/s390/include/asm') diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h index 90557237172b..c80602d7c880 100644 --- a/arch/s390/include/asm/unistd.h +++ b/arch/s390/include/asm/unistd.h @@ -268,7 +268,8 @@ #define __NR_preadv 328 #define __NR_pwritev 329 #define __NR_rt_tgsigqueueinfo 330 -#define NR_syscalls 331 +#define __NR_perf_counter_open 331 +#define NR_syscalls 332 /* * There are some system calls that are not present on 64 bit, some diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S index d2e7fbf5d65d..88a83366819f 100644 --- a/arch/s390/kernel/compat_wrapper.S +++ b/arch/s390/kernel/compat_wrapper.S @@ -1831,3 +1831,12 @@ 
compat_sys_rt_tgsigqueueinfo_wrapper: lgfr %r4,%r4 # int llgtr %r5,%r5 # struct compat_siginfo * jg compat_sys_rt_tgsigqueueinfo_wrapper # branch to system call + + .globl sys_perf_counter_open_wrapper +sys_perf_counter_open_wrapper: + llgtr %r2,%r2 # const struct perf_counter_attr * + lgfr %r3,%r3 # pid_t + lgfr %r4,%r4 # int + lgfr %r5,%r5 # int + llgfr %r6,%r6 # unsigned long + jg sys_perf_counter_open # branch to system call diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S index 82f89ef87d2e..ad1acd200385 100644 --- a/arch/s390/kernel/syscalls.S +++ b/arch/s390/kernel/syscalls.S @@ -339,3 +339,4 @@ SYSCALL(sys_epoll_create1,sys_epoll_create1,sys_epoll_create1_wrapper) SYSCALL(sys_preadv,sys_preadv,compat_sys_preadv_wrapper) SYSCALL(sys_pwritev,sys_pwritev,compat_sys_pwritev_wrapper) SYSCALL(sys_rt_tgsigqueueinfo,sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo_wrapper) /* 330 */ +SYSCALL(sys_perf_counter_open,sys_perf_counter_open,sys_perf_counter_open_wrapper) -- cgit v1.2.3 From 1380a37e3da5d9e14ea5c2a4c6ab2b307a2798ea Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 15 May 2009 00:52:00 +0200 Subject: PM: Remove unused asm/suspend.h This patch removes unused asm/suspend.h files for the following architectures: alpha, arm, ia64, m68k, mips, s390, um Signed-off-by: Magnus Damm Acked-by: Pavel Machek Signed-off-by: Rafael J. Wysocki --- arch/alpha/include/asm/suspend.h | 6 ------ arch/arm/include/asm/suspend.h | 4 ---- arch/ia64/include/asm/suspend.h | 1 - arch/m68k/include/asm/suspend.h | 6 ------ arch/mips/include/asm/suspend.h | 6 ------ arch/s390/include/asm/suspend.h | 5 ----- arch/um/include/asm/suspend.h | 4 ---- 7 files changed, 32 deletions(-) delete mode 100644 arch/alpha/include/asm/suspend.h delete mode 100644 arch/arm/include/asm/suspend.h delete mode 100644 arch/ia64/include/asm/suspend.h delete mode 100644 arch/m68k/include/asm/suspend.h delete mode 100644 arch/mips/include/asm/suspend.h delete mode 100644 arch/s390/include/asm/suspend.h delete mode 100644 arch/um/include/asm/suspend.h (limited to 'arch/s390/include/asm') diff --git a/arch/alpha/include/asm/suspend.h b/arch/alpha/include/asm/suspend.h deleted file mode 100644 index c7042d575851..000000000000 --- a/arch/alpha/include/asm/suspend.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ALPHA_SUSPEND_H -#define __ALPHA_SUSPEND_H - -/* Dummy include. */ - -#endif /* __ALPHA_SUSPEND_H */ diff --git a/arch/arm/include/asm/suspend.h b/arch/arm/include/asm/suspend.h deleted file mode 100644 index cf0d0bdee74d..000000000000 --- a/arch/arm/include/asm/suspend.h +++ /dev/null @@ -1,4 +0,0 @@ -#ifndef _ASMARM_SUSPEND_H -#define _ASMARM_SUSPEND_H - -#endif diff --git a/arch/ia64/include/asm/suspend.h b/arch/ia64/include/asm/suspend.h deleted file mode 100644 index b05bbb6074e2..000000000000 --- a/arch/ia64/include/asm/suspend.h +++ /dev/null @@ -1 +0,0 @@ -/* dummy (must be non-empty to prevent prejudicial removal...) */ diff --git a/arch/m68k/include/asm/suspend.h b/arch/m68k/include/asm/suspend.h deleted file mode 100644 index 57b3ddb4d269..000000000000 --- a/arch/m68k/include/asm/suspend.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _M68K_SUSPEND_H -#define _M68K_SUSPEND_H - -/* Dummy include. */ - -#endif /* _M68K_SUSPEND_H */ diff --git a/arch/mips/include/asm/suspend.h b/arch/mips/include/asm/suspend.h deleted file mode 100644 index 2562f8f9be0e..000000000000 --- a/arch/mips/include/asm/suspend.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ASM_SUSPEND_H -#define __ASM_SUSPEND_H - -/* Somewhen... 
Maybe :-) */ - -#endif /* __ASM_SUSPEND_H */ diff --git a/arch/s390/include/asm/suspend.h b/arch/s390/include/asm/suspend.h deleted file mode 100644 index 1f34580e67a7..000000000000 --- a/arch/s390/include/asm/suspend.h +++ /dev/null @@ -1,5 +0,0 @@ -#ifndef __ASM_S390_SUSPEND_H -#define __ASM_S390_SUSPEND_H - -#endif - diff --git a/arch/um/include/asm/suspend.h b/arch/um/include/asm/suspend.h deleted file mode 100644 index f4e8e007f468..000000000000 --- a/arch/um/include/asm/suspend.h +++ /dev/null @@ -1,4 +0,0 @@ -#ifndef __UM_SUSPEND_H -#define __UM_SUSPEND_H - -#endif -- cgit v1.2.3