author     Linus Torvalds <torvalds@g5.osdl.org>   2006-01-15 04:43:21 +0100
committer  Linus Torvalds <torvalds@g5.osdl.org>   2006-01-15 04:43:21 +0100
commit     8d5c315059460e665c804d5a9b641f7f0a1e9dd7 (patch)
tree       b9c598b2ac7a4fd7cf121ce733b98b0f18b8a1df /arch/arm/kernel
parent     [PATCH] When CONFIG_CC_OPTIMIZE_FOR_SIZE, allow gcc4 to control inlining (diff)
parent     [ARM] 3262/4: allow ptraced syscalls to be overriden (diff)
download   linux-8d5c315059460e665c804d5a9b641f7f0a1e9dd7.tar.xz
           linux-8d5c315059460e665c804d5a9b641f7f0a1e9dd7.zip
Merge master.kernel.org:/home/rmk/linux-2.6-arm
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/Makefile            |    1
-rw-r--r--  arch/arm/kernel/armksyms.c          |   22
-rw-r--r--  arch/arm/kernel/calls.S             |   59
-rw-r--r--  arch/arm/kernel/entry-armv.S        |   24
-rw-r--r--  arch/arm/kernel/entry-common.S      |  146
-rw-r--r--  arch/arm/kernel/entry-header.S      |    1
-rw-r--r--  arch/arm/kernel/head.S              |    7
-rw-r--r--  arch/arm/kernel/ptrace.c            |   15
-rw-r--r--  arch/arm/kernel/semaphore.c         |   17
-rw-r--r--  arch/arm/kernel/sys_arm.c           |    2
-rw-r--r--  arch/arm/kernel/sys_oabi-compat.c   |  339
-rw-r--r--  arch/arm/kernel/traps.c             |    2
12 files changed, 574 insertions, 61 deletions
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index de94b0f3ee2a..2ce0e3a27a45 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_ARTHUR) += arthur.o
obj-$(CONFIG_ISA_DMA) += dma-isa.o
obj-$(CONFIG_PCI) += bios32.o
obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_OABI_COMPAT) += sys_oabi-compat.o
obj-$(CONFIG_IWMMXT) += iwmmxt.o
AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index 9997098009a9..1574941ebfe1 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -35,6 +35,16 @@ extern void __udivsi3(void);
extern void __umodsi3(void);
extern void __do_div64(void);
+extern void __aeabi_idiv(void);
+extern void __aeabi_idivmod(void);
+extern void __aeabi_lasr(void);
+extern void __aeabi_llsl(void);
+extern void __aeabi_llsr(void);
+extern void __aeabi_lmul(void);
+extern void __aeabi_uidiv(void);
+extern void __aeabi_uidivmod(void);
+extern void __aeabi_ulcmp(void);
+
extern void fpundefinstr(void);
extern void fp_enter(void);
@@ -141,6 +151,18 @@ EXPORT_SYMBOL(__udivsi3);
EXPORT_SYMBOL(__umodsi3);
EXPORT_SYMBOL(__do_div64);
+#ifdef CONFIG_AEABI
+EXPORT_SYMBOL(__aeabi_idiv);
+EXPORT_SYMBOL(__aeabi_idivmod);
+EXPORT_SYMBOL(__aeabi_lasr);
+EXPORT_SYMBOL(__aeabi_llsl);
+EXPORT_SYMBOL(__aeabi_llsr);
+EXPORT_SYMBOL(__aeabi_lmul);
+EXPORT_SYMBOL(__aeabi_uidiv);
+EXPORT_SYMBOL(__aeabi_uidivmod);
+EXPORT_SYMBOL(__aeabi_ulcmp);
+#endif
+
/* bitops */
EXPORT_SYMBOL(_set_bit_le);
EXPORT_SYMBOL(_test_and_set_bit_le);
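The new exports exist because an EABI compiler does not call the old __udivsi3-style helpers: integer division and 64-bit shifts, multiplies and compares are lowered to __aeabi_* run-time functions, so modules need those symbols resolved at load time. A minimal sketch of module code that would pull them in when built for an EABI target without a hardware divider (function names here are made up for illustration):

	/* illustration only: an EABI compiler turns these operations into
	 * calls to the helpers exported above */
	static unsigned int blocks_needed(unsigned int bytes, unsigned int blksz)
	{
		return (bytes + blksz - 1) / blksz;	/* -> __aeabi_uidiv */
	}

	static long long scale64(long long x, int shift)
	{
		return x << shift;			/* -> __aeabi_llsl */
	}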
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 55076a75e5bf..75e6f9a94713 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -13,7 +13,7 @@
#define NR_syscalls 328
#else
-__syscall_start:
+100:
/* 0 */ .long sys_restart_syscall
.long sys_exit
.long sys_fork_wrapper
@@ -27,7 +27,7 @@ __syscall_start:
/* 10 */ .long sys_unlink
.long sys_execve_wrapper
.long sys_chdir
- .long sys_time /* used by libc4 */
+ .long OBSOLETE(sys_time) /* used by libc4 */
.long sys_mknod
/* 15 */ .long sys_chmod
.long sys_lchown16
@@ -36,15 +36,15 @@ __syscall_start:
.long sys_lseek
/* 20 */ .long sys_getpid
.long sys_mount
- .long sys_oldumount /* used by libc4 */
+ .long OBSOLETE(sys_oldumount) /* used by libc4 */
.long sys_setuid16
.long sys_getuid16
-/* 25 */ .long sys_stime
+/* 25 */ .long OBSOLETE(sys_stime)
.long sys_ptrace
- .long sys_alarm /* used by libc4 */
+ .long OBSOLETE(sys_alarm) /* used by libc4 */
.long sys_ni_syscall /* was sys_fstat */
.long sys_pause
-/* 30 */ .long sys_utime /* used by libc4 */
+/* 30 */ .long OBSOLETE(sys_utime) /* used by libc4 */
.long sys_ni_syscall /* was sys_stty */
.long sys_ni_syscall /* was sys_getty */
.long sys_access
@@ -90,21 +90,21 @@ __syscall_start:
.long sys_sigpending
.long sys_sethostname
/* 75 */ .long sys_setrlimit
- .long sys_old_getrlimit /* used by libc4 */
+ .long OBSOLETE(sys_old_getrlimit) /* used by libc4 */
.long sys_getrusage
.long sys_gettimeofday
.long sys_settimeofday
/* 80 */ .long sys_getgroups16
.long sys_setgroups16
- .long old_select /* used by libc4 */
+ .long OBSOLETE(old_select) /* used by libc4 */
.long sys_symlink
.long sys_ni_syscall /* was sys_lstat */
/* 85 */ .long sys_readlink
.long sys_uselib
.long sys_swapon
.long sys_reboot
- .long old_readdir /* used by libc4 */
-/* 90 */ .long old_mmap /* used by libc4 */
+ .long OBSOLETE(old_readdir) /* used by libc4 */
+/* 90 */ .long OBSOLETE(old_mmap) /* used by libc4 */
.long sys_munmap
.long sys_truncate
.long sys_ftruncate
@@ -116,7 +116,7 @@ __syscall_start:
.long sys_statfs
/* 100 */ .long sys_fstatfs
.long sys_ni_syscall
- .long sys_socketcall
+ .long OBSOLETE(sys_socketcall)
.long sys_syslog
.long sys_setitimer
/* 105 */ .long sys_getitimer
@@ -127,11 +127,11 @@ __syscall_start:
/* 110 */ .long sys_ni_syscall /* was sys_iopl */
.long sys_vhangup
.long sys_ni_syscall
- .long sys_syscall /* call a syscall */
+ .long OBSOLETE(sys_syscall) /* call a syscall */
.long sys_wait4
/* 115 */ .long sys_swapoff
.long sys_sysinfo
- .long sys_ipc
+ .long OBSOLETE(ABI(sys_ipc, sys_oabi_ipc))
.long sys_fsync
.long sys_sigreturn_wrapper
/* 120 */ .long sys_clone_wrapper
@@ -194,8 +194,8 @@ __syscall_start:
.long sys_rt_sigtimedwait
.long sys_rt_sigqueueinfo
.long sys_rt_sigsuspend_wrapper
-/* 180 */ .long sys_pread64
- .long sys_pwrite64
+/* 180 */ .long ABI(sys_pread64, sys_oabi_pread64)
+ .long ABI(sys_pwrite64, sys_oabi_pwrite64)
.long sys_chown16
.long sys_getcwd
.long sys_capget
@@ -207,11 +207,11 @@ __syscall_start:
/* 190 */ .long sys_vfork_wrapper
.long sys_getrlimit
.long sys_mmap2
- .long sys_truncate64
- .long sys_ftruncate64
-/* 195 */ .long sys_stat64
- .long sys_lstat64
- .long sys_fstat64
+ .long ABI(sys_truncate64, sys_oabi_truncate64)
+ .long ABI(sys_ftruncate64, sys_oabi_ftruncate64)
+/* 195 */ .long ABI(sys_stat64, sys_oabi_stat64)
+ .long ABI(sys_lstat64, sys_oabi_lstat64)
+ .long ABI(sys_fstat64, sys_oabi_fstat64)
.long sys_lchown
.long sys_getuid
/* 200 */ .long sys_getgid
@@ -235,11 +235,11 @@ __syscall_start:
.long sys_pivot_root
.long sys_mincore
/* 220 */ .long sys_madvise
- .long sys_fcntl64
+ .long ABI(sys_fcntl64, sys_oabi_fcntl64)
.long sys_ni_syscall /* TUX */
.long sys_ni_syscall
.long sys_gettid
-/* 225 */ .long sys_readahead
+/* 225 */ .long ABI(sys_readahead, sys_oabi_readahead)
.long sys_setxattr
.long sys_lsetxattr
.long sys_fsetxattr
@@ -265,8 +265,8 @@ __syscall_start:
.long sys_exit_group
.long sys_lookup_dcookie
/* 250 */ .long sys_epoll_create
- .long sys_epoll_ctl
- .long sys_epoll_wait
+ .long ABI(sys_epoll_ctl, sys_oabi_epoll_ctl)
+ .long ABI(sys_epoll_wait, sys_oabi_epoll_wait)
.long sys_remap_file_pages
.long sys_ni_syscall /* sys_set_thread_area */
/* 255 */ .long sys_ni_syscall /* sys_get_thread_area */
@@ -280,8 +280,8 @@ __syscall_start:
.long sys_clock_gettime
.long sys_clock_getres
/* 265 */ .long sys_clock_nanosleep
- .long sys_statfs64
- .long sys_fstatfs64
+ .long sys_statfs64_wrapper
+ .long sys_fstatfs64_wrapper
.long sys_tgkill
.long sys_utimes
/* 270 */ .long sys_arm_fadvise64_64
@@ -312,7 +312,7 @@ __syscall_start:
/* 295 */ .long sys_getsockopt
.long sys_sendmsg
.long sys_recvmsg
- .long sys_semop
+ .long ABI(sys_semop, sys_oabi_semop)
.long sys_semget
/* 300 */ .long sys_semctl
.long sys_msgsnd
@@ -326,7 +326,7 @@ __syscall_start:
.long sys_add_key
/* 310 */ .long sys_request_key
.long sys_keyctl
- .long sys_semtimedop
+ .long ABI(sys_semtimedop, sys_oabi_semtimedop)
/* vserver */ .long sys_ni_syscall
.long sys_ioprio_set
/* 315 */ .long sys_ioprio_get
@@ -336,9 +336,8 @@ __syscall_start:
.long sys_mbind
/* 320 */ .long sys_get_mempolicy
.long sys_set_mempolicy
-__syscall_end:
- .rept NR_syscalls - (__syscall_end - __syscall_start) / 4
+ .rept NR_syscalls - (. - 100b) / 4
.long sys_ni_syscall
.endr
#endif
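The __syscall_start/__syscall_end labels are replaced by a local label, and the entries are wrapped in ABI() and OBSOLETE() so that calls.S can be included twice by entry-common.S, producing both the native table and (further below) the old-ABI table from a single list. This is the usual X-macro pattern; a hedged C analogue, with made-up entry names, looks like this:

	typedef long (*syscall_fn)(void);

	static long native_read(void)    { return 0; }
	static long compat_read(void)    { return 0; }
	static long old_time(void)       { return 0; }
	static long sys_ni_syscall(void) { return -38; }	/* -ENOSYS */

	/* the single entry list, playing the role of calls.S */
	#define SYSCALL_LIST \
		ENTRY(ABI(native_read, compat_read)) \
		ENTRY(OBSOLETE(old_time))

	/* first expansion: native table, obsolete entries stubbed out */
	#define ENTRY(fn)		fn,
	#define ABI(native, compat)	native
	#define OBSOLETE(fn)		sys_ni_syscall
	static syscall_fn native_table[] = { SYSCALL_LIST };
	#undef ABI
	#undef OBSOLETE

	/* second expansion: compat table keeps the old entry points */
	#define ABI(native, compat)	compat
	#define OBSOLETE(fn)		fn
	static syscall_fn compat_table[] = { SYSCALL_LIST };
	#undef ABI
	#undef OBSOLETE
	#undef ENTRY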
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index a52baedf6262..874e6bb79405 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -3,6 +3,7 @@
*
* Copyright (C) 1996,1997,1998 Russell King.
* ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
+ * nommu support by Hyok S. Choi (hyok.choi@samsung.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -104,14 +105,24 @@ common_invalid:
/*
* SVC mode handlers
*/
+
+#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
+#define SPFIX(code...) code
+#else
+#define SPFIX(code...)
+#endif
+
.macro svc_entry
sub sp, sp, #S_FRAME_SIZE
+ SPFIX( tst sp, #4 )
+ SPFIX( bicne sp, sp, #4 )
stmib sp, {r1 - r12}
ldmia r0, {r1 - r3}
add r5, sp, #S_SP @ here for interlock avoidance
mov r4, #-1 @ "" "" "" ""
add r0, sp, #S_FRAME_SIZE @ "" "" "" ""
+ SPFIX( addne r0, r0, #4 )
str r1, [sp] @ save the "real" r0 copied
@ from the exception stack
@@ -302,7 +313,14 @@ __pabt_svc:
/*
* User mode handlers
+ *
+ * EABI note: sp_svc is always 64-bit aligned here, so should S_FRAME_SIZE
*/
+
+#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
+#error "sizeof(struct pt_regs) must be a multiple of 8"
+#endif
+
.macro usr_entry
sub sp, sp, #S_FRAME_SIZE
stmib sp, {r1 - r12}
@@ -538,7 +556,11 @@ ENTRY(__switch_to)
add ip, r1, #TI_CPU_SAVE
ldr r3, [r2, #TI_TP_VALUE]
stmia ip!, {r4 - sl, fp, sp, lr} @ Store most regs on stack
+#ifndef CONFIG_MMU
+ add r2, r2, #TI_CPU_DOMAIN
+#else
ldr r6, [r2, #TI_CPU_DOMAIN]!
+#endif
#if __LINUX_ARM_ARCH__ >= 6
#ifdef CONFIG_CPU_MPCORE
clrex
@@ -556,7 +578,9 @@ ENTRY(__switch_to)
mov r4, #0xffff0fff
str r3, [r4, #-15] @ TLS val at 0xffff0ff0
#endif
+#ifdef CONFIG_MMU
mcr p15, 0, r6, c3, c0, 0 @ Set domain register
+#endif
#ifdef CONFIG_VFP
@ Always disable VFP so we can lazily save/restore the old
@ state. This occurs in the context of the previous thread.
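On EABI builds svc_entry now rounds the stack down to an 8-byte boundary, since AAPCS requires a 64-bit aligned stack at call boundaries, and the fixup is undone when the pre-exception sp is reconstructed. A C rendering of what the SPFIX() lines compute (a sketch, not kernel code):

	struct svc_frame {
		unsigned long frame_sp;	/* sp used for the register frame */
		unsigned long orig_sp;	/* sp at the time of the exception */
	};

	static struct svc_frame svc_entry_sp(unsigned long sp, unsigned long frame_size)
	{
		struct svc_frame f;
		unsigned long fixup;

		f.frame_sp = sp - frame_size;		/* sub   sp, sp, #S_FRAME_SIZE */
		fixup = f.frame_sp & 4;			/* tst   sp, #4                */
		f.frame_sp &= ~4UL;			/* bicne sp, sp, #4            */
		f.orig_sp = f.frame_sp + frame_size + fixup;	/* add r0, ... / addne r0, r0, #4 */
		return f;
	}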
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index e2b42997ad33..2b92ce85f97f 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -98,20 +98,14 @@ ENTRY(ret_from_fork)
run on an ARM7 and we can save a couple of instructions.
--pb */
#ifdef CONFIG_CPU_ARM710
- .macro arm710_bug_check, instr, temp
- and \temp, \instr, #0x0f000000 @ check for SWI
- teq \temp, #0x0f000000
- bne .Larm700bug
- .endm
-
-.Larm700bug:
+#define A710(code...) code
+.Larm710bug:
ldmia sp, {r0 - lr}^ @ Get calling r0 - lr
mov r0, r0
add sp, sp, #S_FRAME_SIZE
subs pc, lr, #4
#else
- .macro arm710_bug_check, instr, temp
- .endm
+#define A710(code...)
#endif
.align 5
@@ -129,14 +123,50 @@ ENTRY(vector_swi)
/*
* Get the system call number.
*/
+
+#if defined(CONFIG_OABI_COMPAT)
+
+ /*
+ * If we have CONFIG_OABI_COMPAT then we need to look at the swi
+ * value to determine if it is an EABI or an old ABI call.
+ */
#ifdef CONFIG_ARM_THUMB
+ tst r8, #PSR_T_BIT
+ movne r10, #0 @ no thumb OABI emulation
+ ldreq r10, [lr, #-4] @ get SWI instruction
+#else
+ ldr r10, [lr, #-4] @ get SWI instruction
+ A710( and ip, r10, #0x0f000000 @ check for SWI )
+ A710( teq ip, #0x0f000000 )
+ A710( bne .Larm710bug )
+#endif
+
+#elif defined(CONFIG_AEABI)
+
+ /*
+ * Pure EABI user space always put syscall number into scno (r7).
+ */
+ A710( ldr ip, [lr, #-4] @ get SWI instruction )
+ A710( and ip, ip, #0x0f000000 @ check for SWI )
+ A710( teq ip, #0x0f000000 )
+ A710( bne .Larm710bug )
+
+#elif defined(CONFIG_ARM_THUMB)
+
+ /* Legacy ABI only, possibly thumb mode. */
tst r8, #PSR_T_BIT @ this is SPSR from save_user_regs
addne scno, r7, #__NR_SYSCALL_BASE @ put OS number in
ldreq scno, [lr, #-4]
+
#else
+
+ /* Legacy ABI only. */
ldr scno, [lr, #-4] @ get SWI instruction
+ A710( and ip, scno, #0x0f000000 @ check for SWI )
+ A710( teq ip, #0x0f000000 )
+ A710( bne .Larm710bug )
+
#endif
- arm710_bug_check scno, ip
#ifdef CONFIG_ALIGNMENT_TRAP
ldr ip, __cr_alignment
@@ -145,18 +175,31 @@ ENTRY(vector_swi)
#endif
enable_irq
- stmdb sp!, {r4, r5} @ push fifth and sixth args
-
get_thread_info tsk
+ adr tbl, sys_call_table @ load syscall table pointer
ldr ip, [tsk, #TI_FLAGS] @ check for syscall tracing
+
+#if defined(CONFIG_OABI_COMPAT)
+ /*
+ * If the swi argument is zero, this is an EABI call and we do nothing.
+ *
+ * If this is an old ABI call, get the syscall number into scno and
+ * get the old ABI syscall table address.
+ */
+ bics r10, r10, #0xff000000
+ eorne scno, r10, #__NR_OABI_SYSCALL_BASE
+ ldrne tbl, =sys_oabi_call_table
+#elif !defined(CONFIG_AEABI)
bic scno, scno, #0xff000000 @ mask off SWI op-code
eor scno, scno, #__NR_SYSCALL_BASE @ check OS number
- adr tbl, sys_call_table @ load syscall table pointer
+#endif
+
+ stmdb sp!, {r4, r5} @ push fifth and sixth args
tst ip, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?
bne __sys_trace
- adr lr, ret_fast_syscall @ return address
cmp scno, #NR_syscalls @ check upper syscall limit
+ adr lr, ret_fast_syscall @ return address
ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
add r1, sp, #S_OFF
@@ -171,11 +214,13 @@ ENTRY(vector_swi)
* context switches, and waiting for our parent to respond.
*/
__sys_trace:
+ mov r2, scno
add r1, sp, #S_OFF
mov r0, #0 @ trace entry [IP = 0]
bl syscall_trace
adr lr, __sys_trace_return @ return address
+ mov scno, r0 @ syscall number (possibly new)
add r1, sp, #S_R0 + S_OFF @ pointer to regs
cmp scno, #NR_syscalls @ check upper syscall limit
ldmccia r1, {r0 - r3} @ have to reload r0 - r3
@@ -184,6 +229,7 @@ __sys_trace:
__sys_trace_return:
str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
+ mov r2, scno
mov r1, sp
mov r0, #1 @ trace exit [IP = 1]
bl syscall_trace
@@ -195,10 +241,24 @@ __sys_trace_return:
__cr_alignment:
.word cr_alignment
#endif
+ .ltorg
+
+/*
+ * This is the syscall table declaration for native ABI syscalls.
+ * With EABI a couple syscalls are obsolete and defined as sys_ni_syscall.
+ */
+#define ABI(native, compat) native
+#ifdef CONFIG_AEABI
+#define OBSOLETE(syscall) sys_ni_syscall
+#else
+#define OBSOLETE(syscall) syscall
+#endif
.type sys_call_table, #object
ENTRY(sys_call_table)
#include "calls.S"
+#undef ABI
+#undef OBSOLETE
/*============================================================================
* Special system call wrappers
@@ -207,7 +267,7 @@ ENTRY(sys_call_table)
@ r8 = syscall table
.type sys_syscall, #function
sys_syscall:
- eor scno, r0, #__NR_SYSCALL_BASE
+ eor scno, r0, #__NR_OABI_SYSCALL_BASE
cmp scno, #__NR_syscall - __NR_SYSCALL_BASE
cmpne scno, #NR_syscalls @ check range
stmloia sp, {r5, r6} @ shuffle args
@@ -255,6 +315,16 @@ sys_sigaltstack_wrapper:
ldr r2, [sp, #S_OFF + S_SP]
b do_sigaltstack
+sys_statfs64_wrapper:
+ teq r1, #88
+ moveq r1, #84
+ b sys_statfs64
+
+sys_fstatfs64_wrapper:
+ teq r1, #88
+ moveq r1, #84
+ b sys_fstatfs64
+
/*
* Note: off_4k (r5) is always units of 4K. If we can't do the requested
* offset, we return EINVAL.
@@ -271,3 +341,49 @@ sys_mmap2:
str r5, [sp, #4]
b do_mmap2
#endif
+
+#ifdef CONFIG_OABI_COMPAT
+
+/*
+ * These are syscalls with argument register differences
+ */
+
+sys_oabi_pread64:
+ stmia sp, {r3, r4}
+ b sys_pread64
+
+sys_oabi_pwrite64:
+ stmia sp, {r3, r4}
+ b sys_pwrite64
+
+sys_oabi_truncate64:
+ mov r3, r2
+ mov r2, r1
+ b sys_truncate64
+
+sys_oabi_ftruncate64:
+ mov r3, r2
+ mov r2, r1
+ b sys_ftruncate64
+
+sys_oabi_readahead:
+ str r3, [sp]
+ mov r3, r2
+ mov r2, r1
+ b sys_readahead
+
+/*
+ * Let's declare a second syscall table for old ABI binaries
+ * using the compatibility syscall entries.
+ */
+#define ABI(native, compat) compat
+#define OBSOLETE(syscall) syscall
+
+ .type sys_oabi_call_table, #object
+ENTRY(sys_oabi_call_table)
+#include "calls.S"
+#undef ABI
+#undef OBSOLETE
+
+#endif
+
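The reworked vector_swi has to cope with two user-side conventions: EABI callers put the syscall number in r7 and issue a plain swi 0, while legacy callers encode the number in the swi instruction itself, which is why CONFIG_OABI_COMPAT fetches the instruction at lr - 4 and strips the 0x900000 base. A user-space sketch of the two conventions for getpid (number 20 on ARM; a real libc stub also handles errno and Thumb):

	/* EABI convention: number in r7, swi immediate is zero */
	static long eabi_getpid(void)
	{
		register long nr asm("r7") = 20;	/* __NR_getpid */
		register long ret asm("r0");
		asm volatile("swi 0" : "=r"(ret) : "r"(nr) : "memory");
		return ret;
	}

	/* legacy (OABI) convention: number encoded in the instruction,
	 * 0x900000 (__NR_SYSCALL_BASE) + 20 */
	static long oabi_getpid(void)
	{
		register long ret asm("r0");
		asm volatile("swi 0x900014" : "=r"(ret) : : "memory");
		return ret;
	}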
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 648cfff93138..55c99cdab7d6 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -19,6 +19,7 @@
@
@ Most of the stack format comes from struct pt_regs, but with
@ the addition of 8 bytes for storing syscall args 5 and 6.
+@ This _must_ remain a multiple of 8 for EABI.
@
#define S_OFF 8
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 1e985f2cd70f..1aca1775b28f 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -251,12 +251,11 @@ __turn_mmu_on:
* r10 = procinfo
*
* Returns:
- * r0, r3, r5, r6, r7 corrupted
+ * r0, r3, r6, r7 corrupted
* r4 = physical page table address
*/
.type __create_page_tables, %function
__create_page_tables:
- ldr r5, [r8, #MACHINFO_PHYSRAM] @ physram
pgtbl r4 @ page table address
/*
@@ -303,7 +302,7 @@ __create_page_tables:
* Then map first 1MB of ram in case it contains our boot params.
*/
add r0, r4, #PAGE_OFFSET >> 18
- orr r6, r5, r7
+ orr r6, r7, #PHYS_OFFSET
str r6, [r0]
#ifdef CONFIG_XIP_KERNEL
@@ -311,7 +310,7 @@ __create_page_tables:
* Map some ram to cover our .data and .bss areas.
* Mapping 3MB should be plenty.
*/
- sub r3, r4, r5
+ sub r3, r4, #PHYS_OFFSET
mov r3, r3, lsr #20
add r0, r0, r3, lsl #2
add r6, r6, r3, lsl #20
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index e591f72bcdeb..7b6256bb590e 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -766,6 +766,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
(unsigned long __user *) data);
break;
+ case PTRACE_SET_SYSCALL:
+ ret = 0;
+ child->ptrace_message = data;
+ break;
+
default:
ret = ptrace_request(child, request, addr, data);
break;
@@ -774,14 +779,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
return ret;
}
-asmlinkage void syscall_trace(int why, struct pt_regs *regs)
+asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
{
unsigned long ip;
if (!test_thread_flag(TIF_SYSCALL_TRACE))
- return;
+ return scno;
if (!(current->ptrace & PT_PTRACED))
- return;
+ return scno;
/*
* Save IP. IP is used to denote syscall entry/exit:
@@ -790,6 +795,8 @@ asmlinkage void syscall_trace(int why, struct pt_regs *regs)
ip = regs->ARM_ip;
regs->ARM_ip = why;
+ current->ptrace_message = scno;
+
/* the 0x80 provides a way for the tracing parent to distinguish
between a syscall stop and SIGTRAP delivery */
ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
@@ -804,4 +811,6 @@ asmlinkage void syscall_trace(int why, struct pt_regs *regs)
current->exit_code = 0;
}
regs->ARM_ip = ip;
+
+ return current->ptrace_message;
}
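syscall_trace() now passes the syscall number to the tracer and returns whatever number the tracer left in ptrace_message, which together with the new PTRACE_SET_SYSCALL request lets a debugger rewrite or cancel a traced syscall at entry. A rough user-space sketch of how a tracer might use it (ARM defines PTRACE_SET_SYSCALL as request 23; error handling omitted):

	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <sys/wait.h>
	#include <signal.h>

	#ifndef PTRACE_SET_SYSCALL
	#define PTRACE_SET_SYSCALL 23
	#endif

	/* stop the child at its next syscall entry and replace the number,
	 * e.g. with an out-of-range value so the call is denied */
	static void rewrite_next_syscall(pid_t child, long replacement_nr)
	{
		int status;

		ptrace(PTRACE_SYSCALL, child, 0, 0);
		waitpid(child, &status, 0);
		if (WIFSTOPPED(status) && WSTOPSIG(status) == SIGTRAP)
			ptrace(PTRACE_SET_SYSCALL, child, 0, (void *)replacement_nr);
		ptrace(PTRACE_SYSCALL, child, 0, 0);	/* resume with the new number */
	}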
diff --git a/arch/arm/kernel/semaphore.c b/arch/arm/kernel/semaphore.c
index 4c31f2923055..981fe5c6ccbe 100644
--- a/arch/arm/kernel/semaphore.c
+++ b/arch/arm/kernel/semaphore.c
@@ -177,41 +177,42 @@ int __down_trylock(struct semaphore * sem)
* ip contains the semaphore pointer on entry. Save the C-clobbered
* registers (r0 to r3 and lr), but not ip, as we use it as a return
* value in some cases..
+ * To remain AAPCS compliant (64-bit stack align) we save r4 as well.
*/
asm(" .section .sched.text,\"ax\",%progbits \n\
.align 5 \n\
.globl __down_failed \n\
__down_failed: \n\
- stmfd sp!, {r0 - r3, lr} \n\
+ stmfd sp!, {r0 - r4, lr} \n\
mov r0, ip \n\
bl __down \n\
- ldmfd sp!, {r0 - r3, pc} \n\
+ ldmfd sp!, {r0 - r4, pc} \n\
\n\
.align 5 \n\
.globl __down_interruptible_failed \n\
__down_interruptible_failed: \n\
- stmfd sp!, {r0 - r3, lr} \n\
+ stmfd sp!, {r0 - r4, lr} \n\
mov r0, ip \n\
bl __down_interruptible \n\
mov ip, r0 \n\
- ldmfd sp!, {r0 - r3, pc} \n\
+ ldmfd sp!, {r0 - r4, pc} \n\
\n\
.align 5 \n\
.globl __down_trylock_failed \n\
__down_trylock_failed: \n\
- stmfd sp!, {r0 - r3, lr} \n\
+ stmfd sp!, {r0 - r4, lr} \n\
mov r0, ip \n\
bl __down_trylock \n\
mov ip, r0 \n\
- ldmfd sp!, {r0 - r3, pc} \n\
+ ldmfd sp!, {r0 - r4, pc} \n\
\n\
.align 5 \n\
.globl __up_wakeup \n\
__up_wakeup: \n\
- stmfd sp!, {r0 - r3, lr} \n\
+ stmfd sp!, {r0 - r4, lr} \n\
mov r0, ip \n\
bl __up \n\
- ldmfd sp!, {r0 - r3, pc} \n\
+ ldmfd sp!, {r0 - r4, pc} \n\
");
EXPORT_SYMBOL(__down_failed);
diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c
index ea569ba482b1..a491de2d9024 100644
--- a/arch/arm/kernel/sys_arm.c
+++ b/arch/arm/kernel/sys_arm.c
@@ -147,6 +147,7 @@ asmlinkage int old_select(struct sel_arg_struct __user *arg)
return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}
+#if !defined(CONFIG_AEABI) || defined(CONFIG_OABI_COMPAT)
/*
* sys_ipc() is the de-multiplexer for the SysV IPC calls..
*
@@ -226,6 +227,7 @@ asmlinkage int sys_ipc(uint call, int first, int second, int third,
return -ENOSYS;
}
}
+#endif
/* Fork a new task - this creates a new program thread.
* This is called indirectly via a small wrapper
diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c
new file mode 100644
index 000000000000..eafa8e5284af
--- /dev/null
+++ b/arch/arm/kernel/sys_oabi-compat.c
@@ -0,0 +1,339 @@
+/*
+ * arch/arm/kernel/sys_oabi-compat.c
+ *
+ * Compatibility wrappers for syscalls that are used from
+ * old ABI user space binaries with an EABI kernel.
+ *
+ * Author: Nicolas Pitre
+ * Created: Oct 7, 2005
+ * Copyright: MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * The legacy ABI and the new ARM EABI have different rules making some
+ * syscalls incompatible especially with structure arguments.
+ * Most notably, Eabi says 64-bit members should be 64-bit aligned instead of
+ * simply word aligned. EABI also pads structures to the size of the largest
+ * member it contains instead of the invariant 32-bit.
+ *
+ * The following syscalls are affected:
+ *
+ * sys_stat64:
+ * sys_lstat64:
+ * sys_fstat64:
+ *
+ * struct stat64 has different sizes and some members are shifted
+ * Compatibility wrappers are needed for them and provided below.
+ *
+ * sys_fcntl64:
+ *
+ * struct flock64 has different sizes and some members are shifted
+ * A compatibility wrapper is needed and provided below.
+ *
+ * sys_statfs64:
+ * sys_fstatfs64:
+ *
+ * struct statfs64 has extra padding with EABI growing its size from
+ * 84 to 88. This struct is now __attribute__((packed,aligned(4)))
+ * with a small assembly wrapper to force the sz argument to 84 if it is 88
+ * to avoid copying the extra padding over user space unexpecting it.
+ *
+ * sys_newuname:
+ *
+ * struct new_utsname has no padding with EABI. No problem there.
+ *
+ * sys_epoll_ctl:
+ * sys_epoll_wait:
+ *
+ * struct epoll_event has its second member shifted also affecting the
+ * structure size. Compatibility wrappers are needed and provided below.
+ *
+ * sys_ipc:
+ * sys_semop:
+ * sys_semtimedop:
+ *
+ * struct sembuf loses its padding with EABI. Since arrays of them are
+ * used they have to be copyed to remove the padding. Compatibility wrappers
+ * provided below.
+ */
+
+#include <linux/syscalls.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/fcntl.h>
+#include <linux/eventpoll.h>
+#include <linux/sem.h>
+#include <asm/ipc.h>
+#include <asm/uaccess.h>
+
+struct oldabi_stat64 {
+ unsigned long long st_dev;
+ unsigned int __pad1;
+ unsigned long __st_ino;
+ unsigned int st_mode;
+ unsigned int st_nlink;
+
+ unsigned long st_uid;
+ unsigned long st_gid;
+
+ unsigned long long st_rdev;
+ unsigned int __pad2;
+
+ long long st_size;
+ unsigned long st_blksize;
+ unsigned long long st_blocks;
+
+ unsigned long st_atime;
+ unsigned long st_atime_nsec;
+
+ unsigned long st_mtime;
+ unsigned long st_mtime_nsec;
+
+ unsigned long st_ctime;
+ unsigned long st_ctime_nsec;
+
+ unsigned long long st_ino;
+} __attribute__ ((packed,aligned(4)));
+
+static long cp_oldabi_stat64(struct kstat *stat,
+ struct oldabi_stat64 __user *statbuf)
+{
+ struct oldabi_stat64 tmp;
+
+ tmp.st_dev = huge_encode_dev(stat->dev);
+ tmp.__pad1 = 0;
+ tmp.__st_ino = stat->ino;
+ tmp.st_mode = stat->mode;
+ tmp.st_nlink = stat->nlink;
+ tmp.st_uid = stat->uid;
+ tmp.st_gid = stat->gid;
+ tmp.st_rdev = huge_encode_dev(stat->rdev);
+ tmp.st_size = stat->size;
+ tmp.st_blocks = stat->blocks;
+ tmp.__pad2 = 0;
+ tmp.st_blksize = stat->blksize;
+ tmp.st_atime = stat->atime.tv_sec;
+ tmp.st_atime_nsec = stat->atime.tv_nsec;
+ tmp.st_mtime = stat->mtime.tv_sec;
+ tmp.st_mtime_nsec = stat->mtime.tv_nsec;
+ tmp.st_ctime = stat->ctime.tv_sec;
+ tmp.st_ctime_nsec = stat->ctime.tv_nsec;
+ tmp.st_ino = stat->ino;
+ return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
+}
+
+asmlinkage long sys_oabi_stat64(char __user * filename,
+ struct oldabi_stat64 __user * statbuf)
+{
+ struct kstat stat;
+ int error = vfs_stat(filename, &stat);
+ if (!error)
+ error = cp_oldabi_stat64(&stat, statbuf);
+ return error;
+}
+
+asmlinkage long sys_oabi_lstat64(char __user * filename,
+ struct oldabi_stat64 __user * statbuf)
+{
+ struct kstat stat;
+ int error = vfs_lstat(filename, &stat);
+ if (!error)
+ error = cp_oldabi_stat64(&stat, statbuf);
+ return error;
+}
+
+asmlinkage long sys_oabi_fstat64(unsigned long fd,
+ struct oldabi_stat64 __user * statbuf)
+{
+ struct kstat stat;
+ int error = vfs_fstat(fd, &stat);
+ if (!error)
+ error = cp_oldabi_stat64(&stat, statbuf);
+ return error;
+}
+
+struct oabi_flock64 {
+ short l_type;
+ short l_whence;
+ loff_t l_start;
+ loff_t l_len;
+ pid_t l_pid;
+} __attribute__ ((packed,aligned(4)));
+
+asmlinkage long sys_oabi_fcntl64(unsigned int fd, unsigned int cmd,
+ unsigned long arg)
+{
+ struct oabi_flock64 user;
+ struct flock64 kernel;
+ mm_segment_t fs = USER_DS; /* initialized to kill a warning */
+ unsigned long local_arg = arg;
+ int ret;
+
+ switch (cmd) {
+ case F_GETLK64:
+ case F_SETLK64:
+ case F_SETLKW64:
+ if (copy_from_user(&user, (struct oabi_flock64 __user *)arg,
+ sizeof(user)))
+ return -EFAULT;
+ kernel.l_type = user.l_type;
+ kernel.l_whence = user.l_whence;
+ kernel.l_start = user.l_start;
+ kernel.l_len = user.l_len;
+ kernel.l_pid = user.l_pid;
+ local_arg = (unsigned long)&kernel;
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+ }
+
+ ret = sys_fcntl64(fd, cmd, local_arg);
+
+ switch (cmd) {
+ case F_GETLK64:
+ if (!ret) {
+ user.l_type = kernel.l_type;
+ user.l_whence = kernel.l_whence;
+ user.l_start = kernel.l_start;
+ user.l_len = kernel.l_len;
+ user.l_pid = kernel.l_pid;
+ if (copy_to_user((struct oabi_flock64 __user *)arg,
+ &user, sizeof(user)))
+ ret = -EFAULT;
+ }
+ case F_SETLK64:
+ case F_SETLKW64:
+ set_fs(fs);
+ }
+
+ return ret;
+}
+
+struct oabi_epoll_event {
+ __u32 events;
+ __u64 data;
+} __attribute__ ((packed,aligned(4)));
+
+asmlinkage long sys_oabi_epoll_ctl(int epfd, int op, int fd,
+ struct oabi_epoll_event __user *event)
+{
+ struct oabi_epoll_event user;
+ struct epoll_event kernel;
+ mm_segment_t fs;
+ long ret;
+
+ if (op == EPOLL_CTL_DEL)
+ return sys_epoll_ctl(epfd, op, fd, NULL);
+ if (copy_from_user(&user, event, sizeof(user)))
+ return -EFAULT;
+ kernel.events = user.events;
+ kernel.data = user.data;
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = sys_epoll_ctl(epfd, op, fd, &kernel);
+ set_fs(fs);
+ return ret;
+}
+
+asmlinkage long sys_oabi_epoll_wait(int epfd,
+ struct oabi_epoll_event __user *events,
+ int maxevents, int timeout)
+{
+ struct epoll_event *kbuf;
+ mm_segment_t fs;
+ long ret, err, i;
+
+ if (maxevents <= 0 || maxevents > (INT_MAX/sizeof(struct epoll_event)))
+ return -EINVAL;
+ kbuf = kmalloc(sizeof(*kbuf) * maxevents, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = sys_epoll_wait(epfd, kbuf, maxevents, timeout);
+ set_fs(fs);
+ err = 0;
+ for (i = 0; i < ret; i++) {
+ __put_user_error(kbuf[i].events, &events->events, err);
+ __put_user_error(kbuf[i].data, &events->data, err);
+ events++;
+ }
+ kfree(kbuf);
+ return err ? -EFAULT : ret;
+}
+
+struct oabi_sembuf {
+ unsigned short sem_num;
+ short sem_op;
+ short sem_flg;
+ unsigned short __pad;
+};
+
+asmlinkage long sys_oabi_semtimedop(int semid,
+ struct oabi_sembuf __user *tsops,
+ unsigned nsops,
+ const struct timespec __user *timeout)
+{
+ struct sembuf *sops;
+ struct timespec local_timeout;
+ long err;
+ int i;
+
+ if (nsops < 1)
+ return -EINVAL;
+ sops = kmalloc(sizeof(*sops) * nsops, GFP_KERNEL);
+ if (!sops)
+ return -ENOMEM;
+ err = 0;
+ for (i = 0; i < nsops; i++) {
+ __get_user_error(sops[i].sem_num, &tsops->sem_num, err);
+ __get_user_error(sops[i].sem_op, &tsops->sem_op, err);
+ __get_user_error(sops[i].sem_flg, &tsops->sem_flg, err);
+ tsops++;
+ }
+ if (timeout) {
+ /* copy this as well before changing domain protection */
+ err |= copy_from_user(&local_timeout, timeout, sizeof(*timeout));
+ timeout = &local_timeout;
+ }
+ if (err) {
+ err = -EFAULT;
+ } else {
+ mm_segment_t fs = get_fs();
+ set_fs(KERNEL_DS);
+ err = sys_semtimedop(semid, sops, nsops, timeout);
+ set_fs(fs);
+ }
+ kfree(sops);
+ return err;
+}
+
+asmlinkage long sys_oabi_semop(int semid, struct oabi_sembuf __user *tsops,
+ unsigned nsops)
+{
+ return sys_oabi_semtimedop(semid, tsops, nsops, NULL);
+}
+
+extern asmlinkage int sys_ipc(uint call, int first, int second, int third,
+ void __user *ptr, long fifth);
+
+asmlinkage int sys_oabi_ipc(uint call, int first, int second, int third,
+ void __user *ptr, long fifth)
+{
+ switch (call & 0xffff) {
+ case SEMOP:
+ return sys_oabi_semtimedop(first,
+ (struct oabi_sembuf __user *)ptr,
+ second, NULL);
+ case SEMTIMEDOP:
+ return sys_oabi_semtimedop(first,
+ (struct oabi_sembuf __user *)ptr,
+ second,
+ (const struct timespec __user *)fifth);
+ default:
+ return sys_ipc(call, first, second, third, ptr, fifth);
+ }
+}
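Most of these wrappers exist purely because of structure layout: EABI aligns 64-bit members to 8 bytes and pads structures accordingly, while the old ABI (reproduced above with packed,aligned(4)) does not. A quick user-space check of the epoll_event case, using local struct names for illustration; the sizes in the comments are what an ARM EABI compiler produces:

	#include <stdio.h>
	#include <stddef.h>

	struct eabi_event {			/* natural EABI layout */
		unsigned int       events;
		unsigned long long data;
	};					/* 16 bytes, data at offset 8 */

	struct oabi_event {			/* legacy layout, as in the wrappers */
		unsigned int       events;
		unsigned long long data;
	} __attribute__((packed, aligned(4)));	/* 12 bytes, data at offset 4 */

	int main(void)
	{
		printf("eabi: size %zu, data at %zu\n",
		       sizeof(struct eabi_event), offsetof(struct eabi_event, data));
		printf("oabi: size %zu, data at %zu\n",
		       sizeof(struct oabi_event), offsetof(struct oabi_event, data));
		return 0;
	}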
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 93cfd3ffcc72..10235b01582e 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -404,7 +404,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
struct thread_info *thread = current_thread_info();
siginfo_t info;
- if ((no >> 16) != 0x9f)
+ if ((no >> 16) != (__ARM_NR_BASE>> 16))
return bad_syscall(no, regs);
switch (no & 0xffff) {