Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--   arch/arm/kernel/Makefile               3
-rw-r--r--   arch/arm/kernel/asm-offsets.c         10
-rw-r--r--   arch/arm/kernel/calls.S                6
-rw-r--r--   arch/arm/kernel/entry-armv.S         111
-rw-r--r--   arch/arm/kernel/entry-common.S         5
-rw-r--r--   arch/arm/kernel/head-common.S         10
-rw-r--r--   arch/arm/kernel/head.S                 3
-rw-r--r--   arch/arm/kernel/kprobes.c              5
-rw-r--r--   arch/arm/kernel/semaphore.c          221
-rw-r--r--   arch/arm/kernel/signal.c               4
-rw-r--r--   arch/arm/kernel/sys_oabi-compat.c     24
-rw-r--r--   arch/arm/kernel/thumbee.c             81
12 files changed, 215 insertions(+), 268 deletions(-)
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 00d44c6fbfe9..ad455ff5aebe 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -7,7 +7,7 @@ AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
# Object file lists.
obj-y := compat.o entry-armv.o entry-common.o irq.o \
- process.o ptrace.o semaphore.o setup.o signal.o \
+ process.o ptrace.o setup.o signal.o \
sys_arm.o stacktrace.o time.o traps.o
obj-$(CONFIG_ISA_DMA_API) += dma.o
@@ -22,6 +22,7 @@ obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
obj-$(CONFIG_KPROBES) += kprobes.o kprobes-decode.o
obj-$(CONFIG_ATAGS_PROC) += atags.o
obj-$(CONFIG_OABI_COMPAT) += sys_oabi-compat.o
+obj-$(CONFIG_ARM_THUMBEE) += thumbee.o
obj-$(CONFIG_CRUNCH) += crunch.o crunch-bits.o
AFLAGS_crunch-bits.o := -Wa,-mcpu=ep9312
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 3278e713c32a..0a0d2479274b 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -58,6 +58,9 @@ int main(void)
DEFINE(TI_TP_VALUE, offsetof(struct thread_info, tp_value));
DEFINE(TI_FPSTATE, offsetof(struct thread_info, fpstate));
DEFINE(TI_VFPSTATE, offsetof(struct thread_info, vfpstate));
+#ifdef CONFIG_ARM_THUMBEE
+ DEFINE(TI_THUMBEE_STATE, offsetof(struct thread_info, thumbee_state));
+#endif
#ifdef CONFIG_IWMMXT
DEFINE(TI_IWMMXT_STATE, offsetof(struct thread_info, fpstate.iwmmxt));
#endif
@@ -108,5 +111,12 @@ int main(void)
DEFINE(PROCINFO_INITFUNC, offsetof(struct proc_info_list, __cpu_flush));
DEFINE(PROCINFO_MM_MMUFLAGS, offsetof(struct proc_info_list, __cpu_mm_mmu_flags));
DEFINE(PROCINFO_IO_MMUFLAGS, offsetof(struct proc_info_list, __cpu_io_mmu_flags));
+ BLANK();
+#ifdef MULTI_DABORT
+ DEFINE(PROCESSOR_DABT_FUNC, offsetof(struct processor, _data_abort));
+#endif
+#ifdef MULTI_PABORT
+ DEFINE(PROCESSOR_PABT_FUNC, offsetof(struct processor, _prefetch_abort));
+#endif
return 0;
}
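
For context, asm-offsets.c is never linked into the kernel: Kbuild compiles it to assembly and a sed pass turns each DEFINE() marker into a #define in the generated asm-offsets.h, which is what lets entry-armv.S below use names like TI_THUMBEE_STATE and PROCESSOR_DABT_FUNC as immediate offsets. A minimal sketch of the mechanism, compilable with gcc -S (thread_info_sketch is a hypothetical stand-in; the "->" marker syntax is the upstream convention, not part of this patch):

    #include <stddef.h>

    struct thread_info_sketch {             /* stand-in for struct thread_info */
            unsigned long flags;
            unsigned long thumbee_state;    /* saved TEEHBR for this thread */
    };

    /* Emits "->TI_THUMBEE_STATE <offset>" into the .s output; Kbuild's sed
     * pass rewrites such lines into "#define TI_THUMBEE_STATE <offset>". */
    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " #val : : "i" (val))

    int main(void)
    {
            DEFINE(TI_THUMBEE_STATE,
                   offsetof(struct thread_info_sketch, thumbee_state));
            return 0;
    }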
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 283e14fff993..30a67a5a40a8 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -336,7 +336,7 @@
CALL(sys_mknodat)
/* 325 */ CALL(sys_fchownat)
CALL(sys_futimesat)
- CALL(sys_fstatat64)
+ CALL(ABI(sys_fstatat64, sys_oabi_fstatat64))
CALL(sys_unlinkat)
CALL(sys_renameat)
/* 330 */ CALL(sys_linkat)
@@ -359,9 +359,11 @@
CALL(sys_kexec_load)
CALL(sys_utimensat)
CALL(sys_signalfd)
-/* 350 */ CALL(sys_ni_syscall)
+/* 350 */ CALL(sys_timerfd_create)
CALL(sys_eventfd)
CALL(sys_fallocate)
+ CALL(sys_timerfd_settime)
+ CALL(sys_timerfd_gettime)
#ifndef syscalls_counted
.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
#define syscalls_counted
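
The slots wired up above expose the then-new timerfd interface (timerfd_create at 350, timerfd_settime and timerfd_gettime at 353/354, with eventfd and fallocate in between). A small userspace smoke test, assuming the usual glibc wrappers from <sys/timerfd.h>:

    #include <stdio.h>
    #include <stdint.h>
    #include <unistd.h>
    #include <sys/timerfd.h>

    int main(void)
    {
            struct itimerspec its = {
                    .it_value    = { .tv_sec = 1 },   /* first expiry in 1s */
                    .it_interval = { .tv_sec = 0 },   /* one-shot */
            };
            uint64_t expirations;
            int fd = timerfd_create(CLOCK_MONOTONIC, 0);

            if (fd < 0 || timerfd_settime(fd, 0, &its, NULL) < 0) {
                    perror("timerfd");
                    return 1;
            }
            if (read(fd, &expirations, sizeof(expirations)) == sizeof(expirations))
                    printf("timer expired %llu time(s)\n",
                           (unsigned long long)expirations);
            return close(fd);
    }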
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index a46d5b456765..7dca225752c1 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -166,12 +166,12 @@ __dabt_svc:
@ The abort handler must return the aborted address in r0, and
@ the fault status register in r1. r9 must be preserved.
@
-#ifdef MULTI_ABORT
+#ifdef MULTI_DABORT
ldr r4, .LCprocfns
mov lr, pc
- ldr pc, [r4]
+ ldr pc, [r4, #PROCESSOR_DABT_FUNC]
#else
- bl CPU_ABORT_HANDLER
+ bl CPU_DABORT_HANDLER
#endif
@
@@ -209,14 +209,12 @@ __irq_svc:
irq_handler
#ifdef CONFIG_PREEMPT
+ str r8, [tsk, #TI_PREEMPT] @ restore preempt count
ldr r0, [tsk, #TI_FLAGS] @ get flags
+ teq r8, #0 @ if preempt count != 0
+ movne r0, #0 @ force flags to 0
tst r0, #_TIF_NEED_RESCHED
blne svc_preempt
-preempt_return:
- ldr r0, [tsk, #TI_PREEMPT] @ read preempt value
- str r8, [tsk, #TI_PREEMPT] @ restore preempt count
- teq r0, r7
- strne r0, [r0, -r0] @ bug()
#endif
ldr r0, [sp, #S_PSR] @ irqs are already disabled
msr spsr_cxsf, r0
@@ -230,19 +228,11 @@ preempt_return:
#ifdef CONFIG_PREEMPT
svc_preempt:
- teq r8, #0 @ was preempt count = 0
- ldreq r6, .LCirq_stat
- movne pc, lr @ no
- ldr r0, [r6, #4] @ local_irq_count
- ldr r1, [r6, #8] @ local_bh_count
- adds r0, r0, r1
- movne pc, lr
- mov r7, #0 @ preempt_schedule_irq
- str r7, [tsk, #TI_PREEMPT] @ expects preempt_count == 0
+ mov r8, lr
1: bl preempt_schedule_irq @ irq en/disable is done inside
ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS
tst r0, #_TIF_NEED_RESCHED
- beq preempt_return @ go again
+ moveq pc, r8 @ go again
b 1b
#endif
@@ -293,7 +283,6 @@ __pabt_svc:
mrs r9, cpsr
tst r3, #PSR_I_BIT
biceq r9, r9, #PSR_I_BIT
- msr cpsr_c, r9
@
@ set args, then call main handler
@@ -301,7 +290,15 @@ __pabt_svc:
@ r0 - address of faulting instruction
@ r1 - pointer to registers on stack
@
- mov r0, r2 @ address (pc)
+#ifdef MULTI_PABORT
+ mov r0, r2 @ pass address of aborted instruction.
+ ldr r4, .LCprocfns
+ mov lr, pc
+ ldr pc, [r4, #PROCESSOR_PABT_FUNC]
+#else
+ CPU_PABORT_HANDLER(r0, r2)
+#endif
+ msr cpsr_c, r9 @ Maybe enable interrupts
mov r1, sp @ regs
bl do_PrefetchAbort @ call abort handler
@@ -320,16 +317,12 @@ __pabt_svc:
.align 5
.LCcralign:
.word cr_alignment
-#ifdef MULTI_ABORT
+#ifdef MULTI_DABORT
.LCprocfns:
.word processor
#endif
.LCfp:
.word fp_enter
-#ifdef CONFIG_PREEMPT
-.LCirq_stat:
- .word irq_stat
-#endif
/*
* User mode handlers
@@ -404,12 +397,12 @@ __dabt_usr:
@ The abort handler must return the aborted address in r0, and
@ the fault status register in r1.
@
-#ifdef MULTI_ABORT
+#ifdef MULTI_DABORT
ldr r4, .LCprocfns
mov lr, pc
- ldr pc, [r4]
+ ldr pc, [r4, #PROCESSOR_DABT_FUNC]
#else
- bl CPU_ABORT_HANDLER
+ bl CPU_DABORT_HANDLER
#endif
@
@@ -455,10 +448,6 @@ __irq_usr:
__und_usr:
usr_entry
- tst r3, #PSR_T_BIT @ Thumb mode?
- bne __und_usr_unknown @ ignore FP
- sub r4, r2, #4
-
@
@ fall through to the emulation code, which returns using r9 if
@ it has emulated the instruction, or the more conventional lr
@@ -468,7 +457,24 @@ __und_usr:
@
adr r9, ret_from_exception
adr lr, __und_usr_unknown
-1: ldrt r0, [r4]
+ tst r3, #PSR_T_BIT @ Thumb mode?
+ subeq r4, r2, #4 @ ARM instr at LR - 4
+ subne r4, r2, #2 @ Thumb instr at LR - 2
+1: ldreqt r0, [r4]
+ beq call_fpe
+ @ Thumb instruction
+#if __LINUX_ARM_ARCH__ >= 7
+2: ldrht r5, [r4], #2
+ and r0, r5, #0xf800 @ mask bits 111x x... .... ....
+ cmp r0, #0xe800 @ 32bit instruction if xx != 0
+ blo __und_usr_unknown
+3: ldrht r0, [r4]
+ add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
+ orr r0, r0, r5, lsl #16
+#else
+ b __und_usr_unknown
+#endif
+
@
@ fallthrough to call_fpe
@
@@ -477,10 +483,14 @@ __und_usr:
* The out of line fixup for the ldrt above.
*/
.section .fixup, "ax"
-2: mov pc, r9
+4: mov pc, r9
.previous
.section __ex_table,"a"
- .long 1b, 2b
+ .long 1b, 4b
+#if __LINUX_ARM_ARCH__ >= 7
+ .long 2b, 4b
+ .long 3b, 4b
+#endif
.previous
/*
@@ -507,9 +517,16 @@ __und_usr:
* r10 = this thread's thread_info structure.
* lr = unrecognised instruction return address
*/
+ @
+ @ Fall-through from Thumb-2 __und_usr
+ @
+#ifdef CONFIG_NEON
+ adr r6, .LCneon_thumb_opcodes
+ b 2f
+#endif
call_fpe:
#ifdef CONFIG_NEON
- adr r6, .LCneon_opcodes
+ adr r6, .LCneon_arm_opcodes
2:
ldr r7, [r6], #4 @ mask value
cmp r7, #0 @ end mask?
@@ -526,6 +543,7 @@ call_fpe:
1:
#endif
tst r0, #0x08000000 @ only CDP/CPRT/LDC/STC have bit 27
+ tstne r0, #0x04000000 @ bit 26 set on both ARM and Thumb-2
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
and r8, r0, #0x0f000000 @ mask out op-code bits
teqne r8, #0x0f000000 @ SWI (ARM6/7 bug)?
@@ -577,7 +595,7 @@ call_fpe:
#ifdef CONFIG_NEON
.align 6
-.LCneon_opcodes:
+.LCneon_arm_opcodes:
.word 0xfe000000 @ mask
.word 0xf2000000 @ opcode
@@ -586,6 +604,16 @@ call_fpe:
.word 0x00000000 @ mask
.word 0x00000000 @ opcode
+
+.LCneon_thumb_opcodes:
+ .word 0xef000000 @ mask
+ .word 0xef000000 @ opcode
+
+ .word 0xff100000 @ mask
+ .word 0xf9000000 @ opcode
+
+ .word 0x00000000 @ mask
+ .word 0x00000000 @ opcode
#endif
do_fpe:
@@ -619,8 +647,15 @@ __und_usr_unknown:
__pabt_usr:
usr_entry
+#ifdef MULTI_PABORT
+ mov r0, r2 @ pass address of aborted instruction.
+ ldr r4, .LCprocfns
+ mov lr, pc
+ ldr pc, [r4, #PROCESSOR_PABT_FUNC]
+#else
+ CPU_PABORT_HANDLER(r0, r2)
+#endif
enable_irq @ Enable interrupts
- mov r0, r2 @ address (pc)
mov r1, sp @ regs
bl do_PrefetchAbort @ call abort handler
/* fall through */
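
A note on the reworked __und_usr above: it now fetches Thumb instructions as well, deciding between 16-bit and 32-bit encodings by the top five bits of the first halfword (0b111xx with xx != 00, i.e. a masked value >= 0xe800, opens a 32-bit encoding) and then combining the two halfwords exactly as the orr does. A C sketch of that width test, with an illustrative main (function names are mine, not kernel API):

    #include <stdint.h>
    #include <stdio.h>

    /* The first halfword of a 32-bit Thumb-2 encoding has bits 15..11 set
     * to 0b11101, 0b11110 or 0b11111 — the same >= 0xe800 test as the asm. */
    static int thumb_insn_is_32bit(uint16_t hw)
    {
            return (hw & 0xf800) >= 0xe800;
    }

    int main(void)
    {
            uint16_t first = 0xf3bf, second = 0x8f4f;   /* DSB SY in Thumb-2 */

            if (thumb_insn_is_32bit(first))
                    /* first halfword to the high bits, as in
                     * "orr r0, r0, r5, lsl #16" above */
                    printf("combined: 0x%08x\n",
                           ((uint32_t)first << 16) | second);
            return 0;
    }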
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 6c90c50a9ee3..597ed00a08d8 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -352,6 +352,11 @@ sys_mmap2:
b do_mmap2
#endif
+ENTRY(pabort_ifar)
+ mrc p15, 0, r0, cr6, cr0, 2
+ENTRY(pabort_noifar)
+ mov pc, lr
+
#ifdef CONFIG_OABI_COMPAT
/*
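
pabort_ifar and pabort_noifar, added just above, are the two entry points a proc_info's _prefetch_abort pointer selects between: cores with an Instruction Fault Address Register read the precise faulting address from CP15, while older cores keep the PC the caller already placed in r0 — note that pabort_ifar deliberately falls through into pabort_noifar's mov pc, lr. A rough C rendering, assuming IFAR at CP15 c6/c0/2 (ARM-only inline asm; function names are illustrative):

    /* What the two helpers compute: the address handed to do_PrefetchAbort. */
    static inline unsigned long pabort_address_ifar(unsigned long pc)
    {
            unsigned long ifar;

            /* IFAR: Instruction Fault Address Register, CP15 c6/c0/2 */
            asm("mrc p15, 0, %0, c6, c0, 2" : "=r" (ifar));
            return ifar;            /* replaces the PC, as pabort_ifar does */
    }

    static inline unsigned long pabort_address_noifar(unsigned long pc)
    {
            return pc;              /* pabort_noifar: r0 left untouched */
    }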
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 024a9cf469b4..7e9c00a8a412 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -11,6 +11,9 @@
*
*/
+#define ATAG_CORE 0x54410001
+#define ATAG_CORE_SIZE ((2*4 + 3*4) >> 2)
+
.type __switch_data, %object
__switch_data:
.long __mmap_switched
@@ -72,8 +75,13 @@ __error_p:
#ifdef CONFIG_DEBUG_LL
adr r0, str_p1
bl printascii
+ mov r0, r9
+ bl printhex8
+ adr r0, str_p2
+ bl printascii
b __error
-str_p1: .asciz "\nError: unrecognized/unsupported processor variant.\n"
+str_p1: .asciz "\nError: unrecognized/unsupported processor variant (0x"
+str_p2: .asciz ").\n"
.align
#endif
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 7898cbc9861a..bff4c6e90dd5 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -29,9 +29,6 @@
#define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET)
#define KERNEL_RAM_PADDR (PHYS_OFFSET + TEXT_OFFSET)
-#define ATAG_CORE 0x54410001
-#define ATAG_CORE_SIZE ((2*4 + 3*4) >> 2)
-
/*
* swapper_pg_dir is the virtual address of the initial page table.
diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c
index a22a98c43ca5..13e371aad879 100644
--- a/arch/arm/kernel/kprobes.c
+++ b/arch/arm/kernel/kprobes.c
@@ -431,6 +431,11 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
return 0;
}
+int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+{
+ return 0;
+}
+
static struct undef_hook kprobes_break_hook = {
.instr_mask = 0xffffffff,
.instr_val = KPROBE_BREAKPOINT_INSTRUCTION,
diff --git a/arch/arm/kernel/semaphore.c b/arch/arm/kernel/semaphore.c
deleted file mode 100644
index 981fe5c6ccbe..000000000000
--- a/arch/arm/kernel/semaphore.c
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * ARM semaphore implementation, taken from
- *
- * i386 semaphore implementation.
- *
- * (C) Copyright 1999 Linus Torvalds
- *
- * Modified for ARM by Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-
-#include <asm/semaphore.h>
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to acquire the semaphore, while the "sleeping"
- * variable is a count of such acquires.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation).
- *
- * "sleeping" and the contention routine ordering is
- * protected by the semaphore spinlock.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-
-/*
- * Logic:
- * - only on a boundary condition do we need to care. When we go
- * from a negative count to a non-negative, we wake people up.
- * - when we go from a non-negative count to a negative, we must
- * (a) synchronize with the "sleeper" count and (b) make sure
- * that we're on the wakeup list before we synchronize so that
- * we cannot lose wakeup events.
- */
-
-void __up(struct semaphore *sem)
-{
- wake_up(&sem->wait);
-}
-
-static DEFINE_SPINLOCK(semaphore_lock);
-
-void __sched __down(struct semaphore * sem)
-{
- struct task_struct *tsk = current;
- DECLARE_WAITQUEUE(wait, tsk);
- tsk->state = TASK_UNINTERRUPTIBLE;
- add_wait_queue_exclusive(&sem->wait, &wait);
-
- spin_lock_irq(&semaphore_lock);
- sem->sleepers++;
- for (;;) {
- int sleepers = sem->sleepers;
-
- /*
- * Add "everybody else" into it. They aren't
- * playing, because we own the spinlock.
- */
- if (!atomic_add_negative(sleepers - 1, &sem->count)) {
- sem->sleepers = 0;
- break;
- }
- sem->sleepers = 1; /* us - see -1 above */
- spin_unlock_irq(&semaphore_lock);
-
- schedule();
- tsk->state = TASK_UNINTERRUPTIBLE;
- spin_lock_irq(&semaphore_lock);
- }
- spin_unlock_irq(&semaphore_lock);
- remove_wait_queue(&sem->wait, &wait);
- tsk->state = TASK_RUNNING;
- wake_up(&sem->wait);
-}
-
-int __sched __down_interruptible(struct semaphore * sem)
-{
- int retval = 0;
- struct task_struct *tsk = current;
- DECLARE_WAITQUEUE(wait, tsk);
- tsk->state = TASK_INTERRUPTIBLE;
- add_wait_queue_exclusive(&sem->wait, &wait);
-
- spin_lock_irq(&semaphore_lock);
- sem->sleepers ++;
- for (;;) {
- int sleepers = sem->sleepers;
-
- /*
- * With signals pending, this turns into
- * the trylock failure case - we won't be
- * sleeping, and we can't get the lock as
- * it has contention. Just correct the count
- * and exit.
- */
- if (signal_pending(current)) {
- retval = -EINTR;
- sem->sleepers = 0;
- atomic_add(sleepers, &sem->count);
- break;
- }
-
- /*
- * Add "everybody else" into it. They aren't
- * playing, because we own the spinlock. The
- * "-1" is because we're still hoping to get
- * the lock.
- */
- if (!atomic_add_negative(sleepers - 1, &sem->count)) {
- sem->sleepers = 0;
- break;
- }
- sem->sleepers = 1; /* us - see -1 above */
- spin_unlock_irq(&semaphore_lock);
-
- schedule();
- tsk->state = TASK_INTERRUPTIBLE;
- spin_lock_irq(&semaphore_lock);
- }
- spin_unlock_irq(&semaphore_lock);
- tsk->state = TASK_RUNNING;
- remove_wait_queue(&sem->wait, &wait);
- wake_up(&sem->wait);
- return retval;
-}
-
-/*
- * Trylock failed - make sure we correct for
- * having decremented the count.
- *
- * We could have done the trylock with a
- * single "cmpxchg" without failure cases,
- * but then it wouldn't work on a 386.
- */
-int __down_trylock(struct semaphore * sem)
-{
- int sleepers;
- unsigned long flags;
-
- spin_lock_irqsave(&semaphore_lock, flags);
- sleepers = sem->sleepers + 1;
- sem->sleepers = 0;
-
- /*
- * Add "everybody else" and us into it. They aren't
- * playing, because we own the spinlock.
- */
- if (!atomic_add_negative(sleepers, &sem->count))
- wake_up(&sem->wait);
-
- spin_unlock_irqrestore(&semaphore_lock, flags);
- return 1;
-}
-
-/*
- * The semaphore operations have a special calling sequence that
- * allow us to do a simpler in-line version of them. These routines
- * need to convert that sequence back into the C sequence when
- * there is contention on the semaphore.
- *
- * ip contains the semaphore pointer on entry. Save the C-clobbered
- * registers (r0 to r3 and lr), but not ip, as we use it as a return
- * value in some cases.
- * To remain AAPCS compliant (64-bit stack align) we save r4 as well.
- */
-asm(" .section .sched.text,\"ax\",%progbits \n\
- .align 5 \n\
- .globl __down_failed \n\
-__down_failed: \n\
- stmfd sp!, {r0 - r4, lr} \n\
- mov r0, ip \n\
- bl __down \n\
- ldmfd sp!, {r0 - r4, pc} \n\
- \n\
- .align 5 \n\
- .globl __down_interruptible_failed \n\
-__down_interruptible_failed: \n\
- stmfd sp!, {r0 - r4, lr} \n\
- mov r0, ip \n\
- bl __down_interruptible \n\
- mov ip, r0 \n\
- ldmfd sp!, {r0 - r4, pc} \n\
- \n\
- .align 5 \n\
- .globl __down_trylock_failed \n\
-__down_trylock_failed: \n\
- stmfd sp!, {r0 - r4, lr} \n\
- mov r0, ip \n\
- bl __down_trylock \n\
- mov ip, r0 \n\
- ldmfd sp!, {r0 - r4, pc} \n\
- \n\
- .align 5 \n\
- .globl __up_wakeup \n\
-__up_wakeup: \n\
- stmfd sp!, {r0 - r4, lr} \n\
- mov r0, ip \n\
- bl __up \n\
- ldmfd sp!, {r0 - r4, pc} \n\
- ");
-
-EXPORT_SYMBOL(__down_failed);
-EXPORT_SYMBOL(__down_interruptible_failed);
-EXPORT_SYMBOL(__down_trylock_failed);
-EXPORT_SYMBOL(__up_wakeup);
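
The deleted file's comments describe a two-way counter: count goes negative under contention, sleepers records how many decrements the waiters collectively owe back, and atomic_add_negative() is the add-and-test primitive that settles the books while the spinlock is held. A toy C11 model of just that primitive and the two fields it operates on (toy_sem and toy_add_negative are illustrative names, not kernel API):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct toy_sem {
            atomic_int count;       /* > 0: free slots, < 0: contended */
            int sleepers;           /* protected by the semaphore spinlock */
    };

    /* Kernel's atomic_add_negative(i, v): add i to *v, report result < 0. */
    static bool toy_add_negative(int i, atomic_int *v)
    {
            return atomic_fetch_add(v, i) + i < 0;
    }

In __down(), passing sleepers - 1 folds every other waiter's pending decrement back into count in one step; if the result is still non-negative, this waiter owns the semaphore and zeroes sleepers.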
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 54cdf1aeefc3..ef2f86a5e78a 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -26,8 +26,8 @@
/*
* For ARM syscalls, we encode the syscall number into the instruction.
*/
-#define SWI_SYS_SIGRETURN (0xef000000|(__NR_sigreturn))
-#define SWI_SYS_RT_SIGRETURN (0xef000000|(__NR_rt_sigreturn))
+#define SWI_SYS_SIGRETURN (0xef000000|(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE))
+#define SWI_SYS_RT_SIGRETURN (0xef000000|(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE))
/*
* With EABI, the syscall number has to be loaded into r7.
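
With the OABI base OR-ed in, the two constants encode different words than before. A quick check that prints the results, assuming this era's ARM unistd.h values (__NR_OABI_SYSCALL_BASE = 0x900000, __NR_sigreturn = 119, __NR_rt_sigreturn = 173 — hedged from the OABI numbering, not stated in this diff):

    #include <stdio.h>

    #define OABI_BASE        0x00900000u   /* assumed __NR_OABI_SYSCALL_BASE */
    #define NR_SIGRETURN     119u          /* assumed __NR_sigreturn */
    #define NR_RT_SIGRETURN  173u          /* assumed __NR_rt_sigreturn */

    int main(void)
    {
            printf("SWI_SYS_SIGRETURN    = 0x%08x\n",
                   0xef000000u | OABI_BASE | NR_SIGRETURN);    /* 0xef900077 */
            printf("SWI_SYS_RT_SIGRETURN = 0x%08x\n",
                   0xef000000u | OABI_BASE | NR_RT_SIGRETURN); /* 0xef9000ad */
            return 0;
    }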
diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c
index e8b98046895b..96ab5f52949c 100644
--- a/arch/arm/kernel/sys_oabi-compat.c
+++ b/arch/arm/kernel/sys_oabi-compat.c
@@ -25,6 +25,7 @@
* sys_stat64:
* sys_lstat64:
* sys_fstat64:
+ * sys_fstatat64:
*
* struct stat64 has different sizes and some members are shifted
* Compatibility wrappers are needed for them and provided below.
@@ -169,6 +170,29 @@ asmlinkage long sys_oabi_fstat64(unsigned long fd,
return error;
}
+asmlinkage long sys_oabi_fstatat64(int dfd,
+ char __user *filename,
+ struct oldabi_stat64 __user *statbuf,
+ int flag)
+{
+ struct kstat stat;
+ int error = -EINVAL;
+
+ if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0)
+ goto out;
+
+ if (flag & AT_SYMLINK_NOFOLLOW)
+ error = vfs_lstat_fd(dfd, filename, &stat);
+ else
+ error = vfs_stat_fd(dfd, filename, &stat);
+
+ if (!error)
+ error = cp_oldabi_stat64(&stat, statbuf);
+
+out:
+ return error;
+}
+
struct oabi_flock64 {
short l_type;
short l_whence;
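
From userspace the new wrapper behaves like an ordinary fstatat() that honours exactly one flag, AT_SYMLINK_NOFOLLOW; any other bit returns -EINVAL before the stat is attempted. A minimal caller for comparison (the path is arbitrary):

    #include <stdio.h>
    #include <fcntl.h>
    #include <sys/stat.h>

    int main(void)
    {
            struct stat st;

            /* flag == AT_SYMLINK_NOFOLLOW: stat the link itself, not its target */
            if (fstatat(AT_FDCWD, "/etc/mtab", &st, AT_SYMLINK_NOFOLLOW) != 0) {
                    perror("fstatat");
                    return 1;
            }
            printf("size=%lld mode=%o\n",
                   (long long)st.st_size, (unsigned)st.st_mode);
            return 0;
    }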
diff --git a/arch/arm/kernel/thumbee.c b/arch/arm/kernel/thumbee.c
new file mode 100644
index 000000000000..df3f6b7ebcea
--- /dev/null
+++ b/arch/arm/kernel/thumbee.c
@@ -0,0 +1,81 @@
+/*
+ * arch/arm/kernel/thumbee.c
+ *
+ * Copyright (C) 2008 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <asm/thread_notify.h>
+
+/*
+ * Access to the ThumbEE Handler Base register
+ */
+static inline unsigned long teehbr_read(void)
+{
+ unsigned long v;
+ asm("mrc p14, 6, %0, c1, c0, 0\n" : "=r" (v));
+ return v;
+}
+
+static inline void teehbr_write(unsigned long v)
+{
+ asm("mcr p14, 6, %0, c1, c0, 0\n" : : "r" (v));
+}
+
+static int thumbee_notifier(struct notifier_block *self, unsigned long cmd, void *t)
+{
+ struct thread_info *thread = t;
+
+ switch (cmd) {
+ case THREAD_NOTIFY_FLUSH:
+ thread->thumbee_state = 0;
+ break;
+ case THREAD_NOTIFY_SWITCH:
+ current_thread_info()->thumbee_state = teehbr_read();
+ teehbr_write(thread->thumbee_state);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block thumbee_notifier_block = {
+ .notifier_call = thumbee_notifier,
+};
+
+static int __init thumbee_init(void)
+{
+ unsigned long pfr0;
+ unsigned int cpu_arch = cpu_architecture();
+
+ if (cpu_arch < CPU_ARCH_ARMv7)
+ return 0;
+
+ /* processor feature register 0 */
+ asm("mrc p15, 0, %0, c0, c1, 0\n" : "=r" (pfr0));
+ if ((pfr0 & 0x0000f000) != 0x00001000)
+ return 0;
+
+ printk(KERN_INFO "ThumbEE CPU extension supported.\n");
+ elf_hwcap |= HWCAP_THUMBEE;
+ thread_register_notifier(&thumbee_notifier_block);
+
+ return 0;
+}
+
+late_initcall(thumbee_init);
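
Once thumbee_init() has run on a capable CPU, the HWCAP_THUMBEE bit set above is visible to userspace through the ELF auxiliary vector. A hedged detection example (getauxval() needs glibc >= 2.16; the fallback bit value is assumed from asm/hwcap.h, not taken from this diff):

    #include <stdio.h>
    #include <sys/auxv.h>

    #ifndef HWCAP_THUMBEE
    #define HWCAP_THUMBEE (1 << 11)   /* assumed value from <asm/hwcap.h> */
    #endif

    int main(void)
    {
            unsigned long hwcap = getauxval(AT_HWCAP);

            printf("ThumbEE %ssupported\n",
                   (hwcap & HWCAP_THUMBEE) ? "" : "not ");
            return 0;
    }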