Diffstat (limited to 'arch/arm/kernel')
-rw-r--r-- | arch/arm/kernel/calls.S           |   4
-rw-r--r-- | arch/arm/kernel/entry-armv.S      |  10
-rw-r--r-- | arch/arm/kernel/entry-header.S    |   2
-rw-r--r-- | arch/arm/kernel/ftrace.c          |   8
-rw-r--r-- | arch/arm/kernel/irq.c             |   1
-rw-r--r-- | arch/arm/kernel/kgdb.c            |  13
-rw-r--r-- | arch/arm/kernel/kprobes.c         |  11
-rw-r--r-- | arch/arm/kernel/module.c          |   2
-rw-r--r-- | arch/arm/kernel/perf_event.c      |   9
-rw-r--r-- | arch/arm/kernel/process.c         |   9
-rw-r--r-- | arch/arm/kernel/ptrace.c          |  60
-rw-r--r-- | arch/arm/kernel/ptrace.h          |  14
-rw-r--r-- | arch/arm/kernel/signal.c          |  93
-rw-r--r-- | arch/arm/kernel/smp.c             |   4
-rw-r--r-- | arch/arm/kernel/sys_arm.c         | 131
-rw-r--r-- | arch/arm/kernel/sys_oabi-compat.c |   3
-rw-r--r-- | arch/arm/kernel/unwind.c          |   4
17 files changed, 154 insertions, 224 deletions
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 9314a2d681f1..37ae301cc47c 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -91,7 +91,7 @@
 		CALL(sys_settimeofday)
 /* 80 */	CALL(sys_getgroups16)
 		CALL(sys_setgroups16)
-		CALL(OBSOLETE(old_select))	/* used by libc4 */
+		CALL(OBSOLETE(sys_old_select))	/* used by libc4 */
 		CALL(sys_symlink)
 		CALL(sys_ni_syscall)		/* was sys_lstat */
 /* 85 */	CALL(sys_readlink)
@@ -99,7 +99,7 @@
 		CALL(sys_swapon)
 		CALL(sys_reboot)
 		CALL(OBSOLETE(sys_old_readdir))	/* used by libc4 */
-/* 90 */	CALL(OBSOLETE(old_mmap))	/* used by libc4 */
+/* 90 */	CALL(OBSOLETE(sys_old_mmap))	/* used by libc4 */
 		CALL(sys_munmap)
 		CALL(sys_truncate)
 		CALL(sys_ftruncate)
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 6c5cf369183b..e6a0fb0f392e 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -523,16 +523,16 @@ ENDPROC(__und_usr)
 /*
  * The out of line fixup for the ldrt above.
  */
-	.section .fixup, "ax"
+	.pushsection .fixup, "ax"
 4:	mov	pc, r9
-	.previous
-	.section __ex_table,"a"
+	.popsection
+	.pushsection __ex_table,"a"
 	.long	1b, 4b
 #if __LINUX_ARM_ARCH__ >= 7
 	.long	2b, 4b
 	.long	3b, 4b
 #endif
-	.previous
+	.popsection
 
 /*
  * Check whether the instruction is a co-processor instruction.
@@ -679,7 +679,7 @@ do_fpe:
 	.data
 ENTRY(fp_enter)
 	.word	no_fp
-	.previous
+	.text
 
 ENTRY(no_fp)
 	mov	pc, lr
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 7e9ed1eea40a..d93f976fb389 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -102,6 +102,8 @@
 	.else
 	ldmdb	sp, {r0 - lr}^			@ get calling r0 - lr
 	.endif
+	mov	r0, r0				@ ARMv5T and earlier require a nop
+						@ after ldm {}^
 	add	sp, sp, #S_FRAME_SIZE - S_PC
 	movs	pc, lr				@ return & move spsr_svc into cpsr
 	.endm
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index c63842766229..0298286ad4ad 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -62,15 +62,15 @@ int ftrace_modify_code(unsigned long pc, unsigned char *old_code,
 		"	movne	%0, #2		\n"
 		"3:\n"
 
-		".section .fixup, \"ax\"\n"
+		".pushsection .fixup, \"ax\"\n"
 		"4:	mov	%0, #1		\n"
 		"	b	3b		\n"
-		".previous\n"
+		".popsection\n"
 
-		".section __ex_table, \"a\"\n"
+		".pushsection __ex_table, \"a\"\n"
 		"	.long	1b, 4b	\n"
 		"	.long	2b, 4b	\n"
-		".previous\n"
+		".popsection\n"
 
 		: "=r"(err), "=r"(replaced)
 		: "r"(pc), "r"(new), "r"(old), "0"(err), "1"(replaced)
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index b7cb45bb91e8..3b3d2c80509c 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -27,7 +27,6 @@
 #include <linux/ioport.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
-#include <linux/slab.h>
 #include <linux/random.h>
 #include <linux/smp.h>
 #include <linux/init.h>
diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
index ba8ccfede964..a5b846b9895d 100644
--- a/arch/arm/kernel/kgdb.c
+++ b/arch/arm/kernel/kgdb.c
@@ -9,6 +9,7 @@
  * Authors:  George Davis <davis_g@mvista.com>
  *           Deepak Saxena <dsaxena@plexity.net>
  */
+#include <linux/irq.h>
 #include <linux/kgdb.h>
 #include <asm/traps.h>
 
@@ -158,6 +159,18 @@ static struct undef_hook kgdb_compiled_brkpt_hook = {
 	.fn		= kgdb_compiled_brk_fn
 };
 
+static void kgdb_call_nmi_hook(void *ignored)
+{
+	kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
+}
+
+void kgdb_roundup_cpus(unsigned long flags)
+{
+	local_irq_enable();
+	smp_call_function(kgdb_call_nmi_hook, NULL, 0);
+	local_irq_disable();
+}
+
 /**
  *	kgdb_arch_init - Perform any architecture specific initalization.
  *
diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c
index 60c62c377fa9..2ba7deb3072e 100644
--- a/arch/arm/kernel/kprobes.c
+++ b/arch/arm/kernel/kprobes.c
@@ -22,6 +22,7 @@
 #include <linux/kernel.h>
 #include <linux/kprobes.h>
 #include <linux/module.h>
+#include <linux/slab.h>
 #include <linux/stop_machine.h>
 #include <linux/stringify.h>
 #include <asm/traps.h>
@@ -393,6 +394,14 @@ void __kprobes jprobe_return(void)
 		/*
 		 * Setup an empty pt_regs. Fill SP and PC fields as
 		 * they're needed by longjmp_break_handler.
+		 *
+		 * We allocate some slack between the original SP and start of
+		 * our fabricated regs. To be precise we want to have worst case
+		 * covered which is STMFD with all 16 regs so we allocate 2 *
+		 * sizeof(struct_pt_regs)).
+		 *
+		 * This is to prevent any simulated instruction from writing
+		 * over the regs when they are accessing the stack.
 		 */
 		"sub	sp, %0, %1		\n\t"
 		"ldr	r0, ="__stringify(JPROBE_MAGIC_ADDR)"\n\t"
@@ -410,7 +419,7 @@ void __kprobes jprobe_return(void)
 		"ldmia	sp, {r0 - pc}		\n\t"
 		:
 		: "r" (kcb->jprobe_saved_regs.ARM_sp),
-		  "I" (sizeof(struct pt_regs)),
+		  "I" (sizeof(struct pt_regs) * 2),
 		  "J" (offsetof(struct pt_regs, ARM_sp)),
 		  "J" (offsetof(struct pt_regs, ARM_pc)),
 		  "J" (offsetof(struct pt_regs, ARM_cpsr))
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index f28c5e9c51ea..c628bdf6c430 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -16,9 +16,9 @@
 #include <linux/mm.h>
 #include <linux/elf.h>
 #include <linux/vmalloc.h>
-#include <linux/slab.h>
 #include <linux/fs.h>
 #include <linux/string.h>
+#include <linux/gfp.h>
 
 #include <asm/pgtable.h>
 #include <asm/sections.h>
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index c54ceb3d1f97..9e70f2053f9a 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -332,7 +332,8 @@ armpmu_reserve_hardware(void)
 
 	for (i = 0; i < pmu_irqs->num_irqs; ++i) {
 		err = request_irq(pmu_irqs->irqs[i], armpmu->handle_irq,
-				  IRQF_DISABLED, "armpmu", NULL);
+				  IRQF_DISABLED | IRQF_NOBALANCING,
+				  "armpmu", NULL);
 		if (err) {
 			pr_warning("unable to request IRQ%d for ARM "
 				   "perf counters\n", pmu_irqs->irqs[i]);
@@ -965,7 +966,7 @@ armv6pmu_handle_irq(int irq_num,
 	 */
 	armv6_pmcr_write(pmcr);
 
-	data.addr = 0;
+	perf_sample_data_init(&data, 0);
 
 	cpuc = &__get_cpu_var(cpu_hw_events);
 	for (idx = 0; idx <= armpmu->num_events; ++idx) {
@@ -1624,7 +1625,7 @@ enum armv7_counters {
 /*
  * EVTSEL: Event selection reg
  */
-#define	ARMV7_EVTSEL_MASK	0x7f		/* Mask for writable bits */
+#define	ARMV7_EVTSEL_MASK	0xff		/* Mask for writable bits */
 
 /*
  * SELECT: Counter selection reg
@@ -1945,7 +1946,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 	 */
 	regs = get_irq_regs();
 
-	data.addr = 0;
+	perf_sample_data_init(&data, 0);
 
 	cpuc = &__get_cpu_var(cpu_hw_events);
 	for (idx = 0; idx <= armpmu->num_events; ++idx) {
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index ba2adefa53f7..acf5e6fdb6dc 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -16,7 +16,6 @@
 #include <linux/mm.h>
 #include <linux/stddef.h>
 #include <linux/unistd.h>
-#include <linux/slab.h>
 #include <linux/user.h>
 #include <linux/delay.h>
 #include <linux/reboot.h>
@@ -356,7 +355,7 @@ EXPORT_SYMBOL(dump_fpu);
  * the thread function, and r3 points to the exit function.
  */
 extern void kernel_thread_helper(void);
-asm(	".section .text\n"
+asm(	".pushsection .text\n"
 "	.align\n"
 "	.type	kernel_thread_helper, #function\n"
 "kernel_thread_helper:\n"
@@ -364,11 +363,11 @@ asm(	".section .text\n"
 "	mov	lr, r3\n"
 "	mov	pc, r2\n"
 "	.size	kernel_thread_helper, . - kernel_thread_helper\n"
-"	.previous");
+"	.popsection");
 
 #ifdef CONFIG_ARM_UNWIND
 extern void kernel_thread_exit(long code);
-asm(	".section .text\n"
+asm(	".pushsection .text\n"
 "	.align\n"
 "	.type	kernel_thread_exit, #function\n"
 "kernel_thread_exit:\n"
@@ -378,7 +377,7 @@ asm(	".section .text\n"
 "	nop\n"
 "	.fnend\n"
 "	.size	kernel_thread_exit, . - kernel_thread_exit\n"
-"	.previous");
+"	.popsection");
 #else
 #define kernel_thread_exit	do_exit
 #endif
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 08f899fb76a6..3f562a7c0a99 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -452,12 +452,23 @@ void ptrace_cancel_bpt(struct task_struct *child)
 		clear_breakpoint(child, &child->thread.debug.bp[i]);
 }
 
+void user_disable_single_step(struct task_struct *task)
+{
+	task->ptrace &= ~PT_SINGLESTEP;
+	ptrace_cancel_bpt(task);
+}
+
+void user_enable_single_step(struct task_struct *task)
+{
+	task->ptrace |= PT_SINGLESTEP;
+}
+
 /*
  * Called by kernel/ptrace.c when detaching..
  */
 void ptrace_disable(struct task_struct *child)
 {
-	single_step_disable(child);
+	user_disable_single_step(child);
 }
 
 /*
@@ -753,53 +764,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 			ret = ptrace_write_user(child, addr, data);
 			break;
 
-		/*
-		 * continue/restart and stop at next (return from) syscall
-		 */
-		case PTRACE_SYSCALL:
-		case PTRACE_CONT:
-			ret = -EIO;
-			if (!valid_signal(data))
-				break;
-			if (request == PTRACE_SYSCALL)
-				set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-			else
-				clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-			child->exit_code = data;
-			single_step_disable(child);
-			wake_up_process(child);
-			ret = 0;
-			break;
-
-		/*
-		 * make the child exit.  Best I can do is send it a sigkill.
-		 * perhaps it should be put in the status that it wants to
-		 * exit.
-		 */
-		case PTRACE_KILL:
-			single_step_disable(child);
-			if (child->exit_state != EXIT_ZOMBIE) {
-				child->exit_code = SIGKILL;
-				wake_up_process(child);
-			}
-			ret = 0;
-			break;
-
-		/*
-		 * execute single instruction.
-		 */
-		case PTRACE_SINGLESTEP:
-			ret = -EIO;
-			if (!valid_signal(data))
-				break;
-			single_step_enable(child);
-			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-			child->exit_code = data;
-			/* give it a chance to run. */
-			wake_up_process(child);
-			ret = 0;
-			break;
-
 		case PTRACE_GETREGS:
 			ret = ptrace_getregs(child, (void __user *)data);
 			break;
diff --git a/arch/arm/kernel/ptrace.h b/arch/arm/kernel/ptrace.h
index def3b6184a79..3926605b82ea 100644
--- a/arch/arm/kernel/ptrace.h
+++ b/arch/arm/kernel/ptrace.h
@@ -14,20 +14,6 @@ extern void ptrace_set_bpt(struct task_struct *);
 extern void ptrace_break(struct task_struct *, struct pt_regs *);
 
 /*
- * make sure single-step breakpoint is gone.
- */
-static inline void single_step_disable(struct task_struct *task)
-{
-	task->ptrace &= ~PT_SINGLESTEP;
-	ptrace_cancel_bpt(task);
-}
-
-static inline void single_step_enable(struct task_struct *task)
-{
-	task->ptrace |= PT_SINGLESTEP;
-}
-
-/*
  * Send SIGTRAP if we're single-stepping
  */
 static inline void single_step_trap(struct task_struct *task)
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index e7714f367eb8..907d5a620bca 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -18,6 +18,7 @@
 #include <asm/cacheflush.h>
 #include <asm/ucontext.h>
 #include <asm/unistd.h>
+#include <asm/vfp.h>
 
 #include "ptrace.h"
 #include "signal.h"
@@ -175,6 +176,90 @@ static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
 
 #endif
 
+#ifdef CONFIG_VFP
+
+static int preserve_vfp_context(struct vfp_sigframe __user *frame)
+{
+	struct thread_info *thread = current_thread_info();
+	struct vfp_hard_struct *h = &thread->vfpstate.hard;
+	const unsigned long magic = VFP_MAGIC;
+	const unsigned long size = VFP_STORAGE_SIZE;
+	int err = 0;
+
+	vfp_sync_hwstate(thread);
+	__put_user_error(magic, &frame->magic, err);
+	__put_user_error(size, &frame->size, err);
+
+	/*
+	 * Copy the floating point registers. There can be unused
+	 * registers see asm/hwcap.h for details.
+	 */
+	err |= __copy_to_user(&frame->ufp.fpregs, &h->fpregs,
+			      sizeof(h->fpregs));
+	/*
+	 * Copy the status and control register.
+	 */
+	__put_user_error(h->fpscr, &frame->ufp.fpscr, err);
+
+	/*
+	 * Copy the exception registers.
+	 */
+	__put_user_error(h->fpexc, &frame->ufp_exc.fpexc, err);
+	__put_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
+	__put_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
+
+	return err ? -EFAULT : 0;
+}
+
+static int restore_vfp_context(struct vfp_sigframe __user *frame)
+{
+	struct thread_info *thread = current_thread_info();
+	struct vfp_hard_struct *h = &thread->vfpstate.hard;
+	unsigned long magic;
+	unsigned long size;
+	unsigned long fpexc;
+	int err = 0;
+
+	__get_user_error(magic, &frame->magic, err);
+	__get_user_error(size, &frame->size, err);
+
+	if (err)
+		return -EFAULT;
+	if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
+		return -EINVAL;
+
+	/*
+	 * Copy the floating point registers. There can be unused
+	 * registers see asm/hwcap.h for details.
+	 */
+	err |= __copy_from_user(&h->fpregs, &frame->ufp.fpregs,
+				sizeof(h->fpregs));
+	/*
+	 * Copy the status and control register.
+	 */
+	__get_user_error(h->fpscr, &frame->ufp.fpscr, err);
+
+	/*
+	 * Sanitise and restore the exception registers.
+	 */
+	__get_user_error(fpexc, &frame->ufp_exc.fpexc, err);
+	/* Ensure the VFP is enabled. */
+	fpexc |= FPEXC_EN;
+	/* Ensure FPINST2 is invalid and the exception flag is cleared. */
+	fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
+	h->fpexc = fpexc;
+
+	__get_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
+	__get_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
+
+	if (!err)
+		vfp_flush_hwstate(thread);
+
+	return err ? -EFAULT : 0;
+}
+
+#endif
+
 /*
  * Do a signal return; undo the signal stack. These are aligned to 64-bit.
  */
@@ -233,8 +318,8 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
 	err |= restore_iwmmxt_context(&aux->iwmmxt);
 #endif
 #ifdef CONFIG_VFP
-//	if (err == 0)
-//		err |= vfp_restore_state(&sf->aux.vfp);
+	if (err == 0)
+		err |= restore_vfp_context(&aux->vfp);
 #endif
 
 	return err;
@@ -348,8 +433,8 @@ setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
 	err |= preserve_iwmmxt_context(&aux->iwmmxt);
 #endif
 #ifdef CONFIG_VFP
-//	if (err == 0)
-//		err |= vfp_save_state(&sf->aux.vfp);
+	if (err == 0)
+		err |= preserve_vfp_context(&aux->vfp);
 #endif
 
 	__put_user_error(0, &aux->end_magic, err);
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 57162af53dc9..577543f3857f 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -99,6 +99,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	*pmd = __pmd((PHYS_OFFSET & PGDIR_MASK) |
 		     PMD_TYPE_SECT | PMD_SECT_AP_WRITE);
 	flush_pmd_entry(pmd);
+	outer_clean_range(__pa(pmd), __pa(pmd + 1));
 
 	/*
 	 * We need to tell the secondary core where to find
@@ -106,7 +107,8 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	 */
 	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
 	secondary_data.pgdir = virt_to_phys(pgd);
-	wmb();
+	__cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
+	outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));
 
 	/*
 	 * Now bring the CPU into our world.
diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c
index ae4027bd01bd..c23501842b98 100644
--- a/arch/arm/kernel/sys_arm.c
+++ b/arch/arm/kernel/sys_arm.c
@@ -15,7 +15,6 @@
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/sched.h>
-#include <linux/slab.h>
 #include <linux/mm.h>
 #include <linux/sem.h>
 #include <linux/msg.h>
@@ -27,135 +26,7 @@
 #include <linux/file.h>
 #include <linux/ipc.h>
 #include <linux/uaccess.h>
-
-struct mmap_arg_struct {
-	unsigned long addr;
-	unsigned long len;
-	unsigned long prot;
-	unsigned long flags;
-	unsigned long fd;
-	unsigned long offset;
-};
-
-asmlinkage int old_mmap(struct mmap_arg_struct __user *arg)
-{
-	int error = -EFAULT;
-	struct mmap_arg_struct a;
-
-	if (copy_from_user(&a, arg, sizeof(a)))
-		goto out;
-
-	error = -EINVAL;
-	if (a.offset & ~PAGE_MASK)
-		goto out;
-
-	error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
-out:
-	return error;
-}
-
-/*
- * Perform the select(nd, in, out, ex, tv) and mmap() system
- * calls.
- */
-
-struct sel_arg_struct {
-	unsigned long n;
-	fd_set __user *inp, *outp, *exp;
-	struct timeval __user *tvp;
-};
-
-asmlinkage int old_select(struct sel_arg_struct __user *arg)
-{
-	struct sel_arg_struct a;
-
-	if (copy_from_user(&a, arg, sizeof(a)))
-		return -EFAULT;
-	/* sys_select() does the appropriate kernel locking */
-	return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
-}
-
-#if !defined(CONFIG_AEABI) || defined(CONFIG_OABI_COMPAT)
-/*
- * sys_ipc() is the de-multiplexer for the SysV IPC calls..
- *
- * This is really horribly ugly.
- */
-asmlinkage int sys_ipc(uint call, int first, int second, int third,
-		       void __user *ptr, long fifth)
-{
-	int version, ret;
-
-	version = call >> 16; /* hack for backward compatibility */
-	call &= 0xffff;
-
-	switch (call) {
-	case SEMOP:
-		return sys_semtimedop (first, (struct sembuf __user *)ptr, second, NULL);
-	case SEMTIMEDOP:
-		return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
-				      (const struct timespec __user *)fifth);
-
-	case SEMGET:
-		return sys_semget (first, second, third);
-	case SEMCTL: {
-		union semun fourth;
-		if (!ptr)
-			return -EINVAL;
-		if (get_user(fourth.__pad, (void __user * __user *) ptr))
-			return -EFAULT;
-		return sys_semctl (first, second, third, fourth);
-	}
-
-	case MSGSND:
-		return sys_msgsnd(first, (struct msgbuf __user *) ptr,
-				  second, third);
-	case MSGRCV:
-		switch (version) {
-		case 0: {
-			struct ipc_kludge tmp;
-			if (!ptr)
-				return -EINVAL;
-			if (copy_from_user(&tmp,(struct ipc_kludge __user *)ptr,
-					   sizeof (tmp)))
-				return -EFAULT;
-			return sys_msgrcv (first, tmp.msgp, second,
-					   tmp.msgtyp, third);
-		}
-		default:
-			return sys_msgrcv (first,
-					   (struct msgbuf __user *) ptr,
-					   second, fifth, third);
-		}
-	case MSGGET:
-		return sys_msgget ((key_t) first, second);
-	case MSGCTL:
-		return sys_msgctl(first, second, (struct msqid_ds __user *)ptr);
-
-	case SHMAT:
-		switch (version) {
-		default: {
-			ulong raddr;
-			ret = do_shmat(first, (char __user *)ptr, second, &raddr);
-			if (ret)
-				return ret;
-			return put_user(raddr, (ulong __user *)third);
-		}
-		case 1: /* Of course, we don't support iBCS2! */
-			return -EINVAL;
-		}
-	case SHMDT:
-		return sys_shmdt ((char __user *)ptr);
-	case SHMGET:
-		return sys_shmget (first, second, third);
-	case SHMCTL:
-		return sys_shmctl (first, second,
-				   (struct shmid_ds __user *) ptr);
-	default:
-		return -ENOSYS;
-	}
-}
-#endif
+#include <linux/slab.h>
 
 /* Fork a new task - this creates a new program thread.
  * This is called indirectly via a small wrapper
diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c
index d59a0cd537f0..33ff678e32f2 100644
--- a/arch/arm/kernel/sys_oabi-compat.c
+++ b/arch/arm/kernel/sys_oabi-compat.c
@@ -346,9 +346,6 @@ asmlinkage long sys_oabi_semop(int semid, struct oabi_sembuf __user *tsops,
 	return sys_oabi_semtimedop(semid, tsops, nsops, NULL);
 }
 
-extern asmlinkage int sys_ipc(uint call, int first, int second, int third,
-			      void __user *ptr, long fifth);
-
 asmlinkage int sys_oabi_ipc(uint call, int first, int second, int third,
 			    void __user *ptr, long fifth)
 {
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index 786ac2b6914a..50292cd9c120 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -359,7 +359,9 @@ void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 		frame.fp = regs->ARM_fp;
 		frame.sp = regs->ARM_sp;
 		frame.lr = regs->ARM_lr;
-		frame.pc = regs->ARM_pc;
+		/* PC might be corrupted, use LR in that case. */
+		frame.pc = kernel_text_address(regs->ARM_pc)
+			 ? regs->ARM_pc : regs->ARM_lr;
 	} else if (tsk == current) {
 		frame.fp = (unsigned long)__builtin_frame_address(0);
 		frame.sp = current_sp;