Diffstat (limited to 'arch/mips/kernel'):

 -rw-r--r--  arch/mips/kernel/cpu-probe.c   |  62
 -rw-r--r--  arch/mips/kernel/irixsig.c     |  63
 -rw-r--r--  arch/mips/kernel/linux32.c     |  10
 -rw-r--r--  arch/mips/kernel/process.c     | 257
 -rw-r--r--  arch/mips/kernel/scall32-o32.S |   2
 -rw-r--r--  arch/mips/kernel/scall64-64.S  |   2
 -rw-r--r--  arch/mips/kernel/scall64-n32.S |   6
 -rw-r--r--  arch/mips/kernel/scall64-o32.S |   4
 -rw-r--r--  arch/mips/kernel/setup.c       | 439
 -rw-r--r--  arch/mips/kernel/signal.c      |   8
 -rw-r--r--  arch/mips/kernel/signal32.c    |   7
 -rw-r--r--  arch/mips/kernel/smp-mt.c      |   2
 -rw-r--r--  arch/mips/kernel/smtc-asm.S    |   2
 -rw-r--r--  arch/mips/kernel/syscall.c     |   4
 -rw-r--r--  arch/mips/kernel/traps.c       | 146
 -rw-r--r--  arch/mips/kernel/vpe.c         |   6

 16 files changed, 561 insertions(+), 459 deletions(-)
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index aa2caa67299a..9fbf8430c849 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -38,15 +38,40 @@ static void r3081_wait(void)
static void r39xx_wait(void)
{
- unsigned long cfg = read_c0_conf();
- write_c0_conf(cfg | TX39_CONF_HALT);
+ local_irq_disable();
+ if (!need_resched())
+ write_c0_conf(read_c0_conf() | TX39_CONF_HALT);
+ local_irq_enable();
}
+/*
+ * There is a race when the WAIT instruction is executed with interrupts
+ * enabled.
+ * But it is implementation-dependent whether the pipeline restarts when
+ * a non-enabled interrupt is requested.
+ */
static void r4k_wait(void)
{
- __asm__(".set\tmips3\n\t"
- "wait\n\t"
- ".set\tmips0");
+ __asm__(" .set mips3 \n"
+ " wait \n"
+ " .set mips0 \n");
+}
+
+/*
+ * This variant is preferable as it allows testing need_resched and going to
+ * sleep depending on the outcome atomically. Unfortunately the "It is
+ * implementation-dependent whether the pipeline restarts when a non-enabled
+ * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
+ * using this version a gamble.
+ */
+static void r4k_wait_irqoff(void)
+{
+ local_irq_disable();
+ if (!need_resched())
+ __asm__(" .set mips3 \n"
+ " wait \n"
+ " .set mips0 \n");
+ local_irq_enable();
}
/* The Au1xxx wait is available only if using 32khz counter or
@@ -56,17 +81,17 @@ int allow_au1k_wait;
static void au1k_wait(void)
{
/* using the wait instruction makes CP0 counter unusable */
- __asm__(".set mips3\n\t"
- "cache 0x14, 0(%0)\n\t"
- "cache 0x14, 32(%0)\n\t"
- "sync\n\t"
- "nop\n\t"
- "wait\n\t"
- "nop\n\t"
- "nop\n\t"
- "nop\n\t"
- "nop\n\t"
- ".set mips0\n\t"
+ __asm__(" .set mips3 \n"
+ " cache 0x14, 0(%0) \n"
+ " cache 0x14, 32(%0) \n"
+ " sync \n"
+ " nop \n"
+ " wait \n"
+ " nop \n"
+ " nop \n"
+ " nop \n"
+ " nop \n"
+ " .set mips0 \n"
: : "r" (au1k_wait));
}
@@ -111,7 +136,6 @@ static inline void check_wait(void)
case CPU_NEVADA:
case CPU_RM7000:
case CPU_RM9000:
- case CPU_TX49XX:
case CPU_4KC:
case CPU_4KEC:
case CPU_4KSC:
@@ -125,6 +149,10 @@ static inline void check_wait(void)
cpu_wait = r4k_wait;
printk(" available.\n");
break;
+ case CPU_TX49XX:
+ cpu_wait = r4k_wait_irqoff;
+ printk(" available.\n");
+ break;
case CPU_AU1000:
case CPU_AU1100:
case CPU_AU1500:
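To make the race described in the comment above concrete, here is a minimal sketch (illustrative only, not part of this patch) contrasting the unsafe ordering with the interrupts-off variant the TX49xx now uses:

#include <linux/sched.h>	/* need_resched() */
#include <linux/irqflags.h>	/* local_irq_disable()/local_irq_enable() */

/* Unsafe ordering: the window between the check and WAIT can lose a wakeup. */
static void racy_wait(void)
{
	if (!need_resched())
		/* An interrupt may fire right here, set TIF_NEED_RESCHED ... */
		__asm__("	.set	mips3	\n"
			"	wait		\n"
			"	.set	mips0	\n");
		/* ... and the CPU sleeps anyway until the next interrupt. */
}

/* Safe ordering (what r4k_wait_irqoff() does): the need_resched() test and
 * the WAIT are effectively atomic with respect to interrupts.  The gamble is
 * that the architecture leaves it implementation-dependent whether WAIT
 * restarts on a non-enabled (masked) interrupt. */
static void irqoff_wait(void)
{
	local_irq_disable();
	if (!need_resched())
		__asm__("	.set	mips3	\n"
			"	wait		\n"
			"	.set	mips0	\n");
	local_irq_enable();
}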
diff --git a/arch/mips/kernel/irixsig.c b/arch/mips/kernel/irixsig.c
index 676e868d26fb..2132485caa74 100644
--- a/arch/mips/kernel/irixsig.c
+++ b/arch/mips/kernel/irixsig.c
@@ -17,6 +17,7 @@
#include <asm/ptrace.h>
#include <asm/uaccess.h>
+#include <asm/unistd.h>
#undef DEBUG_SIG
@@ -172,11 +173,12 @@ static inline int handle_signal(unsigned long sig, siginfo_t *info,
return ret;
}
-asmlinkage int do_irix_signal(sigset_t *oldset, struct pt_regs *regs)
+void do_irix_signal(struct pt_regs *regs)
{
struct k_sigaction ka;
siginfo_t info;
int signr;
+ sigset_t *oldset;
/*
* We want the common case to go fast, which is why we may in certain
@@ -184,19 +186,28 @@ asmlinkage int do_irix_signal(sigset_t *oldset, struct pt_regs *regs)
* if so.
*/
if (!user_mode(regs))
- return 1;
+ return;
- if (try_to_freeze())
- goto no_signal;
-
- if (!oldset)
+ if (test_thread_flag(TIF_RESTORE_SIGMASK))
+ oldset = &current->saved_sigmask;
+ else
oldset = &current->blocked;
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
- if (signr > 0)
- return handle_signal(signr, &info, &ka, oldset, regs);
+ if (signr > 0) {
+ /* Whee! Actually deliver the signal. */
+ if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
+ /* a signal was successfully delivered; the saved
+ * sigmask will have been stored in the signal frame,
+ * and will be restored by sigreturn, so we can simply
+ * clear the TIF_RESTORE_SIGMASK flag */
+ if (test_thread_flag(TIF_RESTORE_SIGMASK))
+ clear_thread_flag(TIF_RESTORE_SIGMASK);
+ }
+
+ return;
+ }
-no_signal:
/*
* Who's code doesn't conform to the restartable syscall convention
* dies here!!! The li instruction, a single machine instruction,
@@ -208,8 +219,22 @@ no_signal:
regs->regs[2] == ERESTARTNOINTR) {
regs->cp0_epc -= 8;
}
+ if (regs->regs[2] == ERESTART_RESTARTBLOCK) {
+ regs->regs[2] = __NR_restart_syscall;
+ regs->regs[7] = regs->regs[26];
+ regs->cp0_epc -= 4;
+ }
+ regs->regs[0] = 0; /* Don't deal with this again. */
+ }
+
+ /*
+ * If there's no signal to deliver, we just put the saved sigmask
+ * back
+ */
+ if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
+ clear_thread_flag(TIF_RESTORE_SIGMASK);
+ sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
}
- return 0;
}
asmlinkage void
@@ -298,6 +323,9 @@ struct sigact_irix5 {
int _unused0[2];
};
+#define SIG_SETMASK32 256 /* Goodie from SGI for BSD compatibility:
+ set only the low 32 bits of the sigset. */
+
#ifdef DEBUG_SIG
static inline void dump_sigact_irix5(struct sigact_irix5 *p)
{
@@ -413,7 +441,7 @@ asmlinkage int irix_sigprocmask(int how, irix_sigset_t __user *new,
asmlinkage int irix_sigsuspend(struct pt_regs *regs)
{
- sigset_t saveset, newset;
+ sigset_t newset;
sigset_t __user *uset;
uset = (sigset_t __user *) regs->regs[4];
@@ -422,18 +450,15 @@ asmlinkage int irix_sigsuspend(struct pt_regs *regs)
sigdelsetmask(&newset, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
- saveset = current->blocked;
+ current->saved_sigmask = current->blocked;
current->blocked = newset;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
- regs->regs[2] = -EINTR;
- while (1) {
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- if (do_irix_signal(&saveset, regs))
- return -EINTR;
- }
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ set_thread_flag(TIF_RESTORE_SIGMASK);
+ return -ERESTARTNOHAND;
}
/* hate hate hate... */
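The do_irix_signal()/irix_sigsuspend() rework above is the standard TIF_RESTORE_SIGMASK conversion. A condensed sketch of the two halves of that pattern (hypothetical helper names; the fields and flags are the ones used in the patch):

#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/spinlock.h>
#include <linux/errno.h>

/* Syscall side: install the temporary mask, go to sleep, and ask the signal
 * code to restore the original mask on the way back to user space. */
static long sigsuspend_sketch(sigset_t newset)
{
	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;	/* stash the old mask */
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();				/* sleep until a signal arrives */
	set_thread_flag(TIF_RESTORE_SIGMASK);
	return -ERESTARTNOHAND;
}

/* Signal-delivery side: if a signal frame was set up it already carries
 * saved_sigmask, so the flag is simply cleared; otherwise the old mask is
 * put back by hand. */
static void restore_sigmask_sketch(int frame_set_up)
{
	if (!test_thread_flag(TIF_RESTORE_SIGMASK))
		return;
	clear_thread_flag(TIF_RESTORE_SIGMASK);
	if (!frame_set_up)
		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
}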
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 450ac592da57..43b1162d714f 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -991,7 +991,7 @@ struct sysctl_args32
unsigned int __unused[4];
};
-#ifdef CONFIG_SYSCTL
+#ifdef CONFIG_SYSCTL_SYSCALL
asmlinkage long sys32_sysctl(struct sysctl_args32 __user *args)
{
@@ -1032,7 +1032,7 @@ asmlinkage long sys32_sysctl(struct sysctl_args32 __user *args)
return error;
}
-#endif /* CONFIG_SYSCTL */
+#endif /* CONFIG_SYSCTL_SYSCALL */
asmlinkage long sys32_newuname(struct new_utsname __user * name)
{
@@ -1296,9 +1296,3 @@ _sys32_clone(nabi_no_regargs struct pt_regs regs)
return do_fork(clone_flags, newsp, &regs, 0,
parent_tidptr, child_tidptr);
}
-
-extern asmlinkage void sys_set_thread_area(u32 addr);
-asmlinkage void sys32_set_thread_area(u32 addr)
-{
- sys_set_thread_area(AA(addr));
-}
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 7ab67f786bfe..2613a0dd4b82 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -273,104 +273,107 @@ long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
-static struct mips_frame_info {
- void *func;
- unsigned long func_size;
- int frame_size;
- int pc_offset;
-} *schedule_frame, mfinfo[64];
-static int mfinfo_num;
-
-static int __init get_frame_info(struct mips_frame_info *info)
+/*
+ * Stack frame layout info derived from a function prologue: the frame size
+ * and the stack slot in which $ra was saved.
+ */
+struct mips_frame_info {
+ void *func;
+ unsigned long func_size;
+ int frame_size;
+ int pc_offset;
+};
+
+static inline int is_ra_save_ins(union mips_instruction *ip)
{
- int i;
- void *func = info->func;
- union mips_instruction *ip = (union mips_instruction *)func;
+ /* sw / sd $ra, offset($sp) */
+ return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
+ ip->i_format.rs == 29 &&
+ ip->i_format.rt == 31;
+}
+
+static inline int is_jal_jalr_jr_ins(union mips_instruction *ip)
+{
+ if (ip->j_format.opcode == jal_op)
+ return 1;
+ if (ip->r_format.opcode != spec_op)
+ return 0;
+ return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
+}
+
+static inline int is_sp_move_ins(union mips_instruction *ip)
+{
+ /* addiu/daddiu sp,sp,-imm */
+ if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
+ return 0;
+ if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op)
+ return 1;
+ return 0;
+}
+
+static int get_frame_info(struct mips_frame_info *info)
+{
+ union mips_instruction *ip = info->func;
+ unsigned max_insns = info->func_size / sizeof(union mips_instruction);
+ unsigned i;
+
info->pc_offset = -1;
info->frame_size = 0;
- for (i = 0; i < 128; i++, ip++) {
- /* if jal, jalr, jr, stop. */
- if (ip->j_format.opcode == jal_op ||
- (ip->r_format.opcode == spec_op &&
- (ip->r_format.func == jalr_op ||
- ip->r_format.func == jr_op)))
- break;
- if (info->func_size && i >= info->func_size / 4)
+ if (!ip)
+ goto err;
+
+ if (max_insns == 0)
+ max_insns = 128U; /* unknown function size */
+ max_insns = min(128U, max_insns);
+
+ for (i = 0; i < max_insns; i++, ip++) {
+
+ if (is_jal_jalr_jr_ins(ip))
break;
- if (
-#ifdef CONFIG_32BIT
- ip->i_format.opcode == addiu_op &&
-#endif
-#ifdef CONFIG_64BIT
- ip->i_format.opcode == daddiu_op &&
-#endif
- ip->i_format.rs == 29 &&
- ip->i_format.rt == 29) {
- /* addiu/daddiu sp,sp,-imm */
- if (info->frame_size)
- continue;
- info->frame_size = - ip->i_format.simmediate;
+ if (!info->frame_size) {
+ if (is_sp_move_ins(ip))
+ info->frame_size = - ip->i_format.simmediate;
+ continue;
}
-
- if (
-#ifdef CONFIG_32BIT
- ip->i_format.opcode == sw_op &&
-#endif
-#ifdef CONFIG_64BIT
- ip->i_format.opcode == sd_op &&
-#endif
- ip->i_format.rs == 29 &&
- ip->i_format.rt == 31) {
- /* sw / sd $ra, offset($sp) */
- if (info->pc_offset != -1)
- continue;
+ if (info->pc_offset == -1 && is_ra_save_ins(ip)) {
info->pc_offset =
ip->i_format.simmediate / sizeof(long);
+ break;
}
}
- if (info->pc_offset == -1 || info->frame_size == 0) {
- if (func == schedule)
- printk("Can't analyze prologue code at %p\n", func);
- info->pc_offset = -1;
- info->frame_size = 0;
- }
-
- return 0;
+ if (info->frame_size && info->pc_offset >= 0) /* nested */
+ return 0;
+ if (info->pc_offset < 0) /* leaf */
+ return 1;
+ /* prologue seems bogus... */
+err:
+ return -1;
}
+static struct mips_frame_info schedule_mfi __read_mostly;
+
static int __init frame_info_init(void)
{
- int i;
+ unsigned long size = 0;
#ifdef CONFIG_KALLSYMS
+ unsigned long ofs;
char *modname;
char namebuf[KSYM_NAME_LEN + 1];
- unsigned long start, size, ofs;
- extern char __sched_text_start[], __sched_text_end[];
- extern char __lock_text_start[], __lock_text_end[];
-
- start = (unsigned long)__sched_text_start;
- for (i = 0; i < ARRAY_SIZE(mfinfo); i++) {
- if (start == (unsigned long)schedule)
- schedule_frame = &mfinfo[i];
- if (!kallsyms_lookup(start, &size, &ofs, &modname, namebuf))
- break;
- mfinfo[i].func = (void *)(start + ofs);
- mfinfo[i].func_size = size;
- start += size - ofs;
- if (start >= (unsigned long)__lock_text_end)
- break;
- if (start == (unsigned long)__sched_text_end)
- start = (unsigned long)__lock_text_start;
- }
-#else
- mfinfo[0].func = schedule;
- schedule_frame = &mfinfo[0];
+
+ kallsyms_lookup((unsigned long)schedule, &size, &ofs, &modname, namebuf);
#endif
- for (i = 0; i < ARRAY_SIZE(mfinfo) && mfinfo[i].func; i++)
- get_frame_info(&mfinfo[i]);
+ schedule_mfi.func = schedule;
+ schedule_mfi.func_size = size;
+
+ get_frame_info(&schedule_mfi);
+
+ /*
+ * Without schedule() frame info, the results given by
+ * thread_saved_pc() and get_wchan() are not reliable.
+ */
+ if (schedule_mfi.pc_offset < 0)
+ printk("Can't analyze schedule() prologue at %p\n", schedule);
- mfinfo_num = i;
return 0;
}
@@ -386,54 +389,86 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
/* New born processes are a special case */
if (t->reg31 == (unsigned long) ret_from_fork)
return t->reg31;
-
- if (!schedule_frame || schedule_frame->pc_offset < 0)
+ if (schedule_mfi.pc_offset < 0)
return 0;
- return ((unsigned long *)t->reg29)[schedule_frame->pc_offset];
+ return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset];
}
-/* get_wchan - a maintenance nightmare^W^Wpain in the ass ... */
-unsigned long get_wchan(struct task_struct *p)
+
+#ifdef CONFIG_KALLSYMS
+/* used by show_backtrace() */
+unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
+ unsigned long pc, unsigned long ra)
{
unsigned long stack_page;
- unsigned long pc;
-#ifdef CONFIG_KALLSYMS
- unsigned long frame;
-#endif
+ struct mips_frame_info info;
+ char *modname;
+ char namebuf[KSYM_NAME_LEN + 1];
+ unsigned long size, ofs;
+ int leaf;
- if (!p || p == current || p->state == TASK_RUNNING)
+ stack_page = (unsigned long)task_stack_page(task);
+ if (!stack_page)
return 0;
- stack_page = (unsigned long)task_stack_page(p);
- if (!stack_page || !mfinfo_num)
+ if (!kallsyms_lookup(pc, &size, &ofs, &modname, namebuf))
+ return 0;
+ /*
+ * Return ra if an exception occurred at the first instruction
+ */
+ if (unlikely(ofs == 0))
+ return ra;
+
+ info.func = (void *)(pc - ofs);
+ info.func_size = ofs; /* analyze from start to ofs */
+ leaf = get_frame_info(&info);
+ if (leaf < 0)
+ return 0;
+
+ if (*sp < stack_page ||
+ *sp + info.frame_size > stack_page + THREAD_SIZE - 32)
return 0;
- pc = thread_saved_pc(p);
+ if (leaf)
+ /*
+ * In some extreme cases, get_frame_info() can
+ * wrongly consider a nested function to be a leaf
+ * one. In such cases, avoid always returning the
+ * same value.
+ */
+ pc = pc != ra ? ra : 0;
+ else
+ pc = ((unsigned long *)(*sp))[info.pc_offset];
+
+ *sp += info.frame_size;
+ return __kernel_text_address(pc) ? pc : 0;
+}
+#endif
+
+/*
+ * get_wchan - a maintenance nightmare^W^Wpain in the ass ...
+ */
+unsigned long get_wchan(struct task_struct *task)
+{
+ unsigned long pc = 0;
#ifdef CONFIG_KALLSYMS
- if (!in_sched_functions(pc))
- return pc;
+ unsigned long sp;
+#endif
- frame = p->thread.reg29 + schedule_frame->frame_size;
- do {
- int i;
+ if (!task || task == current || task->state == TASK_RUNNING)
+ goto out;
+ if (!task_stack_page(task))
+ goto out;
- if (frame < stack_page || frame > stack_page + THREAD_SIZE - 32)
- return 0;
+ pc = thread_saved_pc(task);
- for (i = mfinfo_num - 1; i >= 0; i--) {
- if (pc >= (unsigned long) mfinfo[i].func)
- break;
- }
- if (i < 0)
- break;
+#ifdef CONFIG_KALLSYMS
+ sp = task->thread.reg29 + schedule_mfi.frame_size;
- pc = ((unsigned long *)frame)[mfinfo[i].pc_offset];
- if (!mfinfo[i].frame_size)
- break;
- frame += mfinfo[i].frame_size;
- } while (in_sched_functions(pc));
+ while (in_sched_functions(pc))
+ pc = unwind_stack(task, &sp, pc, 0);
#endif
+out:
return pc;
}
-
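For reference, the prologue shape that get_frame_info() above matches looks like this (hand-written example, not taken from the patch), and a frame is popped using the same two values the unwind_stack() loop reads:

/*
 * Typical o32 prologue recognized by the scanner:
 *
 *	addiu	$sp, $sp, -32		is_sp_move_ins():  frame_size = 32
 *	sw	$ra, 28($sp)		is_ra_save_ins():  pc_offset  = 28 / sizeof(long)
 *
 * Hypothetical helper using those values -- the same arithmetic as in
 * unwind_stack() above. */
static unsigned long pop_frame_sketch(unsigned long *sp,
				      const struct mips_frame_info *info)
{
	unsigned long ra = ((unsigned long *)*sp)[info->pc_offset];

	*sp += info->frame_size;	/* step up to the caller's frame */
	return ra;			/* the caller's return address */
}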
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index ba1bcd83c7d3..e71785102206 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -662,6 +662,8 @@ einval: li v0, -EINVAL
sys sys_tee 4
sys sys_vmsplice 4
sys sys_move_pages 6
+ sys sys_set_robust_list 2
+ sys sys_get_robust_list 3
.endm
/* We pre-compute the number of _instruction_ bytes needed to
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 939e172db953..4c22d0b4825d 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -466,3 +466,5 @@ sys_call_table:
PTR sys_tee /* 5265 */
PTR sys_vmsplice
PTR sys_move_pages
+ PTR sys_set_robust_list
+ PTR sys_get_robust_list
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 98abbc5a9f13..f25c2a2f1038 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -247,7 +247,7 @@ EXPORT(sysn32_call_table)
PTR sys_capset
PTR sys32_rt_sigpending /* 6125 */
PTR compat_sys_rt_sigtimedwait
- PTR sys_rt_sigqueueinfo
+ PTR sys32_rt_sigqueueinfo
PTR sysn32_rt_sigsuspend
PTR sys32_sigaltstack
PTR compat_sys_utime /* 6130 */
@@ -390,5 +390,7 @@ EXPORT(sysn32_call_table)
PTR sys_splice
PTR sys_sync_file_range
PTR sys_tee
- PTR sys_vmsplice /* 6271 */
+ PTR sys_vmsplice /* 6270 */
PTR sys_move_pages
+ PTR compat_sys_set_robust_list
+ PTR compat_sys_get_robust_list
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 505c9ee54009..288ee4ac4dbb 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -498,7 +498,7 @@ sys_call_table:
PTR sys_mknodat /* 4290 */
PTR sys_fchownat
PTR compat_sys_futimesat
- PTR compat_sys_newfstatat
+ PTR sys_newfstatat
PTR sys_unlinkat
PTR sys_renameat /* 4295 */
PTR sys_linkat
@@ -514,4 +514,6 @@ sys_call_table:
PTR sys_tee
PTR sys_vmsplice
PTR compat_sys_move_pages
+ PTR compat_sys_set_robust_list
+ PTR compat_sys_get_robust_list /* 4310 */
.size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 8c2b596a136f..fdbb508661c5 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -10,29 +10,15 @@
* Copyright (C) 1999 Silicon Graphics, Inc.
* Copyright (C) 2000 2001, 2002 Maciej W. Rozycki
*/
-#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
#include <linux/module.h>
-#include <linux/stddef.h>
-#include <linux/string.h>
-#include <linux/unistd.h>
-#include <linux/slab.h>
-#include <linux/user.h>
-#include <linux/utsname.h>
-#include <linux/a.out.h>
#include <linux/screen_info.h>
#include <linux/bootmem.h>
#include <linux/initrd.h>
-#include <linux/major.h>
-#include <linux/kdev_t.h>
#include <linux/root_dev.h>
#include <linux/highmem.h>
#include <linux/console.h>
-#include <linux/mmzone.h>
#include <linux/pfn.h>
#include <asm/addrspace.h>
@@ -96,6 +82,12 @@ void __init add_memory_region(phys_t start, phys_t size, long type)
int x = boot_mem_map.nr_map;
struct boot_mem_map_entry *prev = boot_mem_map.map + x - 1;
+ /* Sanity check */
+ if (start + size < start) {
+ printk("Trying to add an invalid memory region, skipped\n");
+ return;
+ }
+
/*
* Try to merge with previous entry if any. This is far less than
* perfect but is sufficient for most real world cases.
@@ -143,167 +135,132 @@ static void __init print_memory_map(void)
}
}
-static inline void parse_cmdline_early(void)
+/*
+ * Manage initrd
+ */
+#ifdef CONFIG_BLK_DEV_INITRD
+
+static int __init rd_start_early(char *p)
{
- char c = ' ', *to = command_line, *from = saved_command_line;
- unsigned long start_at, mem_size;
- int len = 0;
- int usermem = 0;
+ unsigned long start = memparse(p, &p);
- printk("Determined physical RAM map:\n");
- print_memory_map();
+#ifdef CONFIG_64BIT
+ /* HACK: Guess if the sign extension was forgotten */
+ if (start > 0x0000000080000000 && start < 0x00000000ffffffff)
+ start |= 0xffffffff00000000UL;
+#endif
+ initrd_start = start;
+ initrd_end += start;
- for (;;) {
- /*
- * "mem=XXX[kKmM]" defines a memory region from
- * 0 to <XXX>, overriding the determined size.
- * "mem=XXX[KkmM]@YYY[KkmM]" defines a memory region from
- * <YYY> to <YYY>+<XXX>, overriding the determined size.
- */
- if (c == ' ' && !memcmp(from, "mem=", 4)) {
- if (to != command_line)
- to--;
- /*
- * If a user specifies memory size, we
- * blow away any automatically generated
- * size.
- */
- if (usermem == 0) {
- boot_mem_map.nr_map = 0;
- usermem = 1;
- }
- mem_size = memparse(from + 4, &from);
- if (*from == '@')
- start_at = memparse(from + 1, &from);
- else
- start_at = 0;
- add_memory_region(start_at, mem_size, BOOT_MEM_RAM);
- }
- c = *(from++);
- if (!c)
- break;
- if (CL_SIZE <= ++len)
- break;
- *(to++) = c;
- }
- *to = '\0';
+ return 0;
+}
+early_param("rd_start", rd_start_early);
- if (usermem) {
- printk("User-defined physical RAM map:\n");
- print_memory_map();
- }
+static int __init rd_size_early(char *p)
+{
+ initrd_end += memparse(p, &p);
+
+ return 0;
}
+early_param("rd_size", rd_size_early);
-static inline int parse_rd_cmdline(unsigned long* rd_start, unsigned long* rd_end)
+static unsigned long __init init_initrd(void)
{
+ unsigned long tmp, end, size;
+ u32 *initrd_header;
+
+ ROOT_DEV = Root_RAM0;
+
/*
- * "rd_start=0xNNNNNNNN" defines the memory address of an initrd
- * "rd_size=0xNN" it's size
+ * Board specific code or command line parser should have
+ * already set up initrd_start and initrd_end. In these cases
+ * perform sanity checks and use them if all looks good.
*/
- unsigned long start = 0;
- unsigned long size = 0;
- unsigned long end;
- char cmd_line[CL_SIZE];
- char *start_str;
- char *size_str;
- char *tmp;
-
- strcpy(cmd_line, command_line);
- *command_line = 0;
- tmp = cmd_line;
- /* Ignore "rd_start=" strings in other parameters. */
- start_str = strstr(cmd_line, "rd_start=");
- if (start_str && start_str != cmd_line && *(start_str - 1) != ' ')
- start_str = strstr(start_str, " rd_start=");
- while (start_str) {
- if (start_str != cmd_line)
- strncat(command_line, tmp, start_str - tmp);
- start = memparse(start_str + 9, &start_str);
- tmp = start_str + 1;
- start_str = strstr(start_str, " rd_start=");
+ size = initrd_end - initrd_start;
+ if (initrd_end == 0 || size == 0) {
+ initrd_start = 0;
+ initrd_end = 0;
+ } else
+ return initrd_end;
+
+ end = (unsigned long)&_end;
+ tmp = PAGE_ALIGN(end) - sizeof(u32) * 2;
+ if (tmp < end)
+ tmp += PAGE_SIZE;
+
+ initrd_header = (u32 *)tmp;
+ if (initrd_header[0] == 0x494E5244) {
+ initrd_start = (unsigned long)&initrd_header[2];
+ initrd_end = initrd_start + initrd_header[1];
}
- if (*tmp)
- strcat(command_line, tmp);
-
- strcpy(cmd_line, command_line);
- *command_line = 0;
- tmp = cmd_line;
- /* Ignore "rd_size" strings in other parameters. */
- size_str = strstr(cmd_line, "rd_size=");
- if (size_str && size_str != cmd_line && *(size_str - 1) != ' ')
- size_str = strstr(size_str, " rd_size=");
- while (size_str) {
- if (size_str != cmd_line)
- strncat(command_line, tmp, size_str - tmp);
- size = memparse(size_str + 8, &size_str);
- tmp = size_str + 1;
- size_str = strstr(size_str, " rd_size=");
+ return initrd_end;
+}
+
+static void __init finalize_initrd(void)
+{
+ unsigned long size = initrd_end - initrd_start;
+
+ if (size == 0) {
+ printk(KERN_INFO "Initrd not found or empty");
+ goto disable;
+ }
+ if (CPHYSADDR(initrd_end) > PFN_PHYS(max_low_pfn)) {
+ printk("Initrd extends beyond end of memory");
+ goto disable;
}
- if (*tmp)
- strcat(command_line, tmp);
-#ifdef CONFIG_64BIT
- /* HACK: Guess if the sign extension was forgotten */
- if (start > 0x0000000080000000 && start < 0x00000000ffffffff)
- start |= 0xffffffff00000000UL;
+ reserve_bootmem(CPHYSADDR(initrd_start), size);
+ initrd_below_start_ok = 1;
+
+ printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n",
+ initrd_start, size);
+ return;
+disable:
+ printk(" - disabling initrd\n");
+ initrd_start = 0;
+ initrd_end = 0;
+}
+
+#else /* !CONFIG_BLK_DEV_INITRD */
+
+#define init_initrd() 0
+#define finalize_initrd() do {} while (0)
+
#endif
- end = start + size;
- if (start && end) {
- *rd_start = start;
- *rd_end = end;
- return 1;
- }
- return 0;
+/*
+ * Initialize the bootmem allocator. It also sets up initrd-related data
+ * if needed.
+ */
+#ifdef CONFIG_SGI_IP27
+
+static void __init bootmem_init(void)
+{
+ init_initrd();
+ finalize_initrd();
}
-#define MAXMEM HIGHMEM_START
-#define MAXMEM_PFN PFN_DOWN(MAXMEM)
+#else /* !CONFIG_SGI_IP27 */
-static inline void bootmem_init(void)
+static void __init bootmem_init(void)
{
- unsigned long start_pfn;
- unsigned long reserved_end = (unsigned long)&_end;
-#ifndef CONFIG_SGI_IP27
- unsigned long first_usable_pfn;
+ unsigned long reserved_end;
+ unsigned long highest = 0;
+ unsigned long mapstart = -1UL;
unsigned long bootmap_size;
int i;
-#endif
-#ifdef CONFIG_BLK_DEV_INITRD
- int initrd_reserve_bootmem = 0;
-
- /* Board specific code should have set up initrd_start and initrd_end */
- ROOT_DEV = Root_RAM0;
- if (parse_rd_cmdline(&initrd_start, &initrd_end)) {
- reserved_end = max(reserved_end, initrd_end);
- initrd_reserve_bootmem = 1;
- } else {
- unsigned long tmp;
- u32 *initrd_header;
-
- tmp = ((reserved_end + PAGE_SIZE-1) & PAGE_MASK) - sizeof(u32) * 2;
- if (tmp < reserved_end)
- tmp += PAGE_SIZE;
- initrd_header = (u32 *)tmp;
- if (initrd_header[0] == 0x494E5244) {
- initrd_start = (unsigned long)&initrd_header[2];
- initrd_end = initrd_start + initrd_header[1];
- reserved_end = max(reserved_end, initrd_end);
- initrd_reserve_bootmem = 1;
- }
- }
-#endif /* CONFIG_BLK_DEV_INITRD */
/*
- * Partially used pages are not usable - thus
- * we are rounding upwards.
+ * Init any data related to initrd. It's a nop if INITRD is
+ * not selected. Once that is done we can determine the lower bound
+ * of usable memory.
*/
- start_pfn = PFN_UP(CPHYSADDR(reserved_end));
+ reserved_end = init_initrd();
+ reserved_end = PFN_UP(CPHYSADDR(max(reserved_end, (unsigned long)&_end)));
-#ifndef CONFIG_SGI_IP27
- /* Find the highest page frame number we have available. */
- max_pfn = 0;
- first_usable_pfn = -1UL;
+ /*
+ * Find the highest page frame number we have available.
+ */
for (i = 0; i < boot_mem_map.nr_map; i++) {
unsigned long start, end;
@@ -312,56 +269,38 @@ static inline void bootmem_init(void)
start = PFN_UP(boot_mem_map.map[i].addr);
end = PFN_DOWN(boot_mem_map.map[i].addr
- + boot_mem_map.map[i].size);
+ + boot_mem_map.map[i].size);
- if (start >= end)
+ if (end > highest)
+ highest = end;
+ if (end <= reserved_end)
continue;
- if (end > max_pfn)
- max_pfn = end;
- if (start < first_usable_pfn) {
- if (start > start_pfn) {
- first_usable_pfn = start;
- } else if (end > start_pfn) {
- first_usable_pfn = start_pfn;
- }
- }
+ if (start >= mapstart)
+ continue;
+ mapstart = max(reserved_end, start);
}
/*
* Determine low and high memory ranges
*/
- max_low_pfn = max_pfn;
- if (max_low_pfn > MAXMEM_PFN) {
- max_low_pfn = MAXMEM_PFN;
-#ifndef CONFIG_HIGHMEM
- /* Maximum memory usable is what is directly addressable */
- printk(KERN_WARNING "Warning only %ldMB will be used.\n",
- MAXMEM >> 20);
- printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
+ if (highest > PFN_DOWN(HIGHMEM_START)) {
+#ifdef CONFIG_HIGHMEM
+ highstart_pfn = PFN_DOWN(HIGHMEM_START);
+ highend_pfn = highest;
#endif
+ highest = PFN_DOWN(HIGHMEM_START);
}
-#ifdef CONFIG_HIGHMEM
/*
- * Crude, we really should make a better attempt at detecting
- * highstart_pfn
+ * Initialize the boot-time allocator with low memory only.
*/
- highstart_pfn = highend_pfn = max_pfn;
- if (max_pfn > MAXMEM_PFN) {
- highstart_pfn = MAXMEM_PFN;
- printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
- (highend_pfn - highstart_pfn) >> (20 - PAGE_SHIFT));
- }
-#endif
-
- /* Initialize the boot-time allocator with low memory only. */
- bootmap_size = init_bootmem(first_usable_pfn, max_low_pfn);
+ bootmap_size = init_bootmem(mapstart, highest);
/*
* Register fully available low RAM pages with the bootmem allocator.
*/
for (i = 0; i < boot_mem_map.nr_map; i++) {
- unsigned long curr_pfn, last_pfn, size;
+ unsigned long start, end, size;
/*
* Reserve usable memory.
@@ -369,85 +308,50 @@ static inline void bootmem_init(void)
if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
continue;
- /*
- * We are rounding up the start address of usable memory:
- */
- curr_pfn = PFN_UP(boot_mem_map.map[i].addr);
- if (curr_pfn >= max_low_pfn)
- continue;
- if (curr_pfn < start_pfn)
- curr_pfn = start_pfn;
-
- /*
- * ... and at the end of the usable range downwards:
- */
- last_pfn = PFN_DOWN(boot_mem_map.map[i].addr
+ start = PFN_UP(boot_mem_map.map[i].addr);
+ end = PFN_DOWN(boot_mem_map.map[i].addr
+ boot_mem_map.map[i].size);
-
- if (last_pfn > max_low_pfn)
- last_pfn = max_low_pfn;
-
/*
- * Only register lowmem part of lowmem segment with bootmem.
+ * We round up the start address of usable memory
+ * and round down the end of the usable range.
*/
- size = last_pfn - curr_pfn;
- if (curr_pfn > PFN_DOWN(HIGHMEM_START))
- continue;
- if (curr_pfn + size - 1 > PFN_DOWN(HIGHMEM_START))
- size = PFN_DOWN(HIGHMEM_START) - curr_pfn;
- if (!size)
+ if (start >= max_low_pfn)
continue;
+ if (start < reserved_end)
+ start = reserved_end;
+ if (end > max_low_pfn)
+ end = max_low_pfn;
/*
- * ... finally, did all the rounding and playing
- * around just make the area go away?
+ * ... finally, is the area going away?
*/
- if (last_pfn <= curr_pfn)
+ if (end <= start)
continue;
+ size = end - start;
/* Register lowmem ranges */
- free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
- memory_present(0, curr_pfn, curr_pfn + size - 1);
+ free_bootmem(PFN_PHYS(start), size << PAGE_SHIFT);
+ memory_present(0, start, end);
}
- /* Reserve the bootmap memory. */
- reserve_bootmem(PFN_PHYS(first_usable_pfn), bootmap_size);
-#endif /* CONFIG_SGI_IP27 */
-
-#ifdef CONFIG_BLK_DEV_INITRD
- initrd_below_start_ok = 1;
- if (initrd_start) {
- unsigned long initrd_size = ((unsigned char *)initrd_end) -
- ((unsigned char *)initrd_start);
- const int width = sizeof(long) * 2;
-
- printk("Initial ramdisk at: 0x%p (%lu bytes)\n",
- (void *)initrd_start, initrd_size);
-
- if (CPHYSADDR(initrd_end) > PFN_PHYS(max_low_pfn)) {
- printk("initrd extends beyond end of memory "
- "(0x%0*Lx > 0x%0*Lx)\ndisabling initrd\n",
- width,
- (unsigned long long) CPHYSADDR(initrd_end),
- width,
- (unsigned long long) PFN_PHYS(max_low_pfn));
- initrd_start = initrd_end = 0;
- initrd_reserve_bootmem = 0;
- }
+ /*
+ * Reserve the bootmap memory.
+ */
+ reserve_bootmem(PFN_PHYS(mapstart), bootmap_size);
- if (initrd_reserve_bootmem)
- reserve_bootmem(CPHYSADDR(initrd_start), initrd_size);
- }
-#endif /* CONFIG_BLK_DEV_INITRD */
+ /*
+ * Reserve initrd memory if needed.
+ */
+ finalize_initrd();
}
+#endif /* CONFIG_SGI_IP27 */
+
/*
* arch_mem_init - initialize memory managment subsystem
*
* o plat_mem_setup() detects the memory configuration and will record detected
* memory areas using add_memory_region.
- * o parse_cmdline_early() parses the command line for mem= options which,
- * iff detected, will override the results of the automatic detection.
*
* At this stage the memory configuration of the system is known to the
* kernel but generic memory managment system is still entirely uninitialized.
@@ -465,25 +369,59 @@ static inline void bootmem_init(void)
* initialization hook for anything else was introduced.
*/
-extern void plat_mem_setup(void);
+static int usermem __initdata = 0;
+
+static int __init early_parse_mem(char *p)
+{
+ unsigned long start, size;
+
+ /*
+ * If a user specifies memory size, we
+ * blow away any automatically generated
+ * size.
+ */
+ if (usermem == 0) {
+ boot_mem_map.nr_map = 0;
+ usermem = 1;
+ }
+ start = 0;
+ size = memparse(p, &p);
+ if (*p == '@')
+ start = memparse(p + 1, &p);
+
+ add_memory_region(start, size, BOOT_MEM_RAM);
+ return 0;
+}
+early_param("mem", early_parse_mem);
static void __init arch_mem_init(char **cmdline_p)
{
+ extern void plat_mem_setup(void);
+
/* call board setup routine */
plat_mem_setup();
+ printk("Determined physical RAM map:\n");
+ print_memory_map();
+
strlcpy(command_line, arcs_cmdline, sizeof(command_line));
strlcpy(saved_command_line, command_line, COMMAND_LINE_SIZE);
*cmdline_p = command_line;
- parse_cmdline_early();
+ parse_early_param();
+
+ if (usermem) {
+ printk("User-defined physical RAM map:\n");
+ print_memory_map();
+ }
+
bootmem_init();
sparse_init();
paging_init();
}
-static inline void resource_init(void)
+static void __init resource_init(void)
{
int i;
@@ -504,10 +442,10 @@ static inline void resource_init(void)
start = boot_mem_map.map[i].addr;
end = boot_mem_map.map[i].addr + boot_mem_map.map[i].size - 1;
- if (start >= MAXMEM)
+ if (start >= HIGHMEM_START)
continue;
- if (end >= MAXMEM)
- end = MAXMEM - 1;
+ if (end >= HIGHMEM_START)
+ end = HIGHMEM_START - 1;
res = alloc_bootmem(sizeof(struct resource));
switch (boot_mem_map.map[i].type) {
@@ -536,9 +474,6 @@ static inline void resource_init(void)
}
}
-#undef MAXMEM
-#undef MAXMEM_PFN
-
void __init setup_arch(char **cmdline_p)
{
cpu_probe();
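As a worked example of the new early_param()-based parsing above (illustrative only; the helper name is hypothetical): booting with "mem=256M@16M" makes the handler discard the firmware-detected map and register a single region:

#include <linux/init.h>
#include <linux/kernel.h>	/* memparse() */
#include <asm/bootinfo.h>	/* add_memory_region(), BOOT_MEM_RAM */

/* What early_parse_mem() ends up doing for the string "256M@16M". */
static void __init mem_option_example(void)
{
	char arg[] = "256M@16M", *p = arg;
	unsigned long start = 0, size;

	size = memparse(p, &p);			/* 256M -> 0x10000000, p now at "@16M" */
	if (*p == '@')
		start = memparse(p + 1, &p);	/* 16M  -> 0x01000000 */

	add_memory_region(start, size, BOOT_MEM_RAM);	/* RAM from 16M to 272M */
}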
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 6b4d9be31615..b9d358e05214 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -424,15 +424,11 @@ void do_signal(struct pt_regs *regs)
if (!user_mode(regs))
return;
- if (try_to_freeze())
- goto no_signal;
-
if (test_thread_flag(TIF_RESTORE_SIGMASK))
oldset = &current->saved_sigmask;
else
oldset = &current->blocked;
-
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
if (signr > 0) {
/* Whee! Actually deliver the signal. */
@@ -446,9 +442,10 @@ void do_signal(struct pt_regs *regs)
if (test_thread_flag(TIF_RESTORE_SIGMASK))
clear_thread_flag(TIF_RESTORE_SIGMASK);
}
+
+ return;
}
-no_signal:
/*
* Who's code doesn't conform to the restartable syscall convention
* dies here!!! The li instruction, a single machine instruction,
@@ -466,6 +463,7 @@ no_signal:
regs->regs[7] = regs->regs[26];
regs->cp0_epc -= 4;
}
+ regs->regs[0] = 0; /* Don't deal with this again. */
}
/*
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index f32a22997c3d..c86a5ddff050 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -815,9 +815,6 @@ void do_signal32(struct pt_regs *regs)
if (!user_mode(regs))
return;
- if (try_to_freeze())
- goto no_signal;
-
if (test_thread_flag(TIF_RESTORE_SIGMASK))
oldset = &current->saved_sigmask;
else
@@ -836,9 +833,10 @@ void do_signal32(struct pt_regs *regs)
if (test_thread_flag(TIF_RESTORE_SIGMASK))
clear_thread_flag(TIF_RESTORE_SIGMASK);
}
+
+ return;
}
-no_signal:
/*
* Who's code doesn't conform to the restartable syscall convention
* dies here!!! The li instruction, a single machine instruction,
@@ -856,6 +854,7 @@ no_signal:
regs->regs[7] = regs->regs[26];
regs->cp0_epc -= 4;
}
+ regs->regs[0] = 0; /* Don't deal with this again. */
}
/*
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 93429a4d3012..766253c44f3f 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -203,7 +203,7 @@ void plat_smp_setup(void)
write_vpe_c0_config( read_c0_config());
/* make sure there are no software interrupts pending */
- write_vpe_c0_cause(read_vpe_c0_cause() & ~(C_SW1|C_SW0));
+ write_vpe_c0_cause(0);
/* Propagate Config7 */
write_vpe_c0_config7(read_c0_config7());
diff --git a/arch/mips/kernel/smtc-asm.S b/arch/mips/kernel/smtc-asm.S
index 4cc3dea36612..76cb31d57482 100644
--- a/arch/mips/kernel/smtc-asm.S
+++ b/arch/mips/kernel/smtc-asm.S
@@ -8,7 +8,7 @@
#include <asm/regdef.h>
#include <asm/asmmacro.h>
#include <asm/stackframe.h>
-#include <asm/stackframe.h>
+#include <asm/irqflags.h>
/*
* "Software Interrupt" linkage.
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 0721314db657..9951240cc3fd 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -263,7 +263,7 @@ asmlinkage int sys_olduname(struct oldold_utsname __user * name)
return error;
}
-void sys_set_thread_area(unsigned long addr)
+asmlinkage int sys_set_thread_area(unsigned long addr)
{
struct thread_info *ti = task_thread_info(current);
@@ -271,6 +271,8 @@ void sys_set_thread_area(unsigned long addr)
/* If some future MIPS implementation has this register in hardware,
* we will need to update it here (and in context switches). */
+
+ return 0;
}
asmlinkage int _sys_sysmips(int cmd, long arg1, int arg2, int arg3)
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 954a198494ef..e51d8fd9a152 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -20,6 +20,7 @@
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/bootmem.h>
+#include <linux/interrupt.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
@@ -72,28 +73,68 @@ void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset);
-/*
- * These constant is for searching for possible module text segments.
- * MODULE_RANGE is a guess of how much space is likely to be vmalloced.
- */
-#define MODULE_RANGE (8*1024*1024)
+
+static void show_raw_backtrace(unsigned long reg29)
+{
+ unsigned long *sp = (unsigned long *)reg29;
+ unsigned long addr;
+
+ printk("Call Trace:");
+#ifdef CONFIG_KALLSYMS
+ printk("\n");
+#endif
+ while (!kstack_end(sp)) {
+ addr = *sp++;
+ if (__kernel_text_address(addr))
+ print_ip_sym(addr);
+ }
+ printk("\n");
+}
+
+#ifdef CONFIG_KALLSYMS
+static int raw_show_trace;
+static int __init set_raw_show_trace(char *str)
+{
+ raw_show_trace = 1;
+ return 1;
+}
+__setup("raw_show_trace", set_raw_show_trace);
+
+extern unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
+ unsigned long pc, unsigned long ra);
+
+static void show_backtrace(struct task_struct *task, struct pt_regs *regs)
+{
+ unsigned long sp = regs->regs[29];
+ unsigned long ra = regs->regs[31];
+ unsigned long pc = regs->cp0_epc;
+
+ if (raw_show_trace || !__kernel_text_address(pc)) {
+ show_raw_backtrace(sp);
+ return;
+ }
+ printk("Call Trace:\n");
+ do {
+ print_ip_sym(pc);
+ pc = unwind_stack(task, &sp, pc, ra);
+ ra = 0;
+ } while (pc);
+ printk("\n");
+}
+#else
+#define show_backtrace(task, r) show_raw_backtrace((r)->regs[29]);
+#endif
/*
* This routine abuses get_user()/put_user() to reference pointers
* with at least a bit of error checking ...
*/
-void show_stack(struct task_struct *task, unsigned long *sp)
+static void show_stacktrace(struct task_struct *task, struct pt_regs *regs)
{
const int field = 2 * sizeof(unsigned long);
long stackdata;
int i;
-
- if (!sp) {
- if (task && task != current)
- sp = (unsigned long *) task->thread.reg29;
- else
- sp = (unsigned long *) &sp;
- }
+ unsigned long *sp = (unsigned long *)regs->regs[29];
printk("Stack :");
i = 0;
@@ -114,32 +155,48 @@ void show_stack(struct task_struct *task, unsigned long *sp)
i++;
}
printk("\n");
+ show_backtrace(task, regs);
}
-void show_trace(struct task_struct *task, unsigned long *stack)
+static __always_inline void prepare_frametrace(struct pt_regs *regs)
{
- const int field = 2 * sizeof(unsigned long);
- unsigned long addr;
-
- if (!stack) {
- if (task && task != current)
- stack = (unsigned long *) task->thread.reg29;
- else
- stack = (unsigned long *) &stack;
- }
-
- printk("Call Trace:");
-#ifdef CONFIG_KALLSYMS
- printk("\n");
+ __asm__ __volatile__(
+ ".set push\n\t"
+ ".set noat\n\t"
+#ifdef CONFIG_64BIT
+ "1: dla $1, 1b\n\t"
+ "sd $1, %0\n\t"
+ "sd $29, %1\n\t"
+ "sd $31, %2\n\t"
+#else
+ "1: la $1, 1b\n\t"
+ "sw $1, %0\n\t"
+ "sw $29, %1\n\t"
+ "sw $31, %2\n\t"
#endif
- while (!kstack_end(stack)) {
- addr = *stack++;
- if (__kernel_text_address(addr)) {
- printk(" [<%0*lx>] ", field, addr);
- print_symbol("%s\n", addr);
+ ".set pop\n\t"
+ : "=m" (regs->cp0_epc),
+ "=m" (regs->regs[29]), "=m" (regs->regs[31])
+ : : "memory");
+}
+
+void show_stack(struct task_struct *task, unsigned long *sp)
+{
+ struct pt_regs regs;
+ if (sp) {
+ regs.regs[29] = (unsigned long)sp;
+ regs.regs[31] = 0;
+ regs.cp0_epc = 0;
+ } else {
+ if (task && task != current) {
+ regs.regs[29] = task->thread.reg29;
+ regs.regs[31] = 0;
+ regs.cp0_epc = task->thread.reg31;
+ } else {
+ prepare_frametrace(&regs);
}
}
- printk("\n");
+ show_stacktrace(task, &regs);
}
/*
@@ -147,9 +204,15 @@ void show_trace(struct task_struct *task, unsigned long *stack)
*/
void dump_stack(void)
{
- unsigned long stack;
+ struct pt_regs regs;
- show_trace(current, &stack);
+ /*
+ * Remove any garbage that may be in regs (especially function
+ * addresses) so that show_raw_backtrace() does not report them
+ */
+ memset(&regs, 0, sizeof(regs));
+ prepare_frametrace(&regs);
+ show_backtrace(current, &regs);
}
EXPORT_SYMBOL(dump_stack);
@@ -268,8 +331,7 @@ void show_registers(struct pt_regs *regs)
print_modules();
printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n",
current->comm, current->pid, current_thread_info(), current);
- show_stack(current, (long *) regs->regs[29]);
- show_trace(current, (long *) regs->regs[29]);
+ show_stacktrace(current, regs);
show_code((unsigned int *) regs->cp0_epc);
printk("\n");
}
@@ -292,6 +354,16 @@ NORET_TYPE void ATTRIB_NORET die(const char * str, struct pt_regs * regs)
printk("%s[#%d]:\n", str, ++die_counter);
show_registers(regs);
spin_unlock_irq(&die_lock);
+
+ if (in_interrupt())
+ panic("Fatal exception in interrupt");
+
+ if (panic_on_oops) {
+ printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
+ ssleep(5);
+ panic("Fatal exception");
+ }
+
do_exit(SIGSEGV);
}
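A brief usage sketch of the reworked dump paths above (the call sites are hypothetical; the interfaces are the ones defined in this file):

#include <linux/kernel.h>
#include <linux/sched.h>

/* Illustrative call sites only -- not part of the patch. */
static void backtrace_usage_sketch(struct task_struct *task)
{
	/* Current context: dump_stack() snapshots pc/sp/ra with
	 * prepare_frametrace() and, with KALLSYMS, walks frames through
	 * unwind_stack(); without KALLSYMS it falls back to the raw scan. */
	dump_stack();

	/* Another (sleeping) task: show_stack() starts from the registers
	 * saved at the last context switch (thread.reg29/reg31). */
	show_stack(task, NULL);
}

Booting with "raw_show_trace" on the command line forces the old heuristic scan of every stack word even when KALLSYMS is enabled.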
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 9ee0ec2cd067..51ddd2166898 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -768,10 +768,16 @@ int vpe_run(struct vpe * v)
*/
write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | v->minor);
+ write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~(VPECONF0_VPA));
+
+ back_to_back_c0_hazard();
+
/* Set up the XTC bit in vpeconf0 to point at our tc */
write_vpe_c0_vpeconf0( (read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC))
| (t->index << VPECONF0_XTC_SHIFT));
+ back_to_back_c0_hazard();
+
/* enable this VPE */
write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
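The two back_to_back_c0_hazard() calls added above follow the general MT rule that a CP0 write must settle before a dependent CP0 access executes; pulled together, the resulting bring-up ordering reads as follows (a restatement for clarity, not additional patch content):

#include <asm/hazards.h>	/* back_to_back_c0_hazard() */
#include <asm/mipsmtregs.h>	/* VPECONF0_* and the VPE CP0 accessors */

static void vpe_retarget_sketch(int tc_index)
{
	/* 1. Deactivate the VPE so the XTC change cannot race with it. */
	write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
	back_to_back_c0_hazard();

	/* 2. Point the exception base TC (XTC) at the chosen TC. */
	write_vpe_c0_vpeconf0((read_vpe_c0_vpeconf0() & ~VPECONF0_XTC)
			      | (tc_index << VPECONF0_XTC_SHIFT));
	back_to_back_c0_hazard();

	/* 3. Only then re-activate the VPE. */
	write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
}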