Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/Makefile            |   2
-rw-r--r--  arch/mips/kernel/asm-offsets.c       |   2
-rw-r--r--  arch/mips/kernel/branch.c            |   4
-rw-r--r--  arch/mips/kernel/cps-vec.S           |  96
-rw-r--r--  arch/mips/kernel/cpu-probe.c         |  15
-rw-r--r--  arch/mips/kernel/genex.S             |   2
-rw-r--r--  arch/mips/kernel/head.S              |  16
-rw-r--r--  arch/mips/kernel/i8259.c             |  43
-rw-r--r--  arch/mips/kernel/irq.c               |  54
-rw-r--r--  arch/mips/kernel/irq_cpu.c           | 169
-rw-r--r--  arch/mips/kernel/kgdb.c              |   4
-rw-r--r--  arch/mips/kernel/mips-mt-fpaff.c     |   5
-rw-r--r--  arch/mips/kernel/prom.c              |   3
-rw-r--r--  arch/mips/kernel/relocate_kernel.S   |   8
-rw-r--r--  arch/mips/kernel/scall32-o32.S       |  37
-rw-r--r--  arch/mips/kernel/scall64-64.S        |   2
-rw-r--r--  arch/mips/kernel/scall64-n32.S       |   2
-rw-r--r--  arch/mips/kernel/scall64-o32.S       |  35
-rw-r--r--  arch/mips/kernel/setup.c             |  13
-rw-r--r--  arch/mips/kernel/signal-common.h     |   9
-rw-r--r--  arch/mips/kernel/signal32.c          |   2
-rw-r--r--  arch/mips/kernel/smp-bmips.c         |   6
-rw-r--r--  arch/mips/kernel/smp-cps.c           |   6
-rw-r--r--  arch/mips/kernel/smp.c               |  54
-rw-r--r--  arch/mips/kernel/sysrq.c             |  77
-rw-r--r--  arch/mips/kernel/traps.c             |  33
-rw-r--r--  arch/mips/kernel/unaligned.c         |   2
-rw-r--r--  arch/mips/kernel/vmlinux.lds.S       |   8
28 files changed, 347 insertions(+), 362 deletions(-)
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index d3d2ff2d76dc..3f5cf8aff6f3 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -62,7 +62,6 @@ obj-$(CONFIG_MIPS_VPE_APSP_API_CMP) += rtlx-cmp.o
obj-$(CONFIG_MIPS_VPE_APSP_API_MT) += rtlx-mt.o
obj-$(CONFIG_I8259) += i8259.o
-obj-$(CONFIG_IRQ_CPU) += irq_cpu.o
obj-$(CONFIG_IRQ_CPU_RM7K) += irq-rm7000.o
obj-$(CONFIG_MIPS_MSC) += irq-msc01.o
obj-$(CONFIG_IRQ_TXX9) += irq_txx9.o
@@ -77,6 +76,7 @@ obj-$(CONFIG_MIPS32_O32) += binfmt_elfo32.o scall64-o32.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_PROC_FS) += proc.o
+obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o
obj-$(CONFIG_64BIT) += cpu-bugs64.o
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index beabe19ff8e5..072fab13645d 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -1,5 +1,5 @@
/*
- * offset.c: Calculate pt_regs and task_struct offsets.
+ * asm-offsets.c: Calculate pt_regs and task_struct offsets.
*
* Copyright (C) 1996 David S. Miller
* Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003 Ralf Baechle
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index c0c5e5972256..d8f9b357b222 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -600,7 +600,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
break;
case blezl_op: /* not really i_format */
- if (NO_R6EMU)
+ if (!insn.i_format.rt && NO_R6EMU)
goto sigill_r6;
case blez_op:
/*
@@ -635,7 +635,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
break;
case bgtzl_op:
- if (NO_R6EMU)
+ if (!insn.i_format.rt && NO_R6EMU)
goto sigill_r6;
case bgtz_op:
/*
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index 55b759a0019e..1b6ca634e646 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -60,7 +60,7 @@ LEAF(mips_cps_core_entry)
nop
/* This is an NMI */
- la k0, nmi_handler
+ PTR_LA k0, nmi_handler
jr k0
nop
@@ -107,10 +107,10 @@ not_nmi:
mul t1, t1, t0
mul t1, t1, t2
- li a0, KSEG0
- add a1, a0, t1
+ li a0, CKSEG0
+ PTR_ADD a1, a0, t1
1: cache Index_Store_Tag_I, 0(a0)
- add a0, a0, t0
+ PTR_ADD a0, a0, t0
bne a0, a1, 1b
nop
icache_done:
@@ -134,12 +134,12 @@ icache_done:
mul t1, t1, t0
mul t1, t1, t2
- li a0, KSEG0
- addu a1, a0, t1
- subu a1, a1, t0
+ li a0, CKSEG0
+ PTR_ADDU a1, a0, t1
+ PTR_SUBU a1, a1, t0
1: cache Index_Store_Tag_D, 0(a0)
bne a0, a1, 1b
- add a0, a0, t0
+ PTR_ADD a0, a0, t0
dcache_done:
/* Set Kseg0 CCA to that in s0 */
@@ -152,11 +152,11 @@ dcache_done:
/* Enter the coherent domain */
li t0, 0xff
- sw t0, GCR_CL_COHERENCE_OFS(v1)
+ PTR_S t0, GCR_CL_COHERENCE_OFS(v1)
ehb
/* Jump to kseg0 */
- la t0, 1f
+ PTR_LA t0, 1f
jr t0
nop
@@ -178,9 +178,9 @@ dcache_done:
nop
/* Off we go! */
- lw t1, VPEBOOTCFG_PC(v0)
- lw gp, VPEBOOTCFG_GP(v0)
- lw sp, VPEBOOTCFG_SP(v0)
+ PTR_L t1, VPEBOOTCFG_PC(v0)
+ PTR_L gp, VPEBOOTCFG_GP(v0)
+ PTR_L sp, VPEBOOTCFG_SP(v0)
jr t1
nop
END(mips_cps_core_entry)
@@ -217,7 +217,7 @@ LEAF(excep_intex)
.org 0x480
LEAF(excep_ejtag)
- la k0, ejtag_debug_handler
+ PTR_LA k0, ejtag_debug_handler
jr k0
nop
END(excep_ejtag)
@@ -229,7 +229,7 @@ LEAF(mips_cps_core_init)
nop
.set push
- .set mips32r2
+ .set mips64r2
.set mt
/* Only allow 1 TC per VPE to execute... */
@@ -237,7 +237,7 @@ LEAF(mips_cps_core_init)
/* ...and for the moment only 1 VPE */
dvpe
- la t1, 1f
+ PTR_LA t1, 1f
jr.hb t1
nop
@@ -250,25 +250,25 @@ LEAF(mips_cps_core_init)
mfc0 t0, CP0_MVPCONF0
srl t0, t0, MVPCONF0_PVPE_SHIFT
andi t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
- addiu t7, t0, 1
+ addiu ta3, t0, 1
/* If there's only 1, we're done */
beqz t0, 2f
nop
/* Loop through each VPE within this core */
- li t5, 1
+ li ta1, 1
1: /* Operate on the appropriate TC */
- mtc0 t5, CP0_VPECONTROL
+ mtc0 ta1, CP0_VPECONTROL
ehb
/* Bind TC to VPE (1:1 TC:VPE mapping) */
- mttc0 t5, CP0_TCBIND
+ mttc0 ta1, CP0_TCBIND
/* Set exclusive TC, non-active, master */
li t0, VPECONF0_MVP
- sll t1, t5, VPECONF0_XTC_SHIFT
+ sll t1, ta1, VPECONF0_XTC_SHIFT
or t0, t0, t1
mttc0 t0, CP0_VPECONF0
@@ -280,8 +280,8 @@ LEAF(mips_cps_core_init)
mttc0 t0, CP0_TCHALT
/* Next VPE */
- addiu t5, t5, 1
- slt t0, t5, t7
+ addiu ta1, ta1, 1
+ slt t0, ta1, ta3
bnez t0, 1b
nop
@@ -298,19 +298,19 @@ LEAF(mips_cps_core_init)
LEAF(mips_cps_boot_vpes)
/* Retrieve CM base address */
- la t0, mips_cm_base
- lw t0, 0(t0)
+ PTR_LA t0, mips_cm_base
+ PTR_L t0, 0(t0)
/* Calculate a pointer to this cores struct core_boot_config */
- lw t0, GCR_CL_ID_OFS(t0)
+ PTR_L t0, GCR_CL_ID_OFS(t0)
li t1, COREBOOTCFG_SIZE
mul t0, t0, t1
- la t1, mips_cps_core_bootcfg
- lw t1, 0(t1)
- addu t0, t0, t1
+ PTR_LA t1, mips_cps_core_bootcfg
+ PTR_L t1, 0(t1)
+ PTR_ADDU t0, t0, t1
/* Calculate this VPEs ID. If the core doesn't support MT use 0 */
- has_mt t6, 1f
+ has_mt ta2, 1f
li t9, 0
/* Find the number of VPEs present in the core */
@@ -334,24 +334,24 @@ LEAF(mips_cps_boot_vpes)
1: /* Calculate a pointer to this VPEs struct vpe_boot_config */
li t1, VPEBOOTCFG_SIZE
mul v0, t9, t1
- lw t7, COREBOOTCFG_VPECONFIG(t0)
- addu v0, v0, t7
+ PTR_L ta3, COREBOOTCFG_VPECONFIG(t0)
+ PTR_ADDU v0, v0, ta3
#ifdef CONFIG_MIPS_MT
/* If the core doesn't support MT then return */
- bnez t6, 1f
+ bnez ta2, 1f
nop
jr ra
nop
.set push
- .set mips32r2
+ .set mips64r2
.set mt
1: /* Enter VPE configuration state */
dvpe
- la t1, 1f
+ PTR_LA t1, 1f
jr.hb t1
nop
1: mfc0 t1, CP0_MVPCONTROL
@@ -360,12 +360,12 @@ LEAF(mips_cps_boot_vpes)
ehb
/* Loop through each VPE */
- lw t6, COREBOOTCFG_VPEMASK(t0)
- move t8, t6
- li t5, 0
+ PTR_L ta2, COREBOOTCFG_VPEMASK(t0)
+ move t8, ta2
+ li ta1, 0
/* Check whether the VPE should be running. If not, skip it */
-1: andi t0, t6, 1
+1: andi t0, ta2, 1
beqz t0, 2f
nop
@@ -373,7 +373,7 @@ LEAF(mips_cps_boot_vpes)
mfc0 t0, CP0_VPECONTROL
ori t0, t0, VPECONTROL_TARGTC
xori t0, t0, VPECONTROL_TARGTC
- or t0, t0, t5
+ or t0, t0, ta1
mtc0 t0, CP0_VPECONTROL
ehb
@@ -384,8 +384,8 @@ LEAF(mips_cps_boot_vpes)
/* Calculate a pointer to the VPEs struct vpe_boot_config */
li t0, VPEBOOTCFG_SIZE
- mul t0, t0, t5
- addu t0, t0, t7
+ mul t0, t0, ta1
+ addu t0, t0, ta3
/* Set the TC restart PC */
lw t1, VPEBOOTCFG_PC(t0)
@@ -423,9 +423,9 @@ LEAF(mips_cps_boot_vpes)
mttc0 t0, CP0_VPECONF0
/* Next VPE */
-2: srl t6, t6, 1
- addiu t5, t5, 1
- bnez t6, 1b
+2: srl ta2, ta2, 1
+ addiu ta1, ta1, 1
+ bnez ta2, 1b
nop
/* Leave VPE configuration state */
@@ -445,7 +445,7 @@ LEAF(mips_cps_boot_vpes)
/* This VPE should be offline, halt the TC */
li t0, TCHALT_H
mtc0 t0, CP0_TCHALT
- la t0, 1f
+ PTR_LA t0, 1f
1: jr.hb t0
nop
@@ -466,10 +466,10 @@ LEAF(mips_cps_boot_vpes)
.set noat
lw $1, TI_CPU(gp)
sll $1, $1, LONGLOG
- la \dest, __per_cpu_offset
+ PTR_LA \dest, __per_cpu_offset
addu $1, $1, \dest
lw $1, 0($1)
- la \dest, cps_cpu_state
+ PTR_LA \dest, cps_cpu_state
addu \dest, \dest, $1
.set pop
.endm
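A note on the register renaming above (added for context, not part of the patch): the o32-only scratch names t4-t7 are not defined by regdef.h when building for the 64-bit ABIs, whereas the ta0-ta3 aliases exist under both, so moving the loop registers from t5/t6/t7 to ta1/ta2/ta3 lets cps-vec.S assemble for 64-bit kernels; the PTR_LA/PTR_L/PTR_ADD macros likewise expand to instructions of the correct pointer width.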
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index e36515dcd3b2..dbe0792fc9c1 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -74,13 +74,12 @@ static inline void cpu_set_fpu_fcsr_mask(struct cpuinfo_mips *c)
{
unsigned long sr, mask, fcsr, fcsr0, fcsr1;
+ fcsr = c->fpu_csr31;
mask = FPU_CSR_ALL_X | FPU_CSR_ALL_E | FPU_CSR_ALL_S | FPU_CSR_RM;
sr = read_c0_status();
__enable_fpu(FPU_AS_IS);
- fcsr = read_32bit_cp1_register(CP1_STATUS);
-
fcsr0 = fcsr & mask;
write_32bit_cp1_register(CP1_STATUS, fcsr0);
fcsr0 = read_32bit_cp1_register(CP1_STATUS);
@@ -946,7 +945,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
- MIPS_CPU_LLSC;
+ MIPS_CPU_LLSC | MIPS_CPU_BP_GHIST;
c->tlbsize = 64;
break;
case PRID_IMP_R14000:
@@ -961,7 +960,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
- MIPS_CPU_LLSC;
+ MIPS_CPU_LLSC | MIPS_CPU_BP_GHIST;
c->tlbsize = 64;
break;
case PRID_IMP_LOONGSON_64: /* Loongson-2/3 */
@@ -1444,7 +1443,9 @@ void cpu_probe(void)
case PRID_COMP_CAVIUM:
cpu_probe_cavium(c, cpu);
break;
- case PRID_COMP_INGENIC:
+ case PRID_COMP_INGENIC_D0:
+ case PRID_COMP_INGENIC_D1:
+ case PRID_COMP_INGENIC_E1:
cpu_probe_ingenic(c, cpu);
break;
case PRID_COMP_NETLOGIC:
@@ -1479,6 +1480,10 @@ void cpu_probe(void)
else
cpu_set_nofpu_opts(c);
+ if (cpu_has_bp_ghist)
+ write_c0_r10k_diag(read_c0_r10k_diag() |
+ R10K_DIAG_E_GHIST);
+
if (cpu_has_mips_r2_r6) {
c->srsets = ((read_c0_srsctl() >> 26) & 0x0f) + 1;
/* R2 has Performance Counter Interrupt indicator */
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index af42e7003f12..baa7b6fc0a60 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -407,7 +407,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
.set noat
SAVE_ALL
FEXPORT(handle_\exception\ext)
- __BUILD_clear_\clear
+ __build_clear_\clear
.set at
__BUILD_\verbose \exception
move a0, sp
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index 95afd663cd45..4e4cc5b9a771 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -94,6 +94,22 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
jr t0
0:
+#ifdef CONFIG_MIPS_RAW_APPENDED_DTB
+ PTR_LA t0, __appended_dtb
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ li t1, 0xd00dfeed
+#else
+ li t1, 0xedfe0dd0
+#endif
+ lw t2, (t0)
+ bne t1, t2, not_found
+ nop
+
+ move a1, t0
+ PTR_LI a0, -2
+not_found:
+#endif
PTR_LA t0, __bss_start # clear .bss
LONG_S zero, (t0)
PTR_LA t1, __bss_stop - LONGSIZE
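The two constants compared against the first word of __appended_dtb above are the flattened device tree header magic: 0xd00dfeed as stored (big-endian), which a little-endian CPU reads back as 0xedfe0dd0; the a0 = -2 / a1 = blob setup appears to follow the usual MIPS convention for handing a DTB address to the kernel. A minimal C sketch of the same magic test, purely as an illustration of what the assembly does and not part of the patch:

	#include <stdint.h>

	/* Sketch of the check performed by the head.S hunk above: a raw,
	 * CPU-endian 32-bit load (like `lw`) compared against the FDT magic. */
	static int looks_like_fdt(const void *blob)	/* blob: hypothetical candidate pointer */
	{
		uint32_t first_word = *(const uint32_t *)blob;
	#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
		return first_word == 0xd00dfeed;	/* magic as stored in the blob */
	#else
		return first_word == 0xedfe0dd0;	/* byte-swapped view on little-endian */
	#endif
	}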
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index a74ec3ae557c..74f6752814d3 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -14,6 +14,7 @@
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
+#include <linux/of_irq.h>
#include <linux/spinlock.h>
#include <linux/syscore_ops.h>
#include <linux/irq.h>
@@ -21,6 +22,8 @@
#include <asm/i8259.h>
#include <asm/io.h>
+#include "../../drivers/irqchip/irqchip.h"
+
/*
* This is the 'legacy' 8259A Programmable Interrupt Controller,
* present in the majority of PC/AT boxes.
@@ -327,7 +330,7 @@ static struct irq_domain_ops i8259A_ops = {
* driver compatibility reasons interrupts 0 - 15 to be the i8259
* interrupts even if the hardware uses a different interrupt numbering.
*/
-void __init init_i8259_irqs(void)
+struct irq_domain * __init __init_i8259_irqs(struct device_node *node)
{
struct irq_domain *domain;
@@ -336,10 +339,46 @@ void __init init_i8259_irqs(void)
init_8259A(0);
- domain = irq_domain_add_legacy(NULL, 16, I8259A_IRQ_BASE, 0,
+ domain = irq_domain_add_legacy(node, 16, I8259A_IRQ_BASE, 0,
&i8259A_ops, NULL);
if (!domain)
panic("Failed to add i8259 IRQ domain");
setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2);
+ return domain;
+}
+
+void __init init_i8259_irqs(void)
+{
+ __init_i8259_irqs(NULL);
+}
+
+static void i8259_irq_dispatch(unsigned int irq, struct irq_desc *desc)
+{
+ struct irq_domain *domain = irq_get_handler_data(irq);
+ int hwirq = i8259_irq();
+
+ if (hwirq < 0)
+ return;
+
+ irq = irq_linear_revmap(domain, hwirq);
+ generic_handle_irq(irq);
+}
+
+int __init i8259_of_init(struct device_node *node, struct device_node *parent)
+{
+ struct irq_domain *domain;
+ unsigned int parent_irq;
+
+ parent_irq = irq_of_parse_and_map(node, 0);
+ if (!parent_irq) {
+ pr_err("Failed to map i8259 parent IRQ\n");
+ return -ENODEV;
+ }
+
+ domain = __init_i8259_irqs(node);
+ irq_set_handler_data(parent_irq, domain);
+ irq_set_chained_handler(parent_irq, i8259_irq_dispatch);
+ return 0;
}
+IRQCHIP_DECLARE(i8259, "intel,i8259", i8259_of_init);
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index d2bfbc2e8995..8eb5af805964 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -25,48 +25,6 @@
#include <linux/atomic.h>
#include <asm/uaccess.h>
-#ifdef CONFIG_KGDB
-int kgdb_early_setup;
-#endif
-
-static unsigned long irq_map[NR_IRQS / BITS_PER_LONG];
-
-int allocate_irqno(void)
-{
- int irq;
-
-again:
- irq = find_first_zero_bit(irq_map, NR_IRQS);
-
- if (irq >= NR_IRQS)
- return -ENOSPC;
-
- if (test_and_set_bit(irq, irq_map))
- goto again;
-
- return irq;
-}
-
-/*
- * Allocate the 16 legacy interrupts for i8259 devices. This happens early
- * in the kernel initialization so treating allocation failure as BUG() is
- * ok.
- */
-void __init alloc_legacy_irqno(void)
-{
- int i;
-
- for (i = 0; i <= 16; i++)
- BUG_ON(test_and_set_bit(i, irq_map));
-}
-
-void free_irqno(unsigned int irq)
-{
- smp_mb__before_atomic();
- clear_bit(irq, irq_map);
- smp_mb__after_atomic();
-}
-
/*
* 'what should we do if we get a hw irq event on an illegal vector'.
* each architecture has to answer this themselves.
@@ -93,23 +51,13 @@ void __init init_IRQ(void)
{
int i;
-#ifdef CONFIG_KGDB
- if (kgdb_early_setup)
- return;
-#endif
-
for (i = 0; i < NR_IRQS; i++)
irq_set_noprobe(i);
arch_init_irq();
-
-#ifdef CONFIG_KGDB
- if (!kgdb_early_setup)
- kgdb_early_setup = 1;
-#endif
}
-#ifdef DEBUG_STACKOVERFLOW
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
static inline void check_stack_overflow(void)
{
unsigned long sp;
diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c
deleted file mode 100644
index 6eb7a3f515fc..000000000000
--- a/arch/mips/kernel/irq_cpu.c
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * Copyright 2001 MontaVista Software Inc.
- * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
- *
- * Copyright (C) 2001 Ralf Baechle
- * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
- * Author: Maciej W. Rozycki <macro@mips.com>
- *
- * This file define the irq handler for MIPS CPU interrupts.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-/*
- * Almost all MIPS CPUs define 8 interrupt sources. They are typically
- * level triggered (i.e., cannot be cleared from CPU; must be cleared from
- * device). The first two are software interrupts which we don't really
- * use or support. The last one is usually the CPU timer interrupt if
- * counter register is present or, for CPUs with an external FPU, by
- * convention it's the FPU exception interrupt.
- *
- * Don't even think about using this on SMP. You have been warned.
- *
- * This file exports one global function:
- * void mips_cpu_irq_init(void);
- */
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/irq.h>
-#include <linux/irqdomain.h>
-
-#include <asm/irq_cpu.h>
-#include <asm/mipsregs.h>
-#include <asm/mipsmtregs.h>
-#include <asm/setup.h>
-
-static inline void unmask_mips_irq(struct irq_data *d)
-{
- set_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
- irq_enable_hazard();
-}
-
-static inline void mask_mips_irq(struct irq_data *d)
-{
- clear_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
- irq_disable_hazard();
-}
-
-static struct irq_chip mips_cpu_irq_controller = {
- .name = "MIPS",
- .irq_ack = mask_mips_irq,
- .irq_mask = mask_mips_irq,
- .irq_mask_ack = mask_mips_irq,
- .irq_unmask = unmask_mips_irq,
- .irq_eoi = unmask_mips_irq,
- .irq_disable = mask_mips_irq,
- .irq_enable = unmask_mips_irq,
-};
-
-/*
- * Basically the same as above but taking care of all the MT stuff
- */
-
-static unsigned int mips_mt_cpu_irq_startup(struct irq_data *d)
-{
- unsigned int vpflags = dvpe();
-
- clear_c0_cause(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
- evpe(vpflags);
- unmask_mips_irq(d);
- return 0;
-}
-
-/*
- * While we ack the interrupt interrupts are disabled and thus we don't need
- * to deal with concurrency issues. Same for mips_cpu_irq_end.
- */
-static void mips_mt_cpu_irq_ack(struct irq_data *d)
-{
- unsigned int vpflags = dvpe();
- clear_c0_cause(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
- evpe(vpflags);
- mask_mips_irq(d);
-}
-
-static struct irq_chip mips_mt_cpu_irq_controller = {
- .name = "MIPS",
- .irq_startup = mips_mt_cpu_irq_startup,
- .irq_ack = mips_mt_cpu_irq_ack,
- .irq_mask = mask_mips_irq,
- .irq_mask_ack = mips_mt_cpu_irq_ack,
- .irq_unmask = unmask_mips_irq,
- .irq_eoi = unmask_mips_irq,
- .irq_disable = mask_mips_irq,
- .irq_enable = unmask_mips_irq,
-};
-
-asmlinkage void __weak plat_irq_dispatch(void)
-{
- unsigned long pending = read_c0_cause() & read_c0_status() & ST0_IM;
- int irq;
-
- if (!pending) {
- spurious_interrupt();
- return;
- }
-
- pending >>= CAUSEB_IP;
- while (pending) {
- irq = fls(pending) - 1;
- do_IRQ(MIPS_CPU_IRQ_BASE + irq);
- pending &= ~BIT(irq);
- }
-}
-
-static int mips_cpu_intc_map(struct irq_domain *d, unsigned int irq,
- irq_hw_number_t hw)
-{
- static struct irq_chip *chip;
-
- if (hw < 2 && cpu_has_mipsmt) {
- /* Software interrupts are used for MT/CMT IPI */
- chip = &mips_mt_cpu_irq_controller;
- } else {
- chip = &mips_cpu_irq_controller;
- }
-
- if (cpu_has_vint)
- set_vi_handler(hw, plat_irq_dispatch);
-
- irq_set_chip_and_handler(irq, chip, handle_percpu_irq);
-
- return 0;
-}
-
-static const struct irq_domain_ops mips_cpu_intc_irq_domain_ops = {
- .map = mips_cpu_intc_map,
- .xlate = irq_domain_xlate_onecell,
-};
-
-static void __init __mips_cpu_irq_init(struct device_node *of_node)
-{
- struct irq_domain *domain;
-
- /* Mask interrupts. */
- clear_c0_status(ST0_IM);
- clear_c0_cause(CAUSEF_IP);
-
- domain = irq_domain_add_legacy(of_node, 8, MIPS_CPU_IRQ_BASE, 0,
- &mips_cpu_intc_irq_domain_ops, NULL);
- if (!domain)
- panic("Failed to add irqdomain for MIPS CPU");
-}
-
-void __init mips_cpu_irq_init(void)
-{
- __mips_cpu_irq_init(NULL);
-}
-
-int __init mips_cpu_irq_of_init(struct device_node *of_node,
- struct device_node *parent)
-{
- __mips_cpu_irq_init(of_node);
- return 0;
-}
diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
index 7afcc2f22c0d..de63d36af895 100644
--- a/arch/mips/kernel/kgdb.c
+++ b/arch/mips/kernel/kgdb.c
@@ -378,10 +378,6 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
struct kgdb_arch arch_kgdb_ops;
-/*
- * We use kgdb_early_setup so that functions we need to call now don't
- * cause trouble when called again later.
- */
int kgdb_arch_init(void)
{
union mips_instruction insn = {
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index 3e4491aa6d6b..789d7bf4fef3 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -154,7 +154,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
unsigned long __user *user_mask_ptr)
{
unsigned int real_len;
- cpumask_t mask;
+ cpumask_t allowed, mask;
int retval;
struct task_struct *p;
@@ -173,7 +173,8 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
if (retval)
goto out_unlock;
- cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask);
+ cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed);
+ cpumask_and(&mask, &allowed, cpu_active_mask);
out_unlock:
read_unlock(&tasklist_lock);
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
index e303cb1ef2f4..5fcec3032f38 100644
--- a/arch/mips/kernel/prom.c
+++ b/arch/mips/kernel/prom.c
@@ -18,6 +18,7 @@
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
+#include <asm/bootinfo.h>
#include <asm/page.h>
#include <asm/prom.h>
@@ -37,7 +38,7 @@ char *mips_get_machine_name(void)
return mips_machine_name;
}
-#ifdef CONFIG_OF
+#ifdef CONFIG_USE_OF
void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
return add_memory_region(base, size, BOOT_MEM_RAM);
diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S
index 74bab9ddd0e1..c6bbf2165051 100644
--- a/arch/mips/kernel/relocate_kernel.S
+++ b/arch/mips/kernel/relocate_kernel.S
@@ -24,7 +24,7 @@ LEAF(relocate_new_kernel)
process_entry:
PTR_L s2, (s0)
- PTR_ADD s0, s0, SZREG
+ PTR_ADDIU s0, s0, SZREG
/*
* In case of a kdump/crash kernel, the indirection page is not
@@ -61,9 +61,9 @@ copy_word:
/* copy page word by word */
REG_L s5, (s2)
REG_S s5, (s4)
- PTR_ADD s4, s4, SZREG
- PTR_ADD s2, s2, SZREG
- LONG_SUB s6, s6, 1
+ PTR_ADDIU s4, s4, SZREG
+ PTR_ADDIU s2, s2, SZREG
+ LONG_ADDIU s6, s6, -1
beq s6, zero, process_entry
b copy_word
b process_entry
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 6e8de80bb446..4cc13508d967 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -73,10 +73,11 @@ NESTED(handle_sys, PT_SIZE, sp)
.set noreorder
.set nomacro
-1: user_lw(t5, 16(t0)) # argument #5 from usp
-4: user_lw(t6, 20(t0)) # argument #6 from usp
-3: user_lw(t7, 24(t0)) # argument #7 from usp
-2: user_lw(t8, 28(t0)) # argument #8 from usp
+load_a4: user_lw(t5, 16(t0)) # argument #5 from usp
+load_a5: user_lw(t6, 20(t0)) # argument #6 from usp
+load_a6: user_lw(t7, 24(t0)) # argument #7 from usp
+load_a7: user_lw(t8, 28(t0)) # argument #8 from usp
+loads_done:
sw t5, 16(sp) # argument #5 to ksp
sw t6, 20(sp) # argument #6 to ksp
@@ -85,10 +86,10 @@ NESTED(handle_sys, PT_SIZE, sp)
.set pop
.section __ex_table,"a"
- PTR 1b,bad_stack
- PTR 2b,bad_stack
- PTR 3b,bad_stack
- PTR 4b,bad_stack
+ PTR load_a4, bad_stack_a4
+ PTR load_a5, bad_stack_a5
+ PTR load_a6, bad_stack_a6
+ PTR load_a7, bad_stack_a7
.previous
lw t0, TI_FLAGS($28) # syscall tracing enabled?
@@ -153,8 +154,8 @@ syscall_trace_entry:
/* ------------------------------------------------------------------------ */
/*
- * The stackpointer for a call with more than 4 arguments is bad.
- * We probably should handle this case a bit more drastic.
+ * Our open-coded access area sanity test for the stack pointer
+ * failed. We probably should handle this case a bit more drastically.
*/
bad_stack:
li v0, EFAULT
@@ -163,6 +164,22 @@ bad_stack:
sw t0, PT_R7(sp)
j o32_syscall_exit
+bad_stack_a4:
+ li t5, 0
+ b load_a5
+
+bad_stack_a5:
+ li t6, 0
+ b load_a6
+
+bad_stack_a6:
+ li t7, 0
+ b load_a7
+
+bad_stack_a7:
+ li t8, 0
+ b loads_done
+
/*
* The system call does not exist in this kernel
*/
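The renamed labels and per-argument fixup entries above change how faults on the optional o32 stack arguments are handled: previously any faulting load of arguments 5-8 branched to bad_stack and failed the whole syscall with EFAULT, whereas now each faulting slot is individually replaced with zero and execution resumes at the next load (the scall64-o32.S diff further down gets the same treatment). A rough C equivalent, offered only as an illustration; user_load() is a hypothetical stand-in for the exception-table protected user access:

	/* Hypothetical helper: copies one word from user space, non-zero on fault. */
	extern int user_load(unsigned long *dst, const unsigned long *usrc);

	/* Illustration of the new behaviour, not kernel code. */
	static void fetch_o32_stack_args(const unsigned long *usp, unsigned long arg[4])
	{
		int i;

		for (i = 0; i < 4; i++) {			/* syscall arguments 5..8 */
			unsigned long val;

			if (user_load(&val, usp + 4 + i))	/* live at usp + 16, 20, 24, 28 */
				val = 0;			/* faulting slot: substitute zero */
			arg[i] = val;
		}
	}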
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index ad4d44635c76..a6f6b762c47a 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -80,7 +80,7 @@ syscall_trace_entry:
SAVE_STATIC
move s0, t2
move a0, sp
- daddiu a1, v0, __NR_64_Linux
+ move a1, v0
jal syscall_trace_enter
bltz v0, 2f # seccomp failed? Skip syscall
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 446cc654da56..4b2010654c46 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -72,7 +72,7 @@ n32_syscall_trace_entry:
SAVE_STATIC
move s0, t2
move a0, sp
- daddiu a1, v0, __NR_N32_Linux
+ move a1, v0
jal syscall_trace_enter
bltz v0, 2f # seccomp failed? Skip syscall
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index d07b210fbeff..f543ff4feef9 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -69,16 +69,17 @@ NESTED(handle_sys, PT_SIZE, sp)
daddu t1, t0, 32
bltz t1, bad_stack
-1: lw a4, 16(t0) # argument #5 from usp
-2: lw a5, 20(t0) # argument #6 from usp
-3: lw a6, 24(t0) # argument #7 from usp
-4: lw a7, 28(t0) # argument #8 from usp (for indirect syscalls)
+load_a4: lw a4, 16(t0) # argument #5 from usp
+load_a5: lw a5, 20(t0) # argument #6 from usp
+load_a6: lw a6, 24(t0) # argument #7 from usp
+load_a7: lw a7, 28(t0) # argument #8 from usp
+loads_done:
.section __ex_table,"a"
- PTR 1b, bad_stack
- PTR 2b, bad_stack
- PTR 3b, bad_stack
- PTR 4b, bad_stack
+ PTR load_a4, bad_stack_a4
+ PTR load_a5, bad_stack_a5
+ PTR load_a6, bad_stack_a6
+ PTR load_a7, bad_stack_a7
.previous
li t1, _TIF_WORK_SYSCALL_ENTRY
@@ -167,6 +168,22 @@ bad_stack:
sd t0, PT_R7(sp)
j o32_syscall_exit
+bad_stack_a4:
+ li a4, 0
+ b load_a5
+
+bad_stack_a5:
+ li a5, 0
+ b load_a6
+
+bad_stack_a6:
+ li a6, 0
+ b load_a7
+
+bad_stack_a7:
+ li a7, 0
+ b loads_done
+
not_o32_scall:
/*
* This is not an o32 compatibility syscall, pass it on
@@ -383,7 +400,7 @@ EXPORT(sys32_call_table)
PTR sys_connect /* 4170 */
PTR sys_getpeername
PTR sys_getsockname
- PTR sys_getsockopt
+ PTR compat_sys_getsockopt
PTR sys_listen
PTR compat_sys_recv /* 4175 */
PTR compat_sys_recvfrom
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index be73c491182b..008b3378653a 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -337,6 +337,11 @@ static void __init bootmem_init(void)
min_low_pfn = start;
if (end <= reserved_end)
continue;
+#ifdef CONFIG_BLK_DEV_INITRD
+ /* mapstart should be after initrd_end */
+ if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end)))
+ continue;
+#endif
if (start >= mapstart)
continue;
mapstart = max(reserved_end, start);
@@ -366,14 +371,6 @@ static void __init bootmem_init(void)
max_low_pfn = PFN_DOWN(HIGHMEM_START);
}
-#ifdef CONFIG_BLK_DEV_INITRD
- /*
- * mapstart should be after initrd_end
- */
- if (initrd_end)
- mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end)));
-#endif
-
/*
* Initialize the boot-time allocator with low memory only.
*/
diff --git a/arch/mips/kernel/signal-common.h b/arch/mips/kernel/signal-common.h
index 06805e09bcd3..0b85f827cd18 100644
--- a/arch/mips/kernel/signal-common.h
+++ b/arch/mips/kernel/signal-common.h
@@ -28,12 +28,7 @@ extern void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
extern int fpcsr_pending(unsigned int __user *fpcsr);
/* Make sure we will not lose FPU ownership */
-#ifdef CONFIG_PREEMPT
-#define lock_fpu_owner() preempt_disable()
-#define unlock_fpu_owner() preempt_enable()
-#else
-#define lock_fpu_owner() pagefault_disable()
-#define unlock_fpu_owner() pagefault_enable()
-#endif
+#define lock_fpu_owner() ({ preempt_disable(); pagefault_disable(); })
+#define unlock_fpu_owner() ({ pagefault_enable(); preempt_enable(); })
#endif /* __SIGNAL_COMMON_H */
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 19a7705f2a01..5d7f2634996f 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -409,8 +409,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
{
- memset(to, 0, sizeof *to);
-
if (copy_from_user(to, from, 3*sizeof(int)) ||
copy_from_user(to->_sifields._pad,
from->_sifields._pad, SI_PAD_SIZE32))
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index fd528d7ea278..78cf8c2f1de0 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -284,7 +284,7 @@ static irqreturn_t bmips5000_ipi_interrupt(int irq, void *dev_id)
if (action == 0)
scheduler_ipi();
else
- smp_call_function_interrupt();
+ generic_smp_call_function_interrupt();
return IRQ_HANDLED;
}
@@ -336,7 +336,7 @@ static irqreturn_t bmips43xx_ipi_interrupt(int irq, void *dev_id)
if (action & SMP_RESCHEDULE_YOURSELF)
scheduler_ipi();
if (action & SMP_CALL_FUNCTION)
- smp_call_function_interrupt();
+ generic_smp_call_function_interrupt();
return IRQ_HANDLED;
}
@@ -444,7 +444,7 @@ struct plat_smp_ops bmips5000_smp_ops = {
static void bmips_wr_vec(unsigned long dst, char *start, char *end)
{
memcpy((void *)dst, start, end - start);
- dma_cache_wback((unsigned long)start, end - start);
+ dma_cache_wback(dst, end - start);
local_flush_icache_range(dst, dst + (end - start));
instruction_hazard();
}
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index 4251d390b5b6..c88937745b4e 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -133,7 +133,7 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
/*
* Patch the start of mips_cps_core_entry to provide:
*
- * v0 = CM base address
+ * v1 = CM base address
* s0 = kseg0 CCA
*/
entry_code = (u32 *)&mips_cps_core_entry;
@@ -369,7 +369,7 @@ void play_dead(void)
static void wait_for_sibling_halt(void *ptr_cpu)
{
- unsigned cpu = (unsigned)ptr_cpu;
+ unsigned cpu = (unsigned long)ptr_cpu;
unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
unsigned halted;
unsigned long flags;
@@ -430,7 +430,7 @@ static void cps_cpu_die(unsigned int cpu)
*/
err = smp_call_function_single(cpu_death_sibling,
wait_for_sibling_halt,
- (void *)cpu, 1);
+ (void *)(unsigned long)cpu, 1);
if (err)
panic("Failed to call remote sibling CPU\n");
}
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index faa46ebd9dda..a31896c33716 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -63,6 +63,13 @@ EXPORT_SYMBOL(cpu_sibling_map);
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);
+/*
+ * A logical cpu mask containing only one VPE per core to
+ * reduce the number of IPIs on large MT systems.
+ */
+cpumask_t cpu_foreign_map __read_mostly;
+EXPORT_SYMBOL(cpu_foreign_map);
+
/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;
@@ -103,6 +110,29 @@ static inline void set_cpu_core_map(int cpu)
}
}
+/*
+ * Calculate a new cpu_foreign_map mask whenever a
+ * new cpu appears or disappears.
+ */
+static inline void calculate_cpu_foreign_map(void)
+{
+ int i, k, core_present;
+ cpumask_t temp_foreign_map;
+
+ /* Re-calculate the mask */
+ for_each_online_cpu(i) {
+ core_present = 0;
+ for_each_cpu(k, &temp_foreign_map)
+ if (cpu_data[i].package == cpu_data[k].package &&
+ cpu_data[i].core == cpu_data[k].core)
+ core_present = 1;
+ if (!core_present)
+ cpumask_set_cpu(i, &temp_foreign_map);
+ }
+
+ cpumask_copy(&cpu_foreign_map, &temp_foreign_map);
+}
+
struct plat_smp_ops *mp_ops;
EXPORT_SYMBOL(mp_ops);
@@ -146,6 +176,8 @@ asmlinkage void start_secondary(void)
set_cpu_sibling_map(cpu);
set_cpu_core_map(cpu);
+ calculate_cpu_foreign_map();
+
cpumask_set_cpu(cpu, &cpu_callin_map);
synchronise_count_slave(cpu);
@@ -160,22 +192,21 @@ asmlinkage void start_secondary(void)
cpu_startup_entry(CPUHP_ONLINE);
}
-/*
- * Call into both interrupt handlers, as we share the IPI for them
- */
-void __irq_entry smp_call_function_interrupt(void)
-{
- irq_enter();
- generic_smp_call_function_interrupt();
- irq_exit();
-}
-
static void stop_this_cpu(void *dummy)
{
/*
- * Remove this CPU:
+ * Remove this CPU. Be a bit slow here and
+ * set the bits for every online CPU so we don't miss
+ * any IPI whilst taking this VPE down.
*/
+
+ cpumask_copy(&cpu_foreign_map, cpu_online_mask);
+
+ /* Make it visible to every other CPU */
+ smp_mb();
+
set_cpu_online(smp_processor_id(), false);
+ calculate_cpu_foreign_map();
local_irq_disable();
while (1);
}
@@ -197,6 +228,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
mp_ops->prepare_cpus(max_cpus);
set_cpu_sibling_map(0);
set_cpu_core_map(0);
+ calculate_cpu_foreign_map();
#ifndef CONFIG_HOTPLUG_CPU
init_cpu_present(cpu_possible_mask);
#endif
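As a worked example of the new mask (not taken from the patch): on a system with two cores, each with two online VPEs numbered 0-3, calculate_cpu_foreign_map() keeps only the first online CPU it meets for each (package, core) pair, so cpu_foreign_map ends up as {0, 2}; broadcasts sent to that mask then reach each physical core once instead of once per VPE, which is the IPI reduction the comment above describes.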
diff --git a/arch/mips/kernel/sysrq.c b/arch/mips/kernel/sysrq.c
new file mode 100644
index 000000000000..5b539f5fc9d9
--- /dev/null
+++ b/arch/mips/kernel/sysrq.c
@@ -0,0 +1,77 @@
+/*
+ * MIPS specific sysrq operations.
+ *
+ * Copyright (C) 2015 Imagination Technologies Ltd.
+ */
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/sysrq.h>
+#include <linux/workqueue.h>
+
+#include <asm/cpu-features.h>
+#include <asm/mipsregs.h>
+#include <asm/tlbdebug.h>
+
+/*
+ * Dump TLB entries on all CPUs.
+ */
+
+static DEFINE_SPINLOCK(show_lock);
+
+static void sysrq_tlbdump_single(void *dummy)
+{
+ const int field = 2 * sizeof(unsigned long);
+ unsigned long flags;
+
+ spin_lock_irqsave(&show_lock, flags);
+
+ pr_info("CPU%d:\n", smp_processor_id());
+ pr_info("Index : %0x\n", read_c0_index());
+ pr_info("Pagemask: %0x\n", read_c0_pagemask());
+ pr_info("EntryHi : %0*lx\n", field, read_c0_entryhi());
+ pr_info("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
+ pr_info("EntryLo1: %0*lx\n", field, read_c0_entrylo1());
+ pr_info("Wired : %0x\n", read_c0_wired());
+ pr_info("Pagegrain: %0x\n", read_c0_pagegrain());
+ if (cpu_has_htw) {
+ pr_info("PWField : %0*lx\n", field, read_c0_pwfield());
+ pr_info("PWSize : %0*lx\n", field, read_c0_pwsize());
+ pr_info("PWCtl : %0x\n", read_c0_pwctl());
+ }
+ pr_info("\n");
+ dump_tlb_all();
+ pr_info("\n");
+
+ spin_unlock_irqrestore(&show_lock, flags);
+}
+
+#ifdef CONFIG_SMP
+static void sysrq_tlbdump_othercpus(struct work_struct *dummy)
+{
+ smp_call_function(sysrq_tlbdump_single, NULL, 0);
+}
+
+static DECLARE_WORK(sysrq_tlbdump, sysrq_tlbdump_othercpus);
+#endif
+
+static void sysrq_handle_tlbdump(int key)
+{
+ sysrq_tlbdump_single(NULL);
+#ifdef CONFIG_SMP
+ schedule_work(&sysrq_tlbdump);
+#endif
+}
+
+static struct sysrq_key_op sysrq_tlbdump_op = {
+ .handler = sysrq_handle_tlbdump,
+ .help_msg = "show-tlbs(x)",
+ .action_msg = "Show TLB entries",
+ .enable_mask = SYSRQ_ENABLE_DUMP,
+};
+
+static int __init mips_sysrq_init(void)
+{
+ return register_sysrq_key('x', &sysrq_tlbdump_op);
+}
+arch_initcall(mips_sysrq_init);
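With the handler above registered on the 'x' key, the dump can be requested through the usual magic-SysRq paths once CONFIG_MAGIC_SYSRQ is enabled; a minimal user-space sketch (illustration only, needs root):

	#include <fcntl.h>
	#include <unistd.h>

	/* Equivalent of `echo x > /proc/sysrq-trigger`: asks the kernel to run
	 * the newly registered "Show TLB entries" handler. */
	int main(void)
	{
		int fd = open("/proc/sysrq-trigger", O_WRONLY);

		if (fd < 0)
			return 1;
		if (write(fd, "x", 1) != 1) {
			close(fd);
			return 1;
		}
		close(fd);
		return 0;
	}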
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index d2d1c1933bc9..8ea28e6ab37d 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -192,6 +192,7 @@ static void show_stacktrace(struct task_struct *task,
void show_stack(struct task_struct *task, unsigned long *sp)
{
struct pt_regs regs;
+ mm_segment_t old_fs = get_fs();
if (sp) {
regs.regs[29] = (unsigned long)sp;
regs.regs[31] = 0;
@@ -210,7 +211,13 @@ void show_stack(struct task_struct *task, unsigned long *sp)
prepare_frametrace(&regs);
}
}
+ /*
+ * show_stack() deals exclusively with kernel mode, so be sure to access
+ * the stack in the kernel (not user) address space.
+ */
+ set_fs(KERNEL_DS);
show_stacktrace(task, &regs);
+ set_fs(old_fs);
}
static void show_code(unsigned int __user *pc)
@@ -236,6 +243,7 @@ static void __show_regs(const struct pt_regs *regs)
{
const int field = 2 * sizeof(unsigned long);
unsigned int cause = regs->cp0_cause;
+ unsigned int exccode;
int i;
show_regs_print_info(KERN_DEFAULT);
@@ -317,10 +325,10 @@ static void __show_regs(const struct pt_regs *regs)
}
printk("\n");
- printk("Cause : %08x\n", cause);
+ exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
+ printk("Cause : %08x (ExcCode %02x)\n", cause, exccode);
- cause = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
- if (1 <= cause && cause <= 5)
+ if (1 <= exccode && exccode <= 5)
printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);
printk("PrId : %08x (%s)\n", read_c0_prid(),
@@ -1518,6 +1526,7 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
const int field = 2 * sizeof(unsigned long);
int multi_match = regs->cp0_status & ST0_TS;
enum ctx_state prev_state;
+ mm_segment_t old_fs = get_fs();
prev_state = exception_enter();
show_regs(regs);
@@ -1539,8 +1548,13 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
dump_tlb_all();
}
+ if (!user_mode(regs))
+ set_fs(KERNEL_DS);
+
show_code((unsigned int __user *) regs->cp0_epc);
+ set_fs(old_fs);
+
/*
* Some chips may have other causes of machine check (e.g. SB1
* graduation timer)
@@ -2129,10 +2143,10 @@ void per_cpu_trap_init(bool is_boot_cpu)
BUG_ON(current->mm);
enter_lazy_tlb(&init_mm, current);
- /* Boot CPU's cache setup in setup_arch(). */
- if (!is_boot_cpu)
- cpu_cache_init();
- tlb_init();
+ /* Boot CPU's cache setup in setup_arch(). */
+ if (!is_boot_cpu)
+ cpu_cache_init();
+ tlb_init();
TLBMISS_HANDLER_SETUP();
}
@@ -2184,11 +2198,6 @@ void __init trap_init(void)
check_wait();
-#if defined(CONFIG_KGDB)
- if (kgdb_early_setup)
- return; /* Already done */
-#endif
-
if (cpu_has_veic || cpu_has_vint) {
unsigned long size = 0x200 + VECTORSPACING*64;
ebase = (unsigned long)
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index af84bef0c90d..eb3efd137fd1 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -438,7 +438,7 @@ do { \
: "memory"); \
} while(0)
-#define StoreDW(addr, value, res) \
+#define _StoreDW(addr, value, res) \
do { \
__asm__ __volatile__ ( \
".set\tpush\n\t" \
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 3b46f7ce9ca7..07d32a4aea60 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -125,8 +125,14 @@ SECTIONS
.exit.data : {
EXIT_DATA
}
-
+#ifdef CONFIG_SMP
PERCPU_SECTION(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
+#endif
+#ifdef CONFIG_MIPS_RAW_APPENDED_DTB
+ __appended_dtb = .;
+ /* leave space for appended DTB */
+ . += 0x100000;
+#endif
/*
* Align to 64K in attempt to eliminate holes before the
* .bss..swapper_pg_dir section at the start of .bss. This