author     Linus Torvalds <torvalds@linux-foundation.org>	2015-04-17 21:50:54 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>	2015-04-17 21:50:54 +0200
commit     bfaf245022b4b8661af2e35f467cf0e91943c24c (patch)
tree       b5a6ee49a047557a791eb897c8c9545a155e36b7 /arch/mips/kernel
parent     Merge tag 'xtensa-20150416' of git://github.com/czankel/xtensa-linux (diff)
parent     Merge branch '4.0-fixes' into mips-for-linux-next (diff)
Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus
Pull MIPS updates from Ralf Baechle:
"This is the main pull request for MIPS for Linux 4.1. Most
noteworthy:
- Add more Octeon-optimized crypto functions
- Octeon crypto preemption and locking fixes
- Little endian support for Octeon
- Use correct CSR to soft reset Octeons
- Support LEDs on the Octeon-based DSR-1000N
- Fix PCI interrupt mapping for the Octeon-based DSR-1000N
- Mark prom_free_prom_memory() as __init for a number of systems
- Support for Imagination's Pistachio SoC. This includes arch and
CLK bits. I'd like to merge pinctrl bits later
- Improve parallelism of csum_partial for certain pipelines
- Organize DTB files in subdirs like other architectures
- Implement read_sched_clock for all MIPS platforms other than
Octeon
- Massive series of 38 fixes and cleanups for the FPU emulator /
kernel
- Further FPU emulator work to support new features. This sits on a
separate branch which also has been pulled into the 4.1 KVM branch
- Clean up and fixes for the SEAD3 eval board; remove unused file
- Various updates for Netlogic platforms
- A number of small updates for Loongson 3 platforms
- Increase the memory limit for ATH79 platforms to 256MB
- A fair number of fixes and updates for BCM47xx platforms
- Finish the implementation of XPA support
- MIPS FDC support. No, not floppy controller but Fast Debug Channel :)
- Detect the R16000 used in SGI legacy platforms
- Fix Kconfig dependencies for the SSB bus support"
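For the read_sched_clock item above, the work amounts to handing the generic sched_clock layer a fast, monotonic counter read so sched_clock() stops falling back to jiffies resolution. A condensed sketch of the simplest case, taken from the csrc-r4k.c hunk in the diff below (clocksource_mips, cpu_has_counter and mips_hpt_frequency are kernel symbols defined in or near that file; the clocksource rating computation is elided here):

#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/sched_clock.h>

#include <asm/time.h>

/* Read the free-running CP0 Count register; notrace because this is
 * called from scheduler and tracing hot paths. */
static u64 notrace r4k_read_sched_clock(void)
{
	return read_c0_count();
}

int __init init_r4k_clocksource(void)
{
	if (!cpu_has_counter || !mips_hpt_frequency)
		return -ENXIO;

	clocksource_register_hz(&clocksource_mips, mips_hpt_frequency);

	/* Count is 32 bits wide and ticks at mips_hpt_frequency. */
	sched_clock_register(r4k_read_sched_clock, 32, mips_hpt_frequency);

	return 0;
}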
* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus: (265 commits)
MIPS: Makefile: Fix MIPS ASE detection code
MIPS: asm: elf: Set O32 default FPU flags
MIPS: BCM47XX: Fix detecting Microsoft MN-700 & Asus WL500G
MIPS: Kconfig: Disable SMP/CPS for 64-bit
MIPS: Hibernate: flush TLB entries earlier
MIPS: smp-cps: cpu_set FPU mask if FPU present
MIPS: lose_fpu(): Disable FPU when MSA enabled
MIPS: ralink: add missing symbol for RALINK_ILL_ACC
MIPS: ralink: Fix bad config symbol in PCI makefile.
SSB: fix Kconfig dependencies
MIPS: Malta: Detect and fix bad memsize values
Revert "MIPS: Avoid pipeline stalls on some MIPS32R2 cores."
MIPS: Octeon: Delete override of cpu_has_mips_r2_exec_hazard.
MIPS: Fix cpu_has_mips_r2_exec_hazard.
MIPS: kernel: entry.S: Set correct ISA level for mips_ihb
MIPS: asm: spinlock: Fix addiu instruction for R10000_LLSC_WAR case
MIPS: r4kcache: Use correct base register for MIPS R6 cache flushes
MIPS: Kconfig: Fix typo for the r2-to-r6 emulator kernel parameter
MIPS: unaligned: Fix regular load/store instruction emulation for EVA
MIPS: unaligned: Surround load/store macros in do {} while statements
...
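The "Surround load/store macros in do {} while statements" commit above addresses a standard C macro pitfall: a macro that expands to more than one statement misbehaves under an unbraced if/else. A minimal, generic illustration (not the kernel's LoadW/StoreW macros themselves):

#include <stdio.h>

/*
 * Unsafe: expands to two statements, so under an unbraced "if" only the
 * first one is conditional, and an "else" after it will not even compile.
 */
#define LOG_AND_COUNT_BAD(msg, counter) \
	printf("%s\n", msg);            \
	(counter)++

/*
 * Safe: do { } while (0) turns the body into a single statement and
 * still accepts the caller's trailing semicolon.
 */
#define LOG_AND_COUNT(msg, counter)     \
	do {                            \
		printf("%s\n", msg);    \
		(counter)++;            \
	} while (0)

int main(void)
{
	int hits = 0;
	int verbose = 0;

	if (verbose)
		LOG_AND_COUNT("event", hits);   /* increments only when verbose */
	else
		printf("quiet\n");              /* would not compile with the _BAD variant */

	printf("hits = %d\n", hits);
	return 0;
}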
Diffstat (limited to 'arch/mips/kernel')
 arch/mips/kernel/branch.c             |   6
 arch/mips/kernel/cevt-r4k.c           |  33
 arch/mips/kernel/cevt-txx9.c          |   9
 arch/mips/kernel/cpu-probe.c          | 200
 arch/mips/kernel/csrc-bcm1480.c       |  12
 arch/mips/kernel/csrc-ioasic.c        |  13
 arch/mips/kernel/csrc-r4k.c           |   8
 arch/mips/kernel/csrc-sb1250.c        |  23
 arch/mips/kernel/elf.c                |  14
 arch/mips/kernel/entry.S              |   3
 arch/mips/kernel/idle.c               |  13
 arch/mips/kernel/mips-r2-to-r6-emul.c |  15
 arch/mips/kernel/perf_event_mipsxx.c  |  83
 arch/mips/kernel/proc.c               |   1
 arch/mips/kernel/process.c            |  10
 arch/mips/kernel/prom.c               |   5
 arch/mips/kernel/ptrace.c             |  10
 arch/mips/kernel/r2300_switch.S       |   7
 arch/mips/kernel/r4k_switch.S         |   7
 arch/mips/kernel/reset.c              |  25
 arch/mips/kernel/setup.c              |   2
 arch/mips/kernel/smp-cps.c            |   6
 arch/mips/kernel/smp.c                |   6
 arch/mips/kernel/traps.c              | 276
 arch/mips/kernel/unaligned.c          | 346
25 files changed, 794 insertions, 339 deletions
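The most visible behavioural change in the timer path shows up in the cevt-r4k.c hunk below: the CP0 compare interrupt is now requested with IRQF_SHARED so the line can also carry performance-counter and FDC interrupts, which in turn means the handler must return IRQ_NONE when the timer was not the source. A condensed sketch of the reworked handler (handle_perf_irq and mips_clockevent_device are defined earlier in the same file):

irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
	const int r2 = cpu_has_mips_r2_r6;
	struct clock_event_device *cd;
	int cpu = smp_processor_id();

	/*
	 * A perf-counter overflow sharing this line may be the real
	 * source; if it fired and we cannot also check Cause.TI
	 * (pre-R2 cores), skip the timer path entirely.
	 */
	if (handle_perf_irq(r2))
		return IRQ_HANDLED;

	/* Claim the interrupt only if Cause.TI says the timer fired. */
	if (!r2 || (read_c0_cause() & CAUSEF_TI)) {
		/* Acknowledge by rewriting Compare, then run the event handler. */
		write_c0_compare(read_c0_compare());
		cd = &per_cpu(mips_clockevent_device, cpu);
		cd->event_handler(cd);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}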
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c index c2e0f45ddf6c..c0c5e5972256 100644 --- a/arch/mips/kernel/branch.c +++ b/arch/mips/kernel/branch.c @@ -36,8 +36,10 @@ int __isa_exception_epc(struct pt_regs *regs) return epc; } if (cpu_has_mips16) { - if (((union mips16e_instruction)inst).ri.opcode - == MIPS16e_jal_op) + union mips16e_instruction inst_mips16e; + + inst_mips16e.full = inst; + if (inst_mips16e.ri.opcode == MIPS16e_jal_op) epc += 4; else epc += 2; diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c index 82bd2b278a24..d70c4d893219 100644 --- a/arch/mips/kernel/cevt-r4k.c +++ b/arch/mips/kernel/cevt-r4k.c @@ -37,6 +37,24 @@ void mips_set_clock_mode(enum clock_event_mode mode, DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device); int cp0_timer_irq_installed; +/* + * Possibly handle a performance counter interrupt. + * Return true if the timer interrupt should not be checked + */ +static inline int handle_perf_irq(int r2) +{ + /* + * The performance counter overflow interrupt may be shared with the + * timer interrupt (cp0_perfcount_irq < 0). If it is and a + * performance counter has overflowed (perf_irq() == IRQ_HANDLED) + * and we can't reliably determine if a counter interrupt has also + * happened (!r2) then don't check for a timer interrupt. + */ + return (cp0_perfcount_irq < 0) && + perf_irq() == IRQ_HANDLED && + !r2; +} + irqreturn_t c0_compare_interrupt(int irq, void *dev_id) { const int r2 = cpu_has_mips_r2_r6; @@ -50,27 +68,32 @@ irqreturn_t c0_compare_interrupt(int irq, void *dev_id) * the performance counter interrupt handler anyway. */ if (handle_perf_irq(r2)) - goto out; + return IRQ_HANDLED; /* * The same applies to performance counter interrupts. But with the * above we now know that the reason we got here must be a timer * interrupt. Being the paranoiacs we are we check anyway. */ - if (!r2 || (read_c0_cause() & (1 << 30))) { + if (!r2 || (read_c0_cause() & CAUSEF_TI)) { /* Clear Count/Compare Interrupt */ write_c0_compare(read_c0_compare()); cd = &per_cpu(mips_clockevent_device, cpu); cd->event_handler(cd); + + return IRQ_HANDLED; } -out: - return IRQ_HANDLED; + return IRQ_NONE; } struct irqaction c0_compare_irqaction = { .handler = c0_compare_interrupt, - .flags = IRQF_PERCPU | IRQF_TIMER, + /* + * IRQF_SHARED: The timer interrupt may be shared with other interrupts + * such as perf counter and FDC interrupts. 
+ */ + .flags = IRQF_PERCPU | IRQF_TIMER | IRQF_SHARED, .name = "timer", }; diff --git a/arch/mips/kernel/cevt-txx9.c b/arch/mips/kernel/cevt-txx9.c index 2ae08462e46e..723932441ecc 100644 --- a/arch/mips/kernel/cevt-txx9.c +++ b/arch/mips/kernel/cevt-txx9.c @@ -14,6 +14,7 @@ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irq.h> +#include <linux/sched_clock.h> #include <asm/time.h> #include <asm/txx9tmr.h> @@ -46,6 +47,11 @@ static struct txx9_clocksource txx9_clocksource = { }, }; +static u64 notrace txx9_read_sched_clock(void) +{ + return __raw_readl(&txx9_clocksource.tmrptr->trr); +} + void __init txx9_clocksource_init(unsigned long baseaddr, unsigned int imbusclk) { @@ -61,6 +67,9 @@ void __init txx9_clocksource_init(unsigned long baseaddr, __raw_writel(1 << TXX9_CLOCKSOURCE_BITS, &tmrptr->cpra); __raw_writel(TCR_BASE | TXx9_TMTCR_TCE, &tmrptr->tcr); txx9_clocksource.tmrptr = tmrptr; + + sched_clock_register(txx9_read_sched_clock, TXX9_CLOCKSOURCE_BITS, + TIMER_CLK(imbusclk)); } struct txx9_clock_event_device { diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 48dfb9de853d..e36515dcd3b2 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -20,6 +20,7 @@ #include <asm/bugs.h> #include <asm/cpu.h> +#include <asm/cpu-features.h> #include <asm/cpu-type.h> #include <asm/fpu.h> #include <asm/mipsregs.h> @@ -31,11 +32,127 @@ #include <asm/spram.h> #include <asm/uaccess.h> +/* + * Get the FPU Implementation/Revision. + */ +static inline unsigned long cpu_get_fpu_id(void) +{ + unsigned long tmp, fpu_id; + + tmp = read_c0_status(); + __enable_fpu(FPU_AS_IS); + fpu_id = read_32bit_cp1_register(CP1_REVISION); + write_c0_status(tmp); + return fpu_id; +} + +/* + * Check if the CPU has an external FPU. + */ +static inline int __cpu_has_fpu(void) +{ + return (cpu_get_fpu_id() & FPIR_IMP_MASK) != FPIR_IMP_NONE; +} + +static inline unsigned long cpu_get_msa_id(void) +{ + unsigned long status, msa_id; + + status = read_c0_status(); + __enable_fpu(FPU_64BIT); + enable_msa(); + msa_id = read_msa_ir(); + disable_msa(); + write_c0_status(status); + return msa_id; +} + +/* + * Determine the FCSR mask for FPU hardware. + */ +static inline void cpu_set_fpu_fcsr_mask(struct cpuinfo_mips *c) +{ + unsigned long sr, mask, fcsr, fcsr0, fcsr1; + + mask = FPU_CSR_ALL_X | FPU_CSR_ALL_E | FPU_CSR_ALL_S | FPU_CSR_RM; + + sr = read_c0_status(); + __enable_fpu(FPU_AS_IS); + + fcsr = read_32bit_cp1_register(CP1_STATUS); + + fcsr0 = fcsr & mask; + write_32bit_cp1_register(CP1_STATUS, fcsr0); + fcsr0 = read_32bit_cp1_register(CP1_STATUS); + + fcsr1 = fcsr | ~mask; + write_32bit_cp1_register(CP1_STATUS, fcsr1); + fcsr1 = read_32bit_cp1_register(CP1_STATUS); + + write_32bit_cp1_register(CP1_STATUS, fcsr); + + write_c0_status(sr); + + c->fpu_msk31 = ~(fcsr0 ^ fcsr1) & ~mask; +} + +/* + * Set the FIR feature flags for the FPU emulator. + */ +static void cpu_set_nofpu_id(struct cpuinfo_mips *c) +{ + u32 value; + + value = 0; + if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 | + MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 | + MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) + value |= MIPS_FPIR_D | MIPS_FPIR_S; + if (c->isa_level & (MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 | + MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) + value |= MIPS_FPIR_F64 | MIPS_FPIR_L | MIPS_FPIR_W; + c->fpu_id = value; +} + +/* Determined FPU emulator mask to use for the boot CPU with "nofpu". */ +static unsigned int mips_nofpu_msk31; + +/* + * Set options for FPU hardware. 
+ */ +static void cpu_set_fpu_opts(struct cpuinfo_mips *c) +{ + c->fpu_id = cpu_get_fpu_id(); + mips_nofpu_msk31 = c->fpu_msk31; + + if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 | + MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 | + MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) { + if (c->fpu_id & MIPS_FPIR_3D) + c->ases |= MIPS_ASE_MIPS3D; + if (c->fpu_id & MIPS_FPIR_FREP) + c->options |= MIPS_CPU_FRE; + } + + cpu_set_fpu_fcsr_mask(c); +} + +/* + * Set options for the FPU emulator. + */ +static void cpu_set_nofpu_opts(struct cpuinfo_mips *c) +{ + c->options &= ~MIPS_CPU_FPU; + c->fpu_msk31 = mips_nofpu_msk31; + + cpu_set_nofpu_id(c); +} + static int mips_fpu_disabled; static int __init fpu_disable(char *s) { - cpu_data[0].options &= ~MIPS_CPU_FPU; + cpu_set_nofpu_opts(&boot_cpu_data); mips_fpu_disabled = 1; return 1; @@ -178,41 +295,6 @@ static inline void set_elf_platform(int cpu, const char *plat) __elf_platform = plat; } -/* - * Get the FPU Implementation/Revision. - */ -static inline unsigned long cpu_get_fpu_id(void) -{ - unsigned long tmp, fpu_id; - - tmp = read_c0_status(); - __enable_fpu(FPU_AS_IS); - fpu_id = read_32bit_cp1_register(CP1_REVISION); - write_c0_status(tmp); - return fpu_id; -} - -/* - * Check the CPU has an FPU the official way. - */ -static inline int __cpu_has_fpu(void) -{ - return (cpu_get_fpu_id() & FPIR_IMP_MASK) != FPIR_IMP_NONE; -} - -static inline unsigned long cpu_get_msa_id(void) -{ - unsigned long status, msa_id; - - status = read_c0_status(); - __enable_fpu(FPU_64BIT); - enable_msa(); - msa_id = read_msa_ir(); - disable_msa(); - write_c0_status(status); - return msa_id; -} - static inline void cpu_probe_vmbits(struct cpuinfo_mips *c) { #ifdef __NEED_VMBITS_PROBE @@ -441,6 +523,8 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c) c->htw_seq = 0; c->options |= MIPS_CPU_HTW; } + if (config3 & MIPS_CONF3_CDMM) + c->options |= MIPS_CPU_CDMM; return config3 & MIPS_CONF_M; } @@ -516,6 +600,10 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c) c->options |= MIPS_CPU_MAAR; if (config5 & MIPS_CONF5_LLB) c->options |= MIPS_CPU_RW_LLB; +#ifdef CONFIG_XPA + if (config5 & MIPS_CONF5_MVH) + c->options |= MIPS_CPU_XPA; +#endif return config5 & MIPS_CONF_M; } @@ -575,6 +663,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) case PRID_IMP_R2000: c->cputype = CPU_R2000; __cpu_name[cpu] = "R2000"; + c->fpu_msk31 |= FPU_CSR_CONDX | FPU_CSR_FS; c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE | MIPS_CPU_NOFPUEX; if (__cpu_has_fpu()) @@ -594,6 +683,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) c->cputype = CPU_R3000; __cpu_name[cpu] = "R3000"; } + c->fpu_msk31 |= FPU_CSR_CONDX | FPU_CSR_FS; c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE | MIPS_CPU_NOFPUEX; if (__cpu_has_fpu()) @@ -642,6 +732,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) } set_isa(c, MIPS_CPU_ISA_III); + c->fpu_msk31 |= FPU_CSR_CONDX; c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | MIPS_CPU_WATCH | MIPS_CPU_VCE | MIPS_CPU_LLSC; @@ -649,6 +740,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) break; case PRID_IMP_VR41XX: set_isa(c, MIPS_CPU_ISA_III); + c->fpu_msk31 |= FPU_CSR_CONDX; c->options = R4K_OPTS; c->tlbsize = 32; switch (c->processor_id & 0xf0) { @@ -690,6 +782,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) c->cputype = CPU_R4300; __cpu_name[cpu] = "R4300"; set_isa(c, MIPS_CPU_ISA_III); + 
c->fpu_msk31 |= FPU_CSR_CONDX; c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | MIPS_CPU_LLSC; c->tlbsize = 32; @@ -698,6 +791,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) c->cputype = CPU_R4600; __cpu_name[cpu] = "R4600"; set_isa(c, MIPS_CPU_ISA_III); + c->fpu_msk31 |= FPU_CSR_CONDX; c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | MIPS_CPU_LLSC; c->tlbsize = 48; @@ -713,11 +807,13 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) c->cputype = CPU_R4650; __cpu_name[cpu] = "R4650"; set_isa(c, MIPS_CPU_ISA_III); + c->fpu_msk31 |= FPU_CSR_CONDX; c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_LLSC; c->tlbsize = 48; break; #endif case PRID_IMP_TX39: + c->fpu_msk31 |= FPU_CSR_CONDX | FPU_CSR_FS; c->options = MIPS_CPU_TLB | MIPS_CPU_TX39_CACHE; if ((c->processor_id & 0xf0) == (PRID_REV_TX3927 & 0xf0)) { @@ -743,6 +839,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) c->cputype = CPU_R4700; __cpu_name[cpu] = "R4700"; set_isa(c, MIPS_CPU_ISA_III); + c->fpu_msk31 |= FPU_CSR_CONDX; c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | MIPS_CPU_LLSC; c->tlbsize = 48; @@ -751,6 +848,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) c->cputype = CPU_TX49XX; __cpu_name[cpu] = "R49XX"; set_isa(c, MIPS_CPU_ISA_III); + c->fpu_msk31 |= FPU_CSR_CONDX; c->options = R4K_OPTS | MIPS_CPU_LLSC; if (!(c->processor_id & 0x08)) c->options |= MIPS_CPU_FPU | MIPS_CPU_32FPR; @@ -792,6 +890,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) c->cputype = CPU_R6000; __cpu_name[cpu] = "R6000"; set_isa(c, MIPS_CPU_ISA_II); + c->fpu_msk31 |= FPU_CSR_CONDX | FPU_CSR_FS; c->options = MIPS_CPU_TLB | MIPS_CPU_FPU | MIPS_CPU_LLSC; c->tlbsize = 32; @@ -800,6 +899,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) c->cputype = CPU_R6000A; __cpu_name[cpu] = "R6000A"; set_isa(c, MIPS_CPU_ISA_II); + c->fpu_msk31 |= FPU_CSR_CONDX | FPU_CSR_FS; c->options = MIPS_CPU_TLB | MIPS_CPU_FPU | MIPS_CPU_LLSC; c->tlbsize = 32; @@ -850,8 +950,13 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) c->tlbsize = 64; break; case PRID_IMP_R14000: - c->cputype = CPU_R14000; - __cpu_name[cpu] = "R14000"; + if (((c->processor_id >> 4) & 0x0f) > 2) { + c->cputype = CPU_R16000; + __cpu_name[cpu] = "R16000"; + } else { + c->cputype = CPU_R14000; + __cpu_name[cpu] = "R14000"; + } set_isa(c, MIPS_CPU_ISA_IV); c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX | MIPS_CPU_FPU | MIPS_CPU_32FPR | @@ -866,12 +971,14 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) __cpu_name[cpu] = "ICT Loongson-2"; set_elf_platform(cpu, "loongson2e"); set_isa(c, MIPS_CPU_ISA_III); + c->fpu_msk31 |= FPU_CSR_CONDX; break; case PRID_REV_LOONGSON2F: c->cputype = CPU_LOONGSON2; __cpu_name[cpu] = "ICT Loongson-2"; set_elf_platform(cpu, "loongson2f"); set_isa(c, MIPS_CPU_ISA_III); + c->fpu_msk31 |= FPU_CSR_CONDX; break; case PRID_REV_LOONGSON3A: c->cputype = CPU_LOONGSON3; @@ -1308,6 +1415,9 @@ void cpu_probe(void) c->cputype = CPU_UNKNOWN; c->writecombine = _CACHE_UNCACHED; + c->fpu_csr31 = FPU_CSR_RN; + c->fpu_msk31 = FPU_CSR_RSVD | FPU_CSR_ABS2008 | FPU_CSR_NAN2008; + c->processor_id = read_c0_prid(); switch (c->processor_id & PRID_COMP_MASK) { case PRID_COMP_LEGACY: @@ -1364,16 +1474,10 @@ void cpu_probe(void) ~(1 << MIPS_PWCTL_PWEN_SHIFT)); } - if (c->options & MIPS_CPU_FPU) { - c->fpu_id = 
cpu_get_fpu_id(); - - if (c->isa_level & cpu_has_mips_r) { - if (c->fpu_id & MIPS_FPIR_3D) - c->ases |= MIPS_ASE_MIPS3D; - if (c->fpu_id & MIPS_FPIR_FREP) - c->options |= MIPS_CPU_FRE; - } - } + if (c->options & MIPS_CPU_FPU) + cpu_set_fpu_opts(c); + else + cpu_set_nofpu_opts(c); if (cpu_has_mips_r2_r6) { c->srsets = ((read_c0_srsctl() >> 26) & 0x0f) + 1; diff --git a/arch/mips/kernel/csrc-bcm1480.c b/arch/mips/kernel/csrc-bcm1480.c index 468f3eba4132..7f65b53d1b24 100644 --- a/arch/mips/kernel/csrc-bcm1480.c +++ b/arch/mips/kernel/csrc-bcm1480.c @@ -10,12 +10,9 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/clocksource.h> +#include <linux/sched_clock.h> #include <asm/addrspace.h> #include <asm/io.h> @@ -41,6 +38,11 @@ struct clocksource bcm1480_clocksource = { .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; +static u64 notrace sb1480_read_sched_clock(void) +{ + return __raw_readq(IOADDR(A_SCD_ZBBUS_CYCLE_COUNT)); +} + void __init sb1480_clocksource_init(void) { struct clocksource *cs = &bcm1480_clocksource; @@ -50,4 +52,6 @@ void __init sb1480_clocksource_init(void) plldiv = G_BCM1480_SYS_PLL_DIV(__raw_readq(IOADDR(A_SCD_SYSTEM_CFG))); zbbus = ((plldiv >> 1) * 50000000) + ((plldiv & 1) * 25000000); clocksource_register_hz(cs, zbbus); + + sched_clock_register(sb1480_read_sched_clock, 64, zbbus); } diff --git a/arch/mips/kernel/csrc-ioasic.c b/arch/mips/kernel/csrc-ioasic.c index 6cbbf6e106b9..722f5589cd1d 100644 --- a/arch/mips/kernel/csrc-ioasic.c +++ b/arch/mips/kernel/csrc-ioasic.c @@ -12,12 +12,9 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/clocksource.h> +#include <linux/sched_clock.h> #include <linux/init.h> #include <asm/ds1287.h> @@ -37,6 +34,11 @@ static struct clocksource clocksource_dec = { .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; +static u64 notrace dec_ioasic_read_sched_clock(void) +{ + return ioasic_read(IO_REG_FCTR); +} + int __init dec_ioasic_clocksource_init(void) { unsigned int freq; @@ -65,5 +67,8 @@ int __init dec_ioasic_clocksource_init(void) clocksource_dec.rating = 200 + freq / 10000000; clocksource_register_hz(&clocksource_dec, freq); + + sched_clock_register(dec_ioasic_read_sched_clock, 32, freq); + return 0; } diff --git a/arch/mips/kernel/csrc-r4k.c b/arch/mips/kernel/csrc-r4k.c index decd1fa38d55..e5ed7ada1433 100644 --- a/arch/mips/kernel/csrc-r4k.c +++ b/arch/mips/kernel/csrc-r4k.c @@ -7,6 +7,7 @@ */ #include <linux/clocksource.h> #include <linux/init.h> +#include <linux/sched_clock.h> #include <asm/time.h> @@ -22,6 +23,11 @@ static struct clocksource clocksource_mips = { .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; +static u64 notrace r4k_read_sched_clock(void) +{ + return read_c0_count(); +} + int __init init_r4k_clocksource(void) { if (!cpu_has_counter || !mips_hpt_frequency) @@ -32,5 +38,7 @@ int __init init_r4k_clocksource(void) clocksource_register_hz(&clocksource_mips, mips_hpt_frequency); + sched_clock_register(r4k_read_sched_clock, 32, mips_hpt_frequency); + return 0; } diff --git a/arch/mips/kernel/csrc-sb1250.c b/arch/mips/kernel/csrc-sb1250.c index 6ecb77d82063..d915652b4d56 100644 --- a/arch/mips/kernel/csrc-sb1250.c +++ b/arch/mips/kernel/csrc-sb1250.c @@ -10,12 +10,9 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/clocksource.h> +#include <linux/sched_clock.h> #include <asm/addrspace.h> #include <asm/io.h> @@ -33,15 +30,22 @@ * The HPT is free running from SB1250_HPT_VALUE down to 0 then starts over * again. 
*/ -static cycle_t sb1250_hpt_read(struct clocksource *cs) +static inline cycle_t sb1250_hpt_get_cycles(void) { unsigned int count; + void __iomem *addr; - count = G_SCD_TIMER_CNT(__raw_readq(IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM, R_SCD_TIMER_CNT)))); + addr = IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM, R_SCD_TIMER_CNT)); + count = G_SCD_TIMER_CNT(__raw_readq(addr)); return SB1250_HPT_VALUE - count; } +static cycle_t sb1250_hpt_read(struct clocksource *cs) +{ + return sb1250_hpt_get_cycles(); +} + struct clocksource bcm1250_clocksource = { .name = "bcm1250-counter-3", .rating = 200, @@ -50,6 +54,11 @@ struct clocksource bcm1250_clocksource = { .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; +static u64 notrace sb1250_read_sched_clock(void) +{ + return sb1250_hpt_get_cycles(); +} + void __init sb1250_clocksource_init(void) { struct clocksource *cs = &bcm1250_clocksource; @@ -66,4 +75,6 @@ void __init sb1250_clocksource_init(void) R_SCD_TIMER_CFG))); clocksource_register_hz(cs, V_SCD_TIMER_FREQ); + + sched_clock_register(sb1250_read_sched_clock, 23, V_SCD_TIMER_FREQ); } diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c index d2c09f6475c5..be4899f3c393 100644 --- a/arch/mips/kernel/elf.c +++ b/arch/mips/kernel/elf.c @@ -131,16 +131,6 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf, return 0; } -static inline unsigned get_fp_abi(int in_abi) -{ - /* If the ABI requirement is provided, simply return that */ - if (in_abi != MIPS_ABI_FP_UNKNOWN) - return in_abi; - - /* Unknown ABI */ - return MIPS_ABI_FP_UNKNOWN; -} - int arch_check_elf(void *_ehdr, bool has_interpreter, struct arch_elf_state *state) { @@ -151,10 +141,10 @@ int arch_check_elf(void *_ehdr, bool has_interpreter, if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT)) return 0; - fp_abi = get_fp_abi(state->fp_abi); + fp_abi = state->fp_abi; if (has_interpreter) { - interp_fp_abi = get_fp_abi(state->interp_fp_abi); + interp_fp_abi = state->interp_fp_abi; abi0 = min(fp_abi, interp_fp_abi); abi1 = max(fp_abi, interp_fp_abi); diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S index af41ba6db960..7791840cf22c 100644 --- a/arch/mips/kernel/entry.S +++ b/arch/mips/kernel/entry.S @@ -10,6 +10,7 @@ #include <asm/asm.h> #include <asm/asmmacro.h> +#include <asm/compiler.h> #include <asm/regdef.h> #include <asm/mipsregs.h> #include <asm/stackframe.h> @@ -185,7 +186,7 @@ syscall_exit_work: * For C code use the inline version named instruction_hazard(). */ LEAF(mips_ihb) - .set mips32r2 + .set MIPS_ISA_LEVEL_RAW jr.hb ra nop END(mips_ihb) diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c index 368c88b7eb6c..e4f62b7875d2 100644 --- a/arch/mips/kernel/idle.c +++ b/arch/mips/kernel/idle.c @@ -176,6 +176,17 @@ void __init check_wait(void) cpu_wait = rm7k_wait_irqoff; break; + case CPU_PROAPTIV: + case CPU_P5600: + /* + * Incoming Fast Debug Channel (FDC) data during a wait + * instruction causes the wait never to resume, even if an + * interrupt is received. Avoid using wait at all if FDC data is + * likely to be received. 
+ */ + if (IS_ENABLED(CONFIG_MIPS_EJTAG_FDC_TTY)) + break; + /* fall through */ case CPU_M14KC: case CPU_M14KEC: case CPU_24K: @@ -183,8 +194,6 @@ void __init check_wait(void) case CPU_1004K: case CPU_1074K: case CPU_INTERAPTIV: - case CPU_PROAPTIV: - case CPU_P5600: case CPU_M5150: case CPU_QEMU_GENERIC: cpu_wait = r4k_wait; diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c index 64d17e41093b..f2977f00911b 100644 --- a/arch/mips/kernel/mips-r2-to-r6-emul.c +++ b/arch/mips/kernel/mips-r2-to-r6-emul.c @@ -187,7 +187,7 @@ static inline int mipsr6_emul(struct pt_regs *regs, u32 ir) } /** - * movt_func - Emulate a MOVT instruction + * movf_func - Emulate a MOVF instruction * @regs: Process register set * @ir: Instruction * @@ -200,9 +200,12 @@ static int movf_func(struct pt_regs *regs, u32 ir) csr = current->thread.fpu.fcr31; cond = fpucondbit[MIPSInst_RT(ir) >> 2]; + if (((csr & cond) == 0) && MIPSInst_RD(ir)) regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)]; + MIPS_R2_STATS(movs); + return 0; } @@ -895,8 +898,9 @@ static inline int mipsr2_find_op_func(struct pt_regs *regs, u32 inst, * mipsr2_decoder: Decode and emulate a MIPS R2 instruction * @regs: Process register set * @inst: Instruction to decode and emulate + * @fcr31: Floating Point Control and Status Register returned */ -int mipsr2_decoder(struct pt_regs *regs, u32 inst) +int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31) { int err = 0; unsigned long vaddr; @@ -1165,6 +1169,13 @@ fpu_emul: err = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 0, &fault_addr); + *fcr31 = current->thread.fpu.fcr31; + + /* + * We can't allow the emulated instruction to leave any of + * the cause bits set in $fcr31. + */ + current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X; /* * this is a tricky issue - lose_fpu() uses LL/SC atomics diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c index 9466184d0039..cc1b6fadf089 100644 --- a/arch/mips/kernel/perf_event_mipsxx.c +++ b/arch/mips/kernel/perf_event_mipsxx.c @@ -558,8 +558,10 @@ static int mipspmu_get_irq(void) if (mipspmu.irq >= 0) { /* Request my own irq handler. 
*/ err = request_irq(mipspmu.irq, mipsxx_pmu_handle_irq, - IRQF_PERCPU | IRQF_NOBALANCING | IRQF_NO_THREAD, - "mips_perf_pmu", NULL); + IRQF_PERCPU | IRQF_NOBALANCING | + IRQF_NO_THREAD | IRQF_NO_SUSPEND | + IRQF_SHARED, + "mips_perf_pmu", &mipspmu); if (err) { pr_warn("Unable to request IRQ%d for MIPS performance counters!\n", mipspmu.irq); @@ -582,7 +584,7 @@ static int mipspmu_get_irq(void) static void mipspmu_free_irq(void) { if (mipspmu.irq >= 0) - free_irq(mipspmu.irq, NULL); + free_irq(mipspmu.irq, &mipspmu); else if (cp0_perfcount_irq < 0) perf_irq = save_perf_irq; } @@ -775,6 +777,7 @@ static int n_counters(void) case CPU_R12000: case CPU_R14000: + case CPU_R16000: counters = 4; break; @@ -822,6 +825,13 @@ static const struct mips_perf_event mipsxxcore_event_map2 [PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T }, }; +static const struct mips_perf_event loongson3_event_map[PERF_COUNT_HW_MAX] = { + [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN }, + [PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, CNTR_ODD }, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x01, CNTR_EVEN }, + [PERF_COUNT_HW_BRANCH_MISSES] = { 0x01, CNTR_ODD }, +}; + static const struct mips_perf_event octeon_event_map[PERF_COUNT_HW_MAX] = { [PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL }, [PERF_COUNT_HW_INSTRUCTIONS] = { 0x03, CNTR_ALL }, @@ -1005,6 +1015,61 @@ static const struct mips_perf_event mipsxxcore_cache_map2 }, }; +static const struct mips_perf_event loongson3_cache_map + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { +[C(L1D)] = { + /* + * Like some other architectures (e.g. ARM), the performance + * counters don't differentiate between read and write + * accesses/misses, so this isn't strictly correct, but it's the + * best we can do. Writes and reads get combined. + */ + [C(OP_READ)] = { + [C(RESULT_MISS)] = { 0x04, CNTR_ODD }, + }, + [C(OP_WRITE)] = { + [C(RESULT_MISS)] = { 0x04, CNTR_ODD }, + }, +}, +[C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_MISS)] = { 0x04, CNTR_EVEN }, + }, + [C(OP_WRITE)] = { + [C(RESULT_MISS)] = { 0x04, CNTR_EVEN }, + }, +}, +[C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_MISS)] = { 0x09, CNTR_ODD }, + }, + [C(OP_WRITE)] = { + [C(RESULT_MISS)] = { 0x09, CNTR_ODD }, + }, +}, +[C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_MISS)] = { 0x0c, CNTR_ODD }, + }, + [C(OP_WRITE)] = { + [C(RESULT_MISS)] = { 0x0c, CNTR_ODD }, + }, +}, +[C(BPU)] = { + /* Using the same code for *HW_BRANCH* */ + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN }, + [C(RESULT_MISS)] = { 0x02, CNTR_ODD }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN }, + [C(RESULT_MISS)] = { 0x02, CNTR_ODD }, + }, +}, +}; + /* BMIPS5000 */ static const struct mips_perf_event bmips5000_cache_map [PERF_COUNT_HW_CACHE_MAX] @@ -1539,6 +1604,10 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config) else raw_event.cntr_mask = raw_id > 127 ? CNTR_ODD : CNTR_EVEN; + break; + case CPU_LOONGSON3: + raw_event.cntr_mask = raw_id > 127 ? 
CNTR_ODD : CNTR_EVEN; + break; } raw_event.event_id = base_id; @@ -1615,8 +1684,7 @@ init_hw_perf_events(void) if (get_c0_perfcount_int) irq = get_c0_perfcount_int(); - else if ((cp0_perfcount_irq >= 0) && - (cp0_compare_irq != cp0_perfcount_irq)) + else if (cp0_perfcount_irq >= 0) irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq; else irq = -1; @@ -1669,6 +1737,11 @@ init_hw_perf_events(void) mipspmu.general_event_map = &mipsxxcore_event_map; mipspmu.cache_event_map = &mipsxxcore_cache_map; break; + case CPU_LOONGSON3: + mipspmu.name = "mips/loongson3"; + mipspmu.general_event_map = &loongson3_event_map; + mipspmu.cache_event_map = &loongson3_cache_map; + break; case CPU_CAVIUM_OCTEON: case CPU_CAVIUM_OCTEON_PLUS: case CPU_CAVIUM_OCTEON2: diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index 130af7d26a9c..298b2b773d12 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c @@ -120,6 +120,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) if (cpu_has_msa) seq_printf(m, "%s", " msa"); if (cpu_has_eva) seq_printf(m, "%s", " eva"); if (cpu_has_htw) seq_printf(m, "%s", " htw"); + if (cpu_has_xpa) seq_printf(m, "%s", " xpa"); seq_printf(m, "\n"); if (cpu_has_mmips) { diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index bf85cc180d91..d295bd1e4996 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -107,8 +107,11 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) return 0; } +/* + * Copy architecture-specific thread state + */ int copy_thread(unsigned long clone_flags, unsigned long usp, - unsigned long arg, struct task_struct *p) + unsigned long kthread_arg, struct task_struct *p) { struct thread_info *ti = task_thread_info(p); struct pt_regs *childregs, *regs = current_pt_regs(); @@ -123,11 +126,12 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, childksp = (unsigned long) childregs; p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1); if (unlikely(p->flags & PF_KTHREAD)) { + /* kernel thread */ unsigned long status = p->thread.cp0_status; memset(childregs, 0, sizeof(struct pt_regs)); ti->addr_limit = KERNEL_DS; p->thread.reg16 = usp; /* fn */ - p->thread.reg17 = arg; + p->thread.reg17 = kthread_arg; p->thread.reg29 = childksp; p->thread.reg31 = (unsigned long) ret_from_kernel_thread; #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) @@ -139,6 +143,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, childregs->cp0_status = status; return 0; } + + /* user thread */ *childregs = *regs; childregs->regs[7] = 0; /* Clear error flag */ childregs->regs[2] = 0; /* Child gets zero as return value */ diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c index 452d4350ce42..e303cb1ef2f4 100644 --- a/arch/mips/kernel/prom.c +++ b/arch/mips/kernel/prom.c @@ -64,7 +64,10 @@ int __init __dt_register_buses(const char *bus0, const char *bus1) panic("device tree not present"); strlcpy(of_ids[0].compatible, bus0, sizeof(of_ids[0].compatible)); - strlcpy(of_ids[1].compatible, bus1, sizeof(of_ids[1].compatible)); + if (bus1) { + strlcpy(of_ids[1].compatible, bus1, + sizeof(of_ids[1].compatible)); + } if (of_platform_populate(NULL, of_ids, NULL, NULL)) panic("failed to populate DT"); diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 7da6e324dd35..d544e774eea6 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -32,6 +32,7 @@ #include <asm/byteorder.h> #include <asm/cpu.h> +#include <asm/cpu-info.h> #include 
<asm/dsp.h> #include <asm/fpu.h> #include <asm/mipsregs.h> @@ -157,6 +158,9 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data) { union fpureg *fregs; u64 fpr_val; + u32 fcr31; + u32 value; + u32 mask; int i; if (!access_ok(VERIFY_READ, data, 33 * 8)) @@ -170,8 +174,10 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data) set_fpr64(&fregs[i], 0, fpr_val); } - __get_user(child->thread.fpu.fcr31, data + 64); - child->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X; + __get_user(value, data + 64); + fcr31 = child->thread.fpu.fcr31; + mask = current_cpu_data.fpu_msk31; + child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask); /* FIR may not be written. */ diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S index 435ea652f5fa..5087a4b72e6b 100644 --- a/arch/mips/kernel/r2300_switch.S +++ b/arch/mips/kernel/r2300_switch.S @@ -115,11 +115,9 @@ LEAF(_restore_fp) * the property that no matter whether considered as single or as double * precision represents signaling NANS. * - * We initialize fcr31 to rounding to nearest, no exceptions. + * The value to initialize fcr31 to comes in $a0. */ -#define FPU_DEFAULT 0x00000000 - .set push SET_HARDFLOAT @@ -129,8 +127,7 @@ LEAF(_init_fpu) or t0, t1 mtc0 t0, CP0_STATUS - li t1, FPU_DEFAULT - ctc1 t1, fcr31 + ctc1 a0, fcr31 li t0, -1 diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S index 3b1a36f13a7d..04cbbde3521b 100644 --- a/arch/mips/kernel/r4k_switch.S +++ b/arch/mips/kernel/r4k_switch.S @@ -165,11 +165,9 @@ LEAF(_init_msa_upper) * the property that no matter whether considered as single or as double * precision represents signaling NANS. * - * We initialize fcr31 to rounding to nearest, no exceptions. + * The value to initialize fcr31 to comes in $a0. 
*/ -#define FPU_DEFAULT 0x00000000 - .set push SET_HARDFLOAT @@ -180,8 +178,7 @@ LEAF(_init_fpu) mtc0 t0, CP0_STATUS enable_fpu_hazard - li t1, FPU_DEFAULT - ctc1 t1, fcr31 + ctc1 a0, fcr31 li t1, -1 # SNaN diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c index 07fc5244aed4..7c746d3458e7 100644 --- a/arch/mips/kernel/reset.c +++ b/arch/mips/kernel/reset.c @@ -11,6 +11,7 @@ #include <linux/pm.h> #include <linux/types.h> #include <linux/reboot.h> +#include <linux/delay.h> #include <asm/reboot.h> @@ -29,16 +30,40 @@ void machine_restart(char *command) { if (_machine_restart) _machine_restart(command); + +#ifdef CONFIG_SMP + preempt_disable(); + smp_send_stop(); +#endif + do_kernel_restart(command); + mdelay(1000); + pr_emerg("Reboot failed -- System halted\n"); + local_irq_disable(); + while (1); } void machine_halt(void) { if (_machine_halt) _machine_halt(); + +#ifdef CONFIG_SMP + preempt_disable(); + smp_send_stop(); +#endif + local_irq_disable(); + while (1); } void machine_power_off(void) { if (pm_power_off) pm_power_off(); + +#ifdef CONFIG_SMP + preempt_disable(); + smp_send_stop(); +#endif + local_irq_disable(); + while (1); } diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 058929041368..be73c491182b 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -31,6 +31,7 @@ #include <asm/bootinfo.h> #include <asm/bugs.h> #include <asm/cache.h> +#include <asm/cdmm.h> #include <asm/cpu.h> #include <asm/sections.h> #include <asm/setup.h> @@ -763,6 +764,7 @@ void __init setup_arch(char **cmdline_p) cpu_probe(); prom_init(); + setup_early_fdc_console(); #ifdef CONFIG_EARLY_PRINTK setup_early_printk(); #endif diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c index bed7590e475f..d5589bedd0a4 100644 --- a/arch/mips/kernel/smp-cps.c +++ b/arch/mips/kernel/smp-cps.c @@ -88,6 +88,12 @@ static void __init cps_smp_setup(void) /* Make core 0 coherent with everything */ write_gcr_cl_coherence(0xff); + +#ifdef CONFIG_MIPS_MT_FPAFF + /* If we have an FPU, enroll ourselves in the FPU-full mask */ + if (cpu_has_fpu) + cpu_set(0, mt_fpu_cpumask); +#endif /* CONFIG_MIPS_MT_FPAFF */ } static void __init cps_prepare_cpus(unsigned int max_cpus) diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 1c0d8c50b7e1..5b020bda3e05 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -176,10 +176,8 @@ static void stop_this_cpu(void *dummy) * Remove this CPU: */ set_cpu_online(smp_processor_id(), false); - for (;;) { - if (cpu_wait) - (*cpu_wait)(); /* Wait if available. */ - } + local_irq_disable(); + while (1); } void smp_send_stop(void) diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 5b4d711f878d..e334c641a81b 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -12,6 +12,7 @@ * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc. All rights reserved. * Copyright (C) 2014, Imagination Technologies Ltd. */ +#include <linux/bitops.h> #include <linux/bug.h> #include <linux/compiler.h> #include <linux/context_tracking.h> @@ -699,36 +700,60 @@ asmlinkage void do_ov(struct pt_regs *regs) exception_exit(prev_state); } -int process_fpemu_return(int sig, void __user *fault_addr) +int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31) { - /* - * We can't allow the emulated instruction to leave any of the cause - * bits set in FCSR. If they were then the kernel would take an FP - * exception when restoring FP context. 
- */ - current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X; + struct siginfo si = { 0 }; + + switch (sig) { + case 0: + return 0; - if (sig == SIGSEGV || sig == SIGBUS) { - struct siginfo si = {0}; + case SIGFPE: si.si_addr = fault_addr; si.si_signo = sig; - if (sig == SIGSEGV) { - down_read(¤t->mm->mmap_sem); - if (find_vma(current->mm, (unsigned long)fault_addr)) - si.si_code = SEGV_ACCERR; - else - si.si_code = SEGV_MAPERR; - up_read(¤t->mm->mmap_sem); - } else { - si.si_code = BUS_ADRERR; - } + /* + * Inexact can happen together with Overflow or Underflow. + * Respect the mask to deliver the correct exception. + */ + fcr31 &= (fcr31 & FPU_CSR_ALL_E) << + (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E)); + if (fcr31 & FPU_CSR_INV_X) + si.si_code = FPE_FLTINV; + else if (fcr31 & FPU_CSR_DIV_X) + si.si_code = FPE_FLTDIV; + else if (fcr31 & FPU_CSR_OVF_X) + si.si_code = FPE_FLTOVF; + else if (fcr31 & FPU_CSR_UDF_X) + si.si_code = FPE_FLTUND; + else if (fcr31 & FPU_CSR_INE_X) + si.si_code = FPE_FLTRES; + else + si.si_code = __SI_FAULT; + force_sig_info(sig, &si, current); + return 1; + + case SIGBUS: + si.si_addr = fault_addr; + si.si_signo = sig; + si.si_code = BUS_ADRERR; + force_sig_info(sig, &si, current); + return 1; + + case SIGSEGV: + si.si_addr = fault_addr; + si.si_signo = sig; + down_read(¤t->mm->mmap_sem); + if (find_vma(current->mm, (unsigned long)fault_addr)) + si.si_code = SEGV_ACCERR; + else + si.si_code = SEGV_MAPERR; + up_read(¤t->mm->mmap_sem); force_sig_info(sig, &si, current); return 1; - } else if (sig) { + + default: force_sig(sig, current); return 1; - } else { - return 0; } } @@ -736,7 +761,8 @@ static int simulate_fp(struct pt_regs *regs, unsigned int opcode, unsigned long old_epc, unsigned long old_ra) { union mips_instruction inst = { .word = opcode }; - void __user *fault_addr = NULL; + void __user *fault_addr; + unsigned long fcr31; int sig; /* If it's obviously not an FP instruction, skip it */ @@ -766,13 +792,20 @@ static int simulate_fp(struct pt_regs *regs, unsigned int opcode, /* Run the emulator */ sig = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1, &fault_addr); + fcr31 = current->thread.fpu.fcr31; - /* If something went wrong, signal */ - process_fpemu_return(sig, fault_addr); + /* + * We can't allow the emulated instruction to leave any of + * the cause bits set in $fcr31. + */ + current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X; /* Restore the hardware register state */ own_fpu(1); + /* Send a signal if required. */ + process_fpemu_return(sig, fault_addr, fcr31); + return 0; } @@ -782,7 +815,8 @@ static int simulate_fp(struct pt_regs *regs, unsigned int opcode, asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31) { enum ctx_state prev_state; - siginfo_t info = {0}; + void __user *fault_addr; + int sig; prev_state = exception_enter(); if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs), @@ -796,9 +830,6 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31) die_if_kernel("FP exception in kernel code", regs); if (fcr31 & FPU_CSR_UNI_X) { - int sig; - void __user *fault_addr = NULL; - /* * Unimplemented operation exception. If we've got the full * software emulator on-board, let's use it... 
@@ -815,30 +846,23 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31) /* Run the emulator */ sig = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1, &fault_addr); + fcr31 = current->thread.fpu.fcr31; - /* If something went wrong, signal */ - process_fpemu_return(sig, fault_addr); + /* + * We can't allow the emulated instruction to leave any of + * the cause bits set in $fcr31. + */ + current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X; /* Restore the hardware register state */ own_fpu(1); /* Using the FPU again. */ + } else { + sig = SIGFPE; + fault_addr = (void __user *) regs->cp0_epc; + } - goto out; - } else if (fcr31 & FPU_CSR_INV_X) - info.si_code = FPE_FLTINV; - else if (fcr31 & FPU_CSR_DIV_X) - info.si_code = FPE_FLTDIV; - else if (fcr31 & FPU_CSR_OVF_X) - info.si_code = FPE_FLTOVF; - else if (fcr31 & FPU_CSR_UDF_X) - info.si_code = FPE_FLTUND; - else if (fcr31 & FPU_CSR_INE_X) - info.si_code = FPE_FLTRES; - else - info.si_code = __SI_FAULT; - info.si_signo = SIGFPE; - info.si_errno = 0; - info.si_addr = (void __user *) regs->cp0_epc; - force_sig_info(SIGFPE, &info, current); + /* Send a signal if required. */ + process_fpemu_return(sig, fault_addr, fcr31); out: exception_exit(prev_state); @@ -885,9 +909,9 @@ void do_trap_or_bp(struct pt_regs *regs, unsigned int code, break; case BRK_MEMU: /* - * Address errors may be deliberately induced by the FPU - * emulator to retake control of the CPU after executing the - * instruction in the delay slot of an emulated branch. + * This breakpoint code is used by the FPU emulator to retake + * control of the CPU after executing the instruction from the + * delay slot of an emulated branch. * * Terminate if exception was recognized as a delay slot return * otherwise handle as normal. @@ -907,10 +931,9 @@ void do_trap_or_bp(struct pt_regs *regs, unsigned int code, asmlinkage void do_bp(struct pt_regs *regs) { + unsigned long epc = msk_isa16_mode(exception_epc(regs)); unsigned int opcode, bcode; enum ctx_state prev_state; - unsigned long epc; - u16 instr[2]; mm_segment_t seg; seg = get_fs(); @@ -919,26 +942,28 @@ asmlinkage void do_bp(struct pt_regs *regs) prev_state = exception_enter(); if (get_isa16_mode(regs->cp0_epc)) { - /* Calculate EPC. */ - epc = exception_epc(regs); - if (cpu_has_mmips) { - if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)) || - (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2))))) - goto out_sigsegv; - opcode = (instr[0] << 16) | instr[1]; - } else { + u16 instr[2]; + + if (__get_user(instr[0], (u16 __user *)epc)) + goto out_sigsegv; + + if (!cpu_has_mmips) { /* MIPS16e mode */ - if (__get_user(instr[0], - (u16 __user *)msk_isa16_mode(epc))) + bcode = (instr[0] >> 5) & 0x3f; + } else if (mm_insn_16bit(instr[0])) { + /* 16-bit microMIPS BREAK */ + bcode = instr[0] & 0xf; + } else { + /* 32-bit microMIPS BREAK */ + if (__get_user(instr[1], (u16 __user *)(epc + 2))) goto out_sigsegv; - bcode = (instr[0] >> 6) & 0x3f; - do_trap_or_bp(regs, bcode, "Break"); - goto out; + opcode = (instr[0] << 16) | instr[1]; + bcode = (opcode >> 6) & ((1 << 20) - 1); } } else { - if (__get_user(opcode, - (unsigned int __user *) exception_epc(regs))) + if (__get_user(opcode, (unsigned int __user *)epc)) goto out_sigsegv; + bcode = (opcode >> 6) & ((1 << 20) - 1); } /* @@ -947,9 +972,8 @@ asmlinkage void do_bp(struct pt_regs *regs) * Gas is bug-compatible, but not always, grrr... * We handle both cases with a simple heuristics. 
--macro */ - bcode = ((opcode >> 6) & ((1 << 20) - 1)); if (bcode >= (1 << 10)) - bcode >>= 10; + bcode = ((bcode & ((1 << 10) - 1)) << 10) | (bcode >> 10); /* * notify the kprobe handlers, if instruction is likely to @@ -1039,22 +1063,24 @@ asmlinkage void do_ri(struct pt_regs *regs) * as quickly as possible. */ if (mipsr2_emulation && cpu_has_mips_r6 && - likely(user_mode(regs))) { - if (likely(get_user(opcode, epc) >= 0)) { - status = mipsr2_decoder(regs, opcode); - switch (status) { - case 0: - case SIGEMT: - task_thread_info(current)->r2_emul_return = 1; - return; - case SIGILL: - goto no_r2_instr; - default: - process_fpemu_return(status, - ¤t->thread.cp0_baduaddr); - task_thread_info(current)->r2_emul_return = 1; - return; - } + likely(user_mode(regs)) && + likely(get_user(opcode, epc) >= 0)) { + unsigned long fcr31 = 0; + + status = mipsr2_decoder(regs, opcode, &fcr31); + switch (status) { + case 0: + case SIGEMT: + task_thread_info(current)->r2_emul_return = 1; + return; + case SIGILL: + goto no_r2_instr; + default: + process_fpemu_return(status, + ¤t->thread.cp0_baduaddr, + fcr31); + task_thread_info(current)->r2_emul_return = 1; + return; } } @@ -1299,10 +1325,13 @@ asmlinkage void do_cpu(struct pt_regs *regs) enum ctx_state prev_state; unsigned int __user *epc; unsigned long old_epc, old31; + void __user *fault_addr; unsigned int opcode; + unsigned long fcr31; unsigned int cpid; int status, err; unsigned long __maybe_unused flags; + int sig; prev_state = exception_enter(); cpid = (regs->cp0_cause >> CAUSEB_CE) & 3; @@ -1319,7 +1348,7 @@ asmlinkage void do_cpu(struct pt_regs *regs) status = -1; if (unlikely(compute_return_epc(regs) < 0)) - goto out; + break; if (get_isa16_mode(regs->cp0_epc)) { unsigned short mmop[2] = { 0 }; @@ -1352,49 +1381,54 @@ asmlinkage void do_cpu(struct pt_regs *regs) force_sig(status, current); } - goto out; + break; case 3: /* - * Old (MIPS I and MIPS II) processors will set this code - * for COP1X opcode instructions that replaced the original - * COP3 space. We don't limit COP1 space instructions in - * the emulator according to the CPU ISA, so we want to - * treat COP1X instructions consistently regardless of which - * code the CPU chose. Therefore we redirect this trap to - * the FP emulator too. - * - * Then some newer FPU-less processors use this code - * erroneously too, so they are covered by this choice - * as well. + * The COP3 opcode space and consequently the CP0.Status.CU3 + * bit and the CP0.Cause.CE=3 encoding have been removed as + * of the MIPS III ISA. From the MIPS IV and MIPS32r2 ISAs + * up the space has been reused for COP1X instructions, that + * are enabled by the CP0.Status.CU1 bit and consequently + * use the CP0.Cause.CE=1 encoding for Coprocessor Unusable + * exceptions. Some FPU-less processors that implement one + * of these ISAs however use this code erroneously for COP1X + * instructions. Therefore we redirect this trap to the FP + * emulator too. */ - if (raw_cpu_has_fpu) + if (raw_cpu_has_fpu || !cpu_has_mips_4_5_64_r2_r6) { + force_sig(SIGILL, current); break; + } /* Fall through. 
*/ case 1: err = enable_restore_fp_context(0); - if (!raw_cpu_has_fpu || err) { - int sig; - void __user *fault_addr = NULL; - sig = fpu_emulator_cop1Handler(regs, - ¤t->thread.fpu, - 0, &fault_addr); - if (!process_fpemu_return(sig, fault_addr) && !err) - mt_ase_fp_affinity(); - } + if (raw_cpu_has_fpu && !err) + break; - goto out; + sig = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 0, + &fault_addr); + fcr31 = current->thread.fpu.fcr31; + + /* + * We can't allow the emulated instruction to leave + * any of the cause bits set in $fcr31. + */ + current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X; + + /* Send a signal if required. */ + if (!process_fpemu_return(sig, fault_addr, fcr31) && !err) + mt_ase_fp_affinity(); + + break; case 2: raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs); - goto out; + break; } - force_sig(SIGILL, current); - -out: exception_exit(prev_state); } @@ -1984,6 +2018,12 @@ int cp0_compare_irq_shift; int cp0_perfcount_irq; EXPORT_SYMBOL_GPL(cp0_perfcount_irq); +/* + * Fast debug channel IRQ or -1 if not present + */ +int cp0_fdc_irq; +EXPORT_SYMBOL_GPL(cp0_fdc_irq); + static int noulri; static int __init ulri_disable(char *s) @@ -2065,17 +2105,21 @@ void per_cpu_trap_init(bool is_boot_cpu) * * o read IntCtl.IPTI to determine the timer interrupt * o read IntCtl.IPPCI to determine the performance counter interrupt + * o read IntCtl.IPFDC to determine the fast debug channel interrupt */ if (cpu_has_mips_r2_r6) { cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP; cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7; cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7; - if (cp0_perfcount_irq == cp0_compare_irq) - cp0_perfcount_irq = -1; + cp0_fdc_irq = (read_c0_intctl() >> INTCTLB_IPFDC) & 7; + if (!cp0_fdc_irq) + cp0_fdc_irq = -1; + } else { cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ; cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ; cp0_perfcount_irq = -1; + cp0_fdc_irq = -1; } if (!cpu_data[cpu].asid_cache) diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index bbb69695a0a1..af84bef0c90d 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -89,8 +89,6 @@ #include <asm/fpu_emulator.h> #include <asm/inst.h> #include <asm/uaccess.h> -#include <asm/fpu.h> -#include <asm/fpu_emulator.h> #define STR(x) __STR(x) #define __STR(x) #x @@ -109,10 +107,11 @@ static u32 unaligned_action; extern void show_registers(struct pt_regs *regs); #ifdef __BIG_ENDIAN -#define LoadHW(addr, value, res) \ +#define _LoadHW(addr, value, res, type) \ +do { \ __asm__ __volatile__ (".set\tnoat\n" \ - "1:\t"user_lb("%0", "0(%2)")"\n" \ - "2:\t"user_lbu("$1", "1(%2)")"\n\t" \ + "1:\t"type##_lb("%0", "0(%2)")"\n" \ + "2:\t"type##_lbu("$1", "1(%2)")"\n\t"\ "sll\t%0, 0x8\n\t" \ "or\t%0, $1\n\t" \ "li\t%1, 0\n" \ @@ -127,13 +126,15 @@ extern void show_registers(struct pt_regs *regs); STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); + : "r" (addr), "i" (-EFAULT)); \ +} while(0) #ifndef CONFIG_CPU_MIPSR6 -#define LoadW(addr, value, res) \ +#define _LoadW(addr, value, res, type) \ +do { \ __asm__ __volatile__ ( \ - "1:\t"user_lwl("%0", "(%2)")"\n" \ - "2:\t"user_lwr("%0", "3(%2)")"\n\t" \ + "1:\t"type##_lwl("%0", "(%2)")"\n" \ + "2:\t"type##_lwr("%0", "3(%2)")"\n\t"\ "li\t%1, 0\n" \ "3:\n\t" \ ".insn\n\t" \ @@ -146,21 +147,24 @@ extern void show_registers(struct pt_regs *regs); STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); + : "r" (addr), "i" 
(-EFAULT)); \ +} while(0) + #else /* MIPSR6 has no lwl instruction */ -#define LoadW(addr, value, res) \ +#define _LoadW(addr, value, res, type) \ +do { \ __asm__ __volatile__ ( \ ".set\tpush\n" \ ".set\tnoat\n\t" \ - "1:"user_lb("%0", "0(%2)")"\n\t" \ - "2:"user_lbu("$1", "1(%2)")"\n\t" \ + "1:"type##_lb("%0", "0(%2)")"\n\t" \ + "2:"type##_lbu("$1", "1(%2)")"\n\t" \ "sll\t%0, 0x8\n\t" \ "or\t%0, $1\n\t" \ - "3:"user_lbu("$1", "2(%2)")"\n\t" \ + "3:"type##_lbu("$1", "2(%2)")"\n\t" \ "sll\t%0, 0x8\n\t" \ "or\t%0, $1\n\t" \ - "4:"user_lbu("$1", "3(%2)")"\n\t" \ + "4:"type##_lbu("$1", "3(%2)")"\n\t" \ "sll\t%0, 0x8\n\t" \ "or\t%0, $1\n\t" \ "li\t%1, 0\n" \ @@ -178,14 +182,17 @@ extern void show_registers(struct pt_regs *regs); STR(PTR)"\t4b, 11b\n\t" \ ".previous" \ : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); + : "r" (addr), "i" (-EFAULT)); \ +} while(0) + #endif /* CONFIG_CPU_MIPSR6 */ -#define LoadHWU(addr, value, res) \ +#define _LoadHWU(addr, value, res, type) \ +do { \ __asm__ __volatile__ ( \ ".set\tnoat\n" \ - "1:\t"user_lbu("%0", "0(%2)")"\n" \ - "2:\t"user_lbu("$1", "1(%2)")"\n\t" \ + "1:\t"type##_lbu("%0", "0(%2)")"\n" \ + "2:\t"type##_lbu("$1", "1(%2)")"\n\t"\ "sll\t%0, 0x8\n\t" \ "or\t%0, $1\n\t" \ "li\t%1, 0\n" \ @@ -201,13 +208,15 @@ extern void show_registers(struct pt_regs *regs); STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); + : "r" (addr), "i" (-EFAULT)); \ +} while(0) #ifndef CONFIG_CPU_MIPSR6 -#define LoadWU(addr, value, res) \ +#define _LoadWU(addr, value, res, type) \ +do { \ __asm__ __volatile__ ( \ - "1:\t"user_lwl("%0", "(%2)")"\n" \ - "2:\t"user_lwr("%0", "3(%2)")"\n\t" \ + "1:\t"type##_lwl("%0", "(%2)")"\n" \ + "2:\t"type##_lwr("%0", "3(%2)")"\n\t"\ "dsll\t%0, %0, 32\n\t" \ "dsrl\t%0, %0, 32\n\t" \ "li\t%1, 0\n" \ @@ -222,9 +231,11 @@ extern void show_registers(struct pt_regs *regs); STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); + : "r" (addr), "i" (-EFAULT)); \ +} while(0) -#define LoadDW(addr, value, res) \ +#define _LoadDW(addr, value, res) \ +do { \ __asm__ __volatile__ ( \ "1:\tldl\t%0, (%2)\n" \ "2:\tldr\t%0, 7(%2)\n\t" \ @@ -240,21 +251,24 @@ extern void show_registers(struct pt_regs *regs); STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); + : "r" (addr), "i" (-EFAULT)); \ +} while(0) + #else /* MIPSR6 has not lwl and ldl instructions */ -#define LoadWU(addr, value, res) \ +#define _LoadWU(addr, value, res, type) \ +do { \ __asm__ __volatile__ ( \ ".set\tpush\n\t" \ ".set\tnoat\n\t" \ - "1:"user_lbu("%0", "0(%2)")"\n\t" \ - "2:"user_lbu("$1", "1(%2)")"\n\t" \ + "1:"type##_lbu("%0", "0(%2)")"\n\t" \ + "2:"type##_lbu("$1", "1(%2)")"\n\t" \ "sll\t%0, 0x8\n\t" \ "or\t%0, $1\n\t" \ - "3:"user_lbu("$1", "2(%2)")"\n\t" \ + "3:"type##_lbu("$1", "2(%2)")"\n\t" \ "sll\t%0, 0x8\n\t" \ "or\t%0, $1\n\t" \ - "4:"user_lbu("$1", "3(%2)")"\n\t" \ + "4:"type##_lbu("$1", "3(%2)")"\n\t" \ "sll\t%0, 0x8\n\t" \ "or\t%0, $1\n\t" \ "li\t%1, 0\n" \ @@ -272,9 +286,11 @@ extern void show_registers(struct pt_regs *regs); STR(PTR)"\t4b, 11b\n\t" \ ".previous" \ : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); + : "r" (addr), "i" (-EFAULT)); \ +} while(0) -#define LoadDW(addr, value, res) \ +#define _LoadDW(addr, value, res) \ +do { \ __asm__ __volatile__ ( \ ".set\tpush\n\t" \ ".set\tnoat\n\t" \ @@ -319,16 +335,19 @@ extern void show_registers(struct pt_regs *regs); STR(PTR)"\t8b, 11b\n\t" \ 
".previous" \ : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); + : "r" (addr), "i" (-EFAULT)); \ +} while(0) + #endif /* CONFIG_CPU_MIPSR6 */ -#define StoreHW(addr, value, res) \ +#define _StoreHW(addr, value, res, type) \ +do { \ __asm__ __volatile__ ( \ ".set\tnoat\n" \ - "1:\t"user_sb("%1", "1(%2)")"\n" \ + "1:\t"type##_sb("%1", "1(%2)")"\n" \ "srl\t$1, %1, 0x8\n" \ - "2:\t"user_sb("$1", "0(%2)")"\n" \ + "2:\t"type##_sb("$1", "0(%2)")"\n" \ ".set\tat\n\t" \ "li\t%0, 0\n" \ "3:\n\t" \ @@ -342,13 +361,15 @@ extern void show_registers(struct pt_regs *regs); STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=r" (res) \ - : "r" (value), "r" (addr), "i" (-EFAULT)); + : "r" (value), "r" (addr), "i" (-EFAULT));\ +} while(0) #ifndef CONFIG_CPU_MIPSR6 -#define StoreW(addr, value, res) \ +#define _StoreW(addr, value, res, type) \ +do { \ __asm__ __volatile__ ( \ - "1:\t"user_swl("%1", "(%2)")"\n" \ - "2:\t"user_swr("%1", "3(%2)")"\n\t" \ + "1:\t"type##_swl("%1", "(%2)")"\n" \ + "2:\t"type##_swr("%1", "3(%2)")"\n\t"\ "li\t%0, 0\n" \ "3:\n\t" \ ".insn\n\t" \ @@ -361,9 +382,11 @@ extern void show_registers(struct pt_regs *regs); STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=r" (res) \ - : "r" (value), "r" (addr), "i" (-EFAULT)); + : "r" (value), "r" (addr), "i" (-EFAULT)); \ +} while(0) -#define StoreDW(addr, value, res) \ +#define _StoreDW(addr, value, res) \ +do { \ __asm__ __volatile__ ( \ "1:\tsdl\t%1,(%2)\n" \ "2:\tsdr\t%1, 7(%2)\n\t" \ @@ -379,20 +402,23 @@ extern void show_registers(struct pt_regs *regs); STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=r" (res) \ - : "r" (value), "r" (addr), "i" (-EFAULT)); + : "r" (value), "r" (addr), "i" (-EFAULT)); \ +} while(0) + #else /* MIPSR6 has no swl and sdl instructions */ -#define StoreW(addr, value, res) \ +#define _StoreW(addr, value, res, type) \ +do { \ __asm__ __volatile__ ( \ ".set\tpush\n\t" \ ".set\tnoat\n\t" \ - "1:"user_sb("%1", "3(%2)")"\n\t" \ + "1:"type##_sb("%1", "3(%2)")"\n\t" \ "srl\t$1, %1, 0x8\n\t" \ - "2:"user_sb("$1", "2(%2)")"\n\t" \ + "2:"type##_sb("$1", "2(%2)")"\n\t" \ "srl\t$1, $1, 0x8\n\t" \ - "3:"user_sb("$1", "1(%2)")"\n\t" \ + "3:"type##_sb("$1", "1(%2)")"\n\t" \ "srl\t$1, $1, 0x8\n\t" \ - "4:"user_sb("$1", "0(%2)")"\n\t" \ + "4:"type##_sb("$1", "0(%2)")"\n\t" \ ".set\tpop\n\t" \ "li\t%0, 0\n" \ "10:\n\t" \ @@ -409,9 +435,11 @@ extern void show_registers(struct pt_regs *regs); ".previous" \ : "=&r" (res) \ : "r" (value), "r" (addr), "i" (-EFAULT) \ - : "memory"); + : "memory"); \ +} while(0) #define StoreDW(addr, value, res) \ +do { \ __asm__ __volatile__ ( \ ".set\tpush\n\t" \ ".set\tnoat\n\t" \ @@ -451,15 +479,18 @@ extern void show_registers(struct pt_regs *regs); ".previous" \ : "=&r" (res) \ : "r" (value), "r" (addr), "i" (-EFAULT) \ - : "memory"); + : "memory"); \ +} while(0) + #endif /* CONFIG_CPU_MIPSR6 */ #else /* __BIG_ENDIAN */ -#define LoadHW(addr, value, res) \ +#define _LoadHW(addr, value, res, type) \ +do { \ __asm__ __volatile__ (".set\tnoat\n" \ - "1:\t"user_lb("%0", "1(%2)")"\n" \ - "2:\t"user_lbu("$1", "0(%2)")"\n\t" \ + "1:\t"type##_lb("%0", "1(%2)")"\n" \ + "2:\t"type##_lbu("$1", "0(%2)")"\n\t"\ "sll\t%0, 0x8\n\t" \ "or\t%0, $1\n\t" \ "li\t%1, 0\n" \ @@ -474,13 +505,15 @@ extern void show_registers(struct pt_regs *regs); STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); + : "r" (addr), "i" (-EFAULT)); \ +} while(0) #ifndef CONFIG_CPU_MIPSR6 -#define LoadW(addr, value, res) \ +#define _LoadW(addr, value, res, type) \ +do { \ __asm__ __volatile__ ( \ - 
"1:\t"user_lwl("%0", "3(%2)")"\n" \ - "2:\t"user_lwr("%0", "(%2)")"\n\t" \ + "1:\t"type##_lwl("%0", "3(%2)")"\n" \ + "2:\t"type##_lwr("%0", "(%2)")"\n\t"\ "li\t%1, 0\n" \ "3:\n\t" \ ".insn\n\t" \ @@ -493,21 +526,24 @@ extern void show_registers(struct pt_regs *regs); STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); + : "r" (addr), "i" (-EFAULT)); \ +} while(0) + #else /* MIPSR6 has no lwl instruction */ -#define LoadW(addr, value, res) \ +#define _LoadW(addr, value, res, type) \ +do { \ __asm__ __volatile__ ( \ ".set\tpush\n" \ ".set\tnoat\n\t" \ - "1:"user_lb("%0", "3(%2)")"\n\t" \ - "2:"user_lbu("$1", "2(%2)")"\n\t" \ + "1:"type##_lb("%0", "3(%2)")"\n\t" \ + "2:"type##_lbu("$1", "2(%2)")"\n\t" \ "sll\t%0, 0x8\n\t" \ "or\t%0, $1\n\t" \ - "3:"user_lbu("$1", "1(%2)")"\n\t" \ + "3:"type##_lbu("$1", "1(%2)")"\n\t" \ "sll\t%0, 0x8\n\t" \ "or\t%0, $1\n\t" \ - "4:"user_lbu("$1", "0(%2)")"\n\t" \ + "4:"type##_lbu("$1", "0(%2)")"\n\t" \ "sll\t%0, 0x8\n\t" \ "or\t%0, $1\n\t" \ "li\t%1, 0\n" \ @@ -525,15 +561,18 @@ extern void show_registers(struct pt_regs *regs); STR(PTR)"\t4b, 11b\n\t" \ ".previous" \ : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); + : "r" (addr), "i" (-EFAULT)); \ +} while(0) + #endif /* CONFIG_CPU_MIPSR6 */ -#define LoadHWU(addr, value, res) \ +#define _LoadHWU(addr, value, res, type) \ +do { \ __asm__ __volatile__ ( \ ".set\tnoat\n" \ - "1:\t"user_lbu("%0", "1(%2)")"\n" \ - "2:\t"user_lbu("$1", "0(%2)")"\n\t" \ + "1:\t"type##_lbu("%0", "1(%2)")"\n" \ + "2:\t"type##_lbu("$1", "0(%2)")"\n\t"\ "sll\t%0, 0x8\n\t" \ "or\t%0, $1\n\t" \ "li\t%1, 0\n" \ @@ -549,13 +588,15 @@ extern void show_registers(struct pt_regs *regs); STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); + : "r" (addr), "i" (-EFAULT)); \ +} while(0) #ifndef CONFIG_CPU_MIPSR6 -#define LoadWU(addr, value, res) \ +#define _LoadWU(addr, value, res, type) \ +do { \ __asm__ __volatile__ ( \ - "1:\t"user_lwl("%0", "3(%2)")"\n" \ - "2:\t"user_lwr("%0", "(%2)")"\n\t" \ + "1:\t"type##_lwl("%0", "3(%2)")"\n" \ + "2:\t"type##_lwr("%0", "(%2)")"\n\t"\ "dsll\t%0, %0, 32\n\t" \ "dsrl\t%0, %0, 32\n\t" \ "li\t%1, 0\n" \ @@ -570,9 +611,11 @@ extern void show_registers(struct pt_regs *regs); STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); + : "r" (addr), "i" (-EFAULT)); \ +} while(0) -#define LoadDW(addr, value, res) \ +#define _LoadDW(addr, value, res) \ +do { \ __asm__ __volatile__ ( \ "1:\tldl\t%0, 7(%2)\n" \ "2:\tldr\t%0, (%2)\n\t" \ @@ -588,21 +631,24 @@ extern void show_registers(struct pt_regs *regs); STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); + : "r" (addr), "i" (-EFAULT)); \ +} while(0) + #else /* MIPSR6 has not lwl and ldl instructions */ -#define LoadWU(addr, value, res) \ +#define _LoadWU(addr, value, res, type) \ +do { \ __asm__ __volatile__ ( \ ".set\tpush\n\t" \ ".set\tnoat\n\t" \ - "1:"user_lbu("%0", "3(%2)")"\n\t" \ - "2:"user_lbu("$1", "2(%2)")"\n\t" \ + "1:"type##_lbu("%0", "3(%2)")"\n\t" \ + "2:"type##_lbu("$1", "2(%2)")"\n\t" \ "sll\t%0, 0x8\n\t" \ "or\t%0, $1\n\t" \ - "3:"user_lbu("$1", "1(%2)")"\n\t" \ + "3:"type##_lbu("$1", "1(%2)")"\n\t" \ "sll\t%0, 0x8\n\t" \ "or\t%0, $1\n\t" \ - "4:"user_lbu("$1", "0(%2)")"\n\t" \ + "4:"type##_lbu("$1", "0(%2)")"\n\t" \ "sll\t%0, 0x8\n\t" \ "or\t%0, $1\n\t" \ "li\t%1, 0\n" \ @@ -620,9 +666,11 @@ extern void show_registers(struct pt_regs *regs); 
STR(PTR)"\t4b, 11b\n\t" \ ".previous" \ : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); + : "r" (addr), "i" (-EFAULT)); \ +} while(0) -#define LoadDW(addr, value, res) \ +#define _LoadDW(addr, value, res) \ +do { \ __asm__ __volatile__ ( \ ".set\tpush\n\t" \ ".set\tnoat\n\t" \ @@ -667,15 +715,17 @@ extern void show_registers(struct pt_regs *regs); STR(PTR)"\t8b, 11b\n\t" \ ".previous" \ : "=&r" (value), "=r" (res) \ - : "r" (addr), "i" (-EFAULT)); + : "r" (addr), "i" (-EFAULT)); \ +} while(0) #endif /* CONFIG_CPU_MIPSR6 */ -#define StoreHW(addr, value, res) \ +#define _StoreHW(addr, value, res, type) \ +do { \ __asm__ __volatile__ ( \ ".set\tnoat\n" \ - "1:\t"user_sb("%1", "0(%2)")"\n" \ + "1:\t"type##_sb("%1", "0(%2)")"\n" \ "srl\t$1,%1, 0x8\n" \ - "2:\t"user_sb("$1", "1(%2)")"\n" \ + "2:\t"type##_sb("$1", "1(%2)")"\n" \ ".set\tat\n\t" \ "li\t%0, 0\n" \ "3:\n\t" \ @@ -689,12 +739,15 @@ extern void show_registers(struct pt_regs *regs); STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=r" (res) \ - : "r" (value), "r" (addr), "i" (-EFAULT)); + : "r" (value), "r" (addr), "i" (-EFAULT));\ +} while(0) + #ifndef CONFIG_CPU_MIPSR6 -#define StoreW(addr, value, res) \ +#define _StoreW(addr, value, res, type) \ +do { \ __asm__ __volatile__ ( \ - "1:\t"user_swl("%1", "3(%2)")"\n" \ - "2:\t"user_swr("%1", "(%2)")"\n\t" \ + "1:\t"type##_swl("%1", "3(%2)")"\n" \ + "2:\t"type##_swr("%1", "(%2)")"\n\t"\ "li\t%0, 0\n" \ "3:\n\t" \ ".insn\n\t" \ @@ -707,9 +760,11 @@ extern void show_registers(struct pt_regs *regs); STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=r" (res) \ - : "r" (value), "r" (addr), "i" (-EFAULT)); + : "r" (value), "r" (addr), "i" (-EFAULT)); \ +} while(0) -#define StoreDW(addr, value, res) \ +#define _StoreDW(addr, value, res) \ +do { \ __asm__ __volatile__ ( \ "1:\tsdl\t%1, 7(%2)\n" \ "2:\tsdr\t%1, (%2)\n\t" \ @@ -725,20 +780,23 @@ extern void show_registers(struct pt_regs *regs); STR(PTR)"\t2b, 4b\n\t" \ ".previous" \ : "=r" (res) \ - : "r" (value), "r" (addr), "i" (-EFAULT)); + : "r" (value), "r" (addr), "i" (-EFAULT)); \ +} while(0) + #else /* MIPSR6 has no swl and sdl instructions */ -#define StoreW(addr, value, res) \ +#define _StoreW(addr, value, res, type) \ +do { \ __asm__ __volatile__ ( \ ".set\tpush\n\t" \ ".set\tnoat\n\t" \ - "1:"user_sb("%1", "0(%2)")"\n\t" \ + "1:"type##_sb("%1", "0(%2)")"\n\t" \ "srl\t$1, %1, 0x8\n\t" \ - "2:"user_sb("$1", "1(%2)")"\n\t" \ + "2:"type##_sb("$1", "1(%2)")"\n\t" \ "srl\t$1, $1, 0x8\n\t" \ - "3:"user_sb("$1", "2(%2)")"\n\t" \ + "3:"type##_sb("$1", "2(%2)")"\n\t" \ "srl\t$1, $1, 0x8\n\t" \ - "4:"user_sb("$1", "3(%2)")"\n\t" \ + "4:"type##_sb("$1", "3(%2)")"\n\t" \ ".set\tpop\n\t" \ "li\t%0, 0\n" \ "10:\n\t" \ @@ -755,9 +813,11 @@ extern void show_registers(struct pt_regs *regs); ".previous" \ : "=&r" (res) \ : "r" (value), "r" (addr), "i" (-EFAULT) \ - : "memory"); + : "memory"); \ +} while(0) -#define StoreDW(addr, value, res) \ +#define _StoreDW(addr, value, res) \ +do { \ __asm__ __volatile__ ( \ ".set\tpush\n\t" \ ".set\tnoat\n\t" \ @@ -797,10 +857,28 @@ extern void show_registers(struct pt_regs *regs); ".previous" \ : "=&r" (res) \ : "r" (value), "r" (addr), "i" (-EFAULT) \ - : "memory"); + : "memory"); \ +} while(0) + #endif /* CONFIG_CPU_MIPSR6 */ #endif +#define LoadHWU(addr, value, res) _LoadHWU(addr, value, res, kernel) +#define LoadHWUE(addr, value, res) _LoadHWU(addr, value, res, user) +#define LoadWU(addr, value, res) _LoadWU(addr, value, res, kernel) +#define LoadWUE(addr, value, res) _LoadWU(addr, value, res, user) +#define 
LoadHW(addr, value, res) _LoadHW(addr, value, res, kernel) +#define LoadHWE(addr, value, res) _LoadHW(addr, value, res, user) +#define LoadW(addr, value, res) _LoadW(addr, value, res, kernel) +#define LoadWE(addr, value, res) _LoadW(addr, value, res, user) +#define LoadDW(addr, value, res) _LoadDW(addr, value, res) + +#define StoreHW(addr, value, res) _StoreHW(addr, value, res, kernel) +#define StoreHWE(addr, value, res) _StoreHW(addr, value, res, user) +#define StoreW(addr, value, res) _StoreW(addr, value, res, kernel) +#define StoreWE(addr, value, res) _StoreW(addr, value, res, user) +#define StoreDW(addr, value, res) _StoreDW(addr, value, res) + static void emulate_load_store_insn(struct pt_regs *regs, void __user *addr, unsigned int __user *pc) { @@ -872,7 +950,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, set_fs(seg); goto sigbus; } - LoadHW(addr, value, res); + LoadHWE(addr, value, res); if (res) { set_fs(seg); goto fault; @@ -885,7 +963,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, set_fs(seg); goto sigbus; } - LoadW(addr, value, res); + LoadWE(addr, value, res); if (res) { set_fs(seg); goto fault; @@ -898,7 +976,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, set_fs(seg); goto sigbus; } - LoadHWU(addr, value, res); + LoadHWUE(addr, value, res); if (res) { set_fs(seg); goto fault; @@ -913,7 +991,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, } compute_return_epc(regs); value = regs->regs[insn.spec3_format.rt]; - StoreHW(addr, value, res); + StoreHWE(addr, value, res); if (res) { set_fs(seg); goto fault; @@ -926,7 +1004,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, } compute_return_epc(regs); value = regs->regs[insn.spec3_format.rt]; - StoreW(addr, value, res); + StoreWE(addr, value, res); if (res) { set_fs(seg); goto fault; @@ -943,7 +1021,15 @@ static void emulate_load_store_insn(struct pt_regs *regs, if (!access_ok(VERIFY_READ, addr, 2)) goto sigbus; - LoadHW(addr, value, res); + if (config_enabled(CONFIG_EVA)) { + if (segment_eq(get_fs(), get_ds())) + LoadHW(addr, value, res); + else + LoadHWE(addr, value, res); + } else { + LoadHW(addr, value, res); + } + if (res) goto fault; compute_return_epc(regs); @@ -954,7 +1040,15 @@ static void emulate_load_store_insn(struct pt_regs *regs, if (!access_ok(VERIFY_READ, addr, 4)) goto sigbus; - LoadW(addr, value, res); + if (config_enabled(CONFIG_EVA)) { + if (segment_eq(get_fs(), get_ds())) + LoadW(addr, value, res); + else + LoadWE(addr, value, res); + } else { + LoadW(addr, value, res); + } + if (res) goto fault; compute_return_epc(regs); @@ -965,7 +1059,15 @@ static void emulate_load_store_insn(struct pt_regs *regs, if (!access_ok(VERIFY_READ, addr, 2)) goto sigbus; - LoadHWU(addr, value, res); + if (config_enabled(CONFIG_EVA)) { + if (segment_eq(get_fs(), get_ds())) + LoadHWU(addr, value, res); + else + LoadHWUE(addr, value, res); + } else { + LoadHWU(addr, value, res); + } + if (res) goto fault; compute_return_epc(regs); @@ -1024,7 +1126,16 @@ static void emulate_load_store_insn(struct pt_regs *regs, compute_return_epc(regs); value = regs->regs[insn.i_format.rt]; - StoreHW(addr, value, res); + + if (config_enabled(CONFIG_EVA)) { + if (segment_eq(get_fs(), get_ds())) + StoreHW(addr, value, res); + else + StoreHWE(addr, value, res); + } else { + StoreHW(addr, value, res); + } + if (res) goto fault; break; @@ -1035,7 +1146,16 @@ static void emulate_load_store_insn(struct pt_regs *regs, compute_return_epc(regs); value = regs->regs[insn.i_format.rt]; - 
StoreW(addr, value, res); + + if (config_enabled(CONFIG_EVA)) { + if (segment_eq(get_fs(), get_ds())) + StoreW(addr, value, res); + else + StoreWE(addr, value, res); + } else { + StoreW(addr, value, res); + } + if (res) goto fault; break; @@ -1076,7 +1196,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, own_fpu(1); /* Restore FPU state. */ /* Signal if something went wrong. */ - process_fpemu_return(res, fault_addr); + process_fpemu_return(res, fault_addr, 0); if (res == 0) break; @@ -1511,7 +1631,7 @@ fpu_emul: own_fpu(1); /* restore FPU state */ /* If something went wrong, signal */ - process_fpemu_return(res, fault_addr); + process_fpemu_return(res, fault_addr, 0); if (res == 0) goto success; |
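Note on the macro refactoring in this hunk: every load/store helper is converted from a bare statement-macro into a do { } while (0) block and gains a "type" parameter that is token-pasted onto the accessor name (kernel_lb vs user_lb, kernel_sb vs user_sb, and so on), so one definition can emit either the normal or the EVA user-segment access; the thin LoadHW()/LoadHWE()-style wrappers added at the end then fix the variant at each call site. The do { } while (0) wrapping matters because the new dispatch code later in the patch invokes these macros from unbraced if/else branches. Below is a minimal, userspace-buildable sketch of that pattern only; kernel_lb()/user_lb(), _LoadB() and LoadB()/LoadBE() are simplified stand-ins invented for illustration, not the kernel's inline-asm helpers.

/*
 * Sketch of the do { } while (0) + token-pasting pattern used above.
 * kernel_lb()/user_lb() are plain C stand-ins for the real accessors.
 */
#include <stdio.h>

#define kernel_lb(dst, src) ((dst) = *(const signed char *)(src))
#define user_lb(dst, src)   ((dst) = *(const signed char *)(src)) /* would be lbe under EVA */

#define _LoadB(addr, value, res, type)  \
do {                                    \
	type##_lb(value, addr);         \
	res = 0;                        \
} while (0)

/* Thin wrappers, mirroring LoadHW()/LoadHWE() etc. in the patch. */
#define LoadB(addr, value, res)  _LoadB(addr, value, res, kernel)
#define LoadBE(addr, value, res) _LoadB(addr, value, res, user)

int main(void)
{
	signed char buf = -5;
	long value = 0;
	int res = -1;
	int from_user = 0;

	/* do { } while (0) keeps the expansion safe in this unbraced if/else. */
	if (from_user)
		LoadBE(&buf, value, res);
	else
		LoadB(&buf, value, res);

	printf("value=%ld res=%d\n", value, res);
	return 0;
}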
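The changes to emulate_load_store_insn() all follow one dispatch shape: on CONFIG_EVA kernels the accessor is picked at run time, using the plain macro when the current address space is the kernel segment (segment_eq(get_fs(), get_ds())) and the *E variant otherwise, while non-EVA kernels keep the single variant. The following is a compilable sketch of that control flow for the halfword-load case, with placeholder helpers (config_eva_enabled, fs_is_kernel_ds, load_hw, load_hwe, emulate_lh) standing in for the kernel interfaces shown in the diff.

#include <stdbool.h>
#include <stdio.h>

/* Placeholders for the kernel helpers used in the patch above. */
static bool config_eva_enabled(void) { return true; }  /* config_enabled(CONFIG_EVA) */
static bool fs_is_kernel_ds(void)    { return false; } /* segment_eq(get_fs(), get_ds()) */

/* Stand-in for LoadHW(): normal (kernel-segment) halfword load, big-endian. */
static int load_hw(const void *addr, long *value)
{
	const unsigned char *p = addr;
	*value = (short)((p[0] << 8) | p[1]);
	return 0;
}

/* Stand-in for LoadHWE(): EVA user-segment load (lhe/lbe in the real macros). */
static int load_hwe(const void *addr, long *value)
{
	return load_hw(addr, value);
}

/* Mirrors the lh_op dispatch added above. */
static int emulate_lh(const void *addr, long *value)
{
	if (config_eva_enabled()) {
		if (fs_is_kernel_ds())
			return load_hw(addr, value);  /* kernel address space: lh */
		else
			return load_hwe(addr, value); /* user address space: lhe */
	}
	return load_hw(addr, value);                  /* !CONFIG_EVA: single variant */
}

int main(void)
{
	long v;
	int res = emulate_lh("\x12\x34", &v);
	printf("res=%d value=0x%lx\n", res, v);
	return 0;
}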