author		Ingo Molnar <mingo@kernel.org>	2015-02-26 12:24:50 +0100
committer	Ingo Molnar <mingo@kernel.org>	2015-02-26 12:24:50 +0100
commit		e9e4e44309f866b115d08ab4a54834008c50a8a4 (patch)
tree		ae9f91e682a4d6592ef263f30a4a0b1a862b7987 /arch/mips/include/asm
parent		Merge tag 'perf-core-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/... (diff)
parent		Linux 4.0-rc1 (diff)
Merge tag 'v4.0-rc1' into perf/core, to refresh the tree
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/mips/include/asm')
-rw-r--r--  arch/mips/include/asm/Kbuild | 1
-rw-r--r--  arch/mips/include/asm/asmmacro.h | 18
-rw-r--r--  arch/mips/include/asm/atomic.h | 42
-rw-r--r--  arch/mips/include/asm/bitops.h | 64
-rw-r--r--  arch/mips/include/asm/checksum.h | 45
-rw-r--r--  arch/mips/include/asm/cmpxchg.h | 34
-rw-r--r--  arch/mips/include/asm/compiler.h | 24
-rw-r--r--  arch/mips/include/asm/cpu-features.h | 28
-rw-r--r--  arch/mips/include/asm/cpu-info.h | 5
-rw-r--r--  arch/mips/include/asm/cpu-type.h | 7
-rw-r--r--  arch/mips/include/asm/cpu.h | 11
-rw-r--r--  arch/mips/include/asm/edac.h | 4
-rw-r--r--  arch/mips/include/asm/elf.h | 10
-rw-r--r--  arch/mips/include/asm/fpu.h | 46
-rw-r--r--  arch/mips/include/asm/futex.h | 24
-rw-r--r--  arch/mips/include/asm/fw/arc/hinv.h | 6
-rw-r--r--  arch/mips/include/asm/gio_device.h | 2
-rw-r--r--  arch/mips/include/asm/hazards.h | 9
-rw-r--r--  arch/mips/include/asm/irqflags.h | 7
-rw-r--r--  arch/mips/include/asm/kvm_host.h | 1
-rw-r--r--  arch/mips/include/asm/local.h | 5
-rw-r--r--  arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h | 64
-rw-r--r--  arch/mips/include/asm/mach-cavium-octeon/war.h | 3
-rw-r--r--  arch/mips/include/asm/mach-jz4740/jz4740_nand.h | 2
-rw-r--r--  arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h | 24
-rw-r--r--  arch/mips/include/asm/mips-cm.h | 4
-rw-r--r--  arch/mips/include/asm/mips-r2-to-r6-emul.h | 96
-rw-r--r--  arch/mips/include/asm/mipsregs.h | 19
-rw-r--r--  arch/mips/include/asm/mmu.h | 3
-rw-r--r--  arch/mips/include/asm/mmu_context.h | 9
-rw-r--r--  arch/mips/include/asm/module.h | 4
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-cmd-queue.h | 2
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-rst-defs.h | 306
-rw-r--r--  arch/mips/include/asm/octeon/octeon-feature.h | 17
-rw-r--r--  arch/mips/include/asm/octeon/octeon-model.h | 107
-rw-r--r--  arch/mips/include/asm/octeon/octeon.h | 153
-rw-r--r--  arch/mips/include/asm/pci.h | 2
-rw-r--r--  arch/mips/include/asm/pgtable-32.h | 38
-rw-r--r--  arch/mips/include/asm/pgtable-64.h | 9
-rw-r--r--  arch/mips/include/asm/pgtable-bits.h | 92
-rw-r--r--  arch/mips/include/asm/pgtable.h | 48
-rw-r--r--  arch/mips/include/asm/processor.h | 19
-rw-r--r--  arch/mips/include/asm/prom.h | 7
-rw-r--r--  arch/mips/include/asm/ptrace.h | 4
-rw-r--r--  arch/mips/include/asm/r4kcache.h | 150
-rw-r--r--  arch/mips/include/asm/sgialib.h | 8
-rw-r--r--  arch/mips/include/asm/siginfo.h | 29
-rw-r--r--  arch/mips/include/asm/spinlock.h | 55
-rw-r--r--  arch/mips/include/asm/spram.h | 4
-rw-r--r--  arch/mips/include/asm/stackframe.h | 8
-rw-r--r--  arch/mips/include/asm/switch_to.h | 9
-rw-r--r--  arch/mips/include/asm/syscall.h | 8
-rw-r--r--  arch/mips/include/asm/thread_info.h | 7
53 files changed, 1214 insertions(+), 489 deletions(-)
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index 200efeac4181..526539cbc99f 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -1,4 +1,5 @@
# MIPS headers
+generic-$(CONFIG_GENERIC_CSUM) += checksum.h
generic-y += cputime.h
generic-y += current.h
generic-y += dma-contiguous.h
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
index 6caf8766b80f..0cae4595e985 100644
--- a/arch/mips/include/asm/asmmacro.h
+++ b/arch/mips/include/asm/asmmacro.h
@@ -19,7 +19,7 @@
#include <asm/asmmacro-64.h>
#endif
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
.macro local_irq_enable reg=t0
ei
irq_enable_hazard
@@ -104,7 +104,8 @@
.endm
.macro fpu_save_double thread status tmp
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
+ defined(CONFIG_CPU_MIPS32_R6)
sll \tmp, \status, 5
bgez \tmp, 10f
fpu_save_16odd \thread
@@ -160,7 +161,8 @@
.endm
.macro fpu_restore_double thread status tmp
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
+ defined(CONFIG_CPU_MIPS32_R6)
sll \tmp, \status, 5
bgez \tmp, 10f # 16 register mode?
@@ -170,16 +172,16 @@
fpu_restore_16even \thread \tmp
.endm
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
.macro _EXT rd, rs, p, s
ext \rd, \rs, \p, \s
.endm
-#else /* !CONFIG_CPU_MIPSR2 */
+#else /* !CONFIG_CPU_MIPSR2 || !CONFIG_CPU_MIPSR6 */
.macro _EXT rd, rs, p, s
srl \rd, \rs, \p
andi \rd, \rd, (1 << \s) - 1
.endm
-#endif /* !CONFIG_CPU_MIPSR2 */
+#endif /* !CONFIG_CPU_MIPSR2 || !CONFIG_CPU_MIPSR6 */
/*
* Temporary until all gas have MT ASE support
@@ -304,7 +306,7 @@
.set push
.set noat
SET_HARDFLOAT
- add $1, \base, \off
+ addu $1, \base, \off
.word LDD_MSA_INSN | (\wd << 6)
.set pop
.endm
@@ -313,7 +315,7 @@
.set push
.set noat
SET_HARDFLOAT
- add $1, \base, \off
+ addu $1, \base, \off
.word STD_MSA_INSN | (\wd << 6)
.set pop
.endm
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 857da84cfc92..26d436336f2e 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -54,19 +54,19 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
" sc %0, %1 \n" \
" beqzl %0, 1b \n" \
" .set mips0 \n" \
- : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i)); \
} else if (kernel_uses_llsc) { \
int temp; \
\
do { \
__asm__ __volatile__( \
- " .set arch=r4000 \n" \
+ " .set "MIPS_ISA_LEVEL" \n" \
" ll %0, %1 # atomic_" #op "\n" \
" " #asm_op " %0, %2 \n" \
" sc %0, %1 \n" \
" .set mips0 \n" \
- : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i)); \
} while (unlikely(!temp)); \
} else { \
@@ -97,20 +97,20 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
" " #asm_op " %0, %1, %3 \n" \
" .set mips0 \n" \
: "=&r" (result), "=&r" (temp), \
- "+" GCC_OFF12_ASM() (v->counter) \
+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i)); \
} else if (kernel_uses_llsc) { \
int temp; \
\
do { \
__asm__ __volatile__( \
- " .set arch=r4000 \n" \
+ " .set "MIPS_ISA_LEVEL" \n" \
" ll %1, %2 # atomic_" #op "_return \n" \
" " #asm_op " %0, %1, %3 \n" \
" sc %0, %2 \n" \
" .set mips0 \n" \
: "=&r" (result), "=&r" (temp), \
- "+" GCC_OFF12_ASM() (v->counter) \
+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i)); \
} while (unlikely(!result)); \
\
@@ -171,14 +171,14 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
"1: \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp),
- "+" GCC_OFF12_ASM() (v->counter)
- : "Ir" (i), GCC_OFF12_ASM() (v->counter)
+ "+" GCC_OFF_SMALL_ASM() (v->counter)
+ : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter)
: "memory");
} else if (kernel_uses_llsc) {
int temp;
__asm__ __volatile__(
- " .set arch=r4000 \n"
+ " .set "MIPS_ISA_LEVEL" \n"
"1: ll %1, %2 # atomic_sub_if_positive\n"
" subu %0, %1, %3 \n"
" bltz %0, 1f \n"
@@ -190,7 +190,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
"1: \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp),
- "+" GCC_OFF12_ASM() (v->counter)
+ "+" GCC_OFF_SMALL_ASM() (v->counter)
: "Ir" (i));
} else {
unsigned long flags;
@@ -333,19 +333,19 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \
" scd %0, %1 \n" \
" beqzl %0, 1b \n" \
" .set mips0 \n" \
- : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i)); \
} else if (kernel_uses_llsc) { \
long temp; \
\
do { \
__asm__ __volatile__( \
- " .set arch=r4000 \n" \
+ " .set "MIPS_ISA_LEVEL" \n" \
" lld %0, %1 # atomic64_" #op "\n" \
" " #asm_op " %0, %2 \n" \
" scd %0, %1 \n" \
" .set mips0 \n" \
- : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i)); \
} while (unlikely(!temp)); \
} else { \
@@ -376,21 +376,21 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
" " #asm_op " %0, %1, %3 \n" \
" .set mips0 \n" \
: "=&r" (result), "=&r" (temp), \
- "+" GCC_OFF12_ASM() (v->counter) \
+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i)); \
} else if (kernel_uses_llsc) { \
long temp; \
\
do { \
__asm__ __volatile__( \
- " .set arch=r4000 \n" \
+ " .set "MIPS_ISA_LEVEL" \n" \
" lld %1, %2 # atomic64_" #op "_return\n" \
" " #asm_op " %0, %1, %3 \n" \
" scd %0, %2 \n" \
" .set mips0 \n" \
: "=&r" (result), "=&r" (temp), \
- "=" GCC_OFF12_ASM() (v->counter) \
- : "Ir" (i), GCC_OFF12_ASM() (v->counter) \
+ "=" GCC_OFF_SMALL_ASM() (v->counter) \
+ : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \
: "memory"); \
} while (unlikely(!result)); \
\
@@ -452,14 +452,14 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
"1: \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp),
- "=" GCC_OFF12_ASM() (v->counter)
- : "Ir" (i), GCC_OFF12_ASM() (v->counter)
+ "=" GCC_OFF_SMALL_ASM() (v->counter)
+ : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter)
: "memory");
} else if (kernel_uses_llsc) {
long temp;
__asm__ __volatile__(
- " .set arch=r4000 \n"
+ " .set "MIPS_ISA_LEVEL" \n"
"1: lld %1, %2 # atomic64_sub_if_positive\n"
" dsubu %0, %1, %3 \n"
" bltz %0, 1f \n"
@@ -471,7 +471,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
"1: \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp),
- "+" GCC_OFF12_ASM() (v->counter)
+ "+" GCC_OFF_SMALL_ASM() (v->counter)
: "Ir" (i));
} else {
unsigned long flags;
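
The two substitutions repeated throughout this header are GCC_OFF12_ASM() -> GCC_OFF_SMALL_ASM() (the constraint used for the ll/sc memory operand) and the hard-coded ".set arch=r4000" -> the MIPS_ISA_LEVEL string from <asm/compiler.h>, so the same inline assembly assembles for both pre-R6 and R6 configurations. A condensed sketch of the ll/sc retry loop they end up in is shown below; it assumes a kernel context (atomic_t, unlikely) and is not the kernel's ATOMIC_OP macro itself.

/*
 * Illustrative only: roughly what ATOMIC_OP(add, +=, addu) expands to
 * after this patch on an LL/SC-only configuration. Not kernel source.
 */
static __inline__ void atomic_add_sketch(int i, atomic_t *v)
{
	int temp;

	do {
		__asm__ __volatile__(
		"	.set	"MIPS_ISA_LEVEL"		\n"
		"	ll	%0, %1		# atomic_add	\n"
		"	addu	%0, %2				\n"
		"	sc	%0, %1				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)
		: "Ir" (i));
	} while (unlikely(!temp));
}
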
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index 6663bcca9d0c..9f935f6aa996 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -79,28 +79,28 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
" " __SC "%0, %1 \n"
" beqzl %0, 1b \n"
" .set mips0 \n"
- : "=&r" (temp), "=" GCC_OFF12_ASM() (*m)
- : "ir" (1UL << bit), GCC_OFF12_ASM() (*m));
-#ifdef CONFIG_CPU_MIPSR2
+ : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*m)
+ : "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m));
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
do {
__asm__ __volatile__(
" " __LL "%0, %1 # set_bit \n"
" " __INS "%0, %3, %2, 1 \n"
" " __SC "%0, %1 \n"
- : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
: "ir" (bit), "r" (~0));
} while (unlikely(!temp));
-#endif /* CONFIG_CPU_MIPSR2 */
+#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
} else if (kernel_uses_llsc) {
do {
__asm__ __volatile__(
- " .set arch=r4000 \n"
+ " .set "MIPS_ISA_ARCH_LEVEL" \n"
" " __LL "%0, %1 # set_bit \n"
" or %0, %2 \n"
" " __SC "%0, %1 \n"
" .set mips0 \n"
- : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
: "ir" (1UL << bit));
} while (unlikely(!temp));
} else
@@ -131,28 +131,28 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
" " __SC "%0, %1 \n"
" beqzl %0, 1b \n"
" .set mips0 \n"
- : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
: "ir" (~(1UL << bit)));
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
do {
__asm__ __volatile__(
" " __LL "%0, %1 # clear_bit \n"
" " __INS "%0, $0, %2, 1 \n"
" " __SC "%0, %1 \n"
- : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
: "ir" (bit));
} while (unlikely(!temp));
-#endif /* CONFIG_CPU_MIPSR2 */
+#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
} else if (kernel_uses_llsc) {
do {
__asm__ __volatile__(
- " .set arch=r4000 \n"
+ " .set "MIPS_ISA_ARCH_LEVEL" \n"
" " __LL "%0, %1 # clear_bit \n"
" and %0, %2 \n"
" " __SC "%0, %1 \n"
" .set mips0 \n"
- : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
: "ir" (~(1UL << bit)));
} while (unlikely(!temp));
} else
@@ -197,7 +197,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
" " __SC "%0, %1 \n"
" beqzl %0, 1b \n"
" .set mips0 \n"
- : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
: "ir" (1UL << bit));
} else if (kernel_uses_llsc) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
@@ -205,12 +205,12 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
do {
__asm__ __volatile__(
- " .set arch=r4000 \n"
+ " .set "MIPS_ISA_ARCH_LEVEL" \n"
" " __LL "%0, %1 # change_bit \n"
" xor %0, %2 \n"
" " __SC "%0, %1 \n"
" .set mips0 \n"
- : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
: "ir" (1UL << bit));
} while (unlikely(!temp));
} else
@@ -245,7 +245,7 @@ static inline int test_and_set_bit(unsigned long nr,
" beqzl %2, 1b \n"
" and %2, %0, %3 \n"
" .set mips0 \n"
- : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
: "r" (1UL << bit)
: "memory");
} else if (kernel_uses_llsc) {
@@ -254,12 +254,12 @@ static inline int test_and_set_bit(unsigned long nr,
do {
__asm__ __volatile__(
- " .set arch=r4000 \n"
+ " .set "MIPS_ISA_ARCH_LEVEL" \n"
" " __LL "%0, %1 # test_and_set_bit \n"
" or %2, %0, %3 \n"
" " __SC "%2, %1 \n"
" .set mips0 \n"
- : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
: "r" (1UL << bit)
: "memory");
} while (unlikely(!res));
@@ -308,12 +308,12 @@ static inline int test_and_set_bit_lock(unsigned long nr,
do {
__asm__ __volatile__(
- " .set arch=r4000 \n"
+ " .set "MIPS_ISA_ARCH_LEVEL" \n"
" " __LL "%0, %1 # test_and_set_bit \n"
" or %2, %0, %3 \n"
" " __SC "%2, %1 \n"
" .set mips0 \n"
- : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
: "r" (1UL << bit)
: "memory");
} while (unlikely(!res));
@@ -355,10 +355,10 @@ static inline int test_and_clear_bit(unsigned long nr,
" beqzl %2, 1b \n"
" and %2, %0, %3 \n"
" .set mips0 \n"
- : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
: "r" (1UL << bit)
: "memory");
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
} else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp;
@@ -369,7 +369,7 @@ static inline int test_and_clear_bit(unsigned long nr,
" " __EXT "%2, %0, %3, 1 \n"
" " __INS "%0, $0, %3, 1 \n"
" " __SC "%0, %1 \n"
- : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
: "ir" (bit)
: "memory");
} while (unlikely(!temp));
@@ -380,13 +380,13 @@ static inline int test_and_clear_bit(unsigned long nr,
do {
__asm__ __volatile__(
- " .set arch=r4000 \n"
+ " .set "MIPS_ISA_ARCH_LEVEL" \n"
" " __LL "%0, %1 # test_and_clear_bit \n"
" or %2, %0, %3 \n"
" xor %2, %3 \n"
" " __SC "%2, %1 \n"
" .set mips0 \n"
- : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
: "r" (1UL << bit)
: "memory");
} while (unlikely(!res));
@@ -428,7 +428,7 @@ static inline int test_and_change_bit(unsigned long nr,
" beqzl %2, 1b \n"
" and %2, %0, %3 \n"
" .set mips0 \n"
- : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
: "r" (1UL << bit)
: "memory");
} else if (kernel_uses_llsc) {
@@ -437,12 +437,12 @@ static inline int test_and_change_bit(unsigned long nr,
do {
__asm__ __volatile__(
- " .set arch=r4000 \n"
+ " .set "MIPS_ISA_ARCH_LEVEL" \n"
" " __LL "%0, %1 # test_and_change_bit \n"
" xor %2, %0, %3 \n"
" " __SC "\t%2, %1 \n"
" .set mips0 \n"
- : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
: "r" (1UL << bit)
: "memory");
} while (unlikely(!res));
@@ -485,7 +485,7 @@ static inline unsigned long __fls(unsigned long word)
__builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
__asm__(
" .set push \n"
- " .set mips32 \n"
+ " .set "MIPS_ISA_LEVEL" \n"
" clz %0, %1 \n"
" .set pop \n"
: "=r" (num)
@@ -498,7 +498,7 @@ static inline unsigned long __fls(unsigned long word)
__builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
__asm__(
" .set push \n"
- " .set mips64 \n"
+ " .set "MIPS_ISA_LEVEL" \n"
" dclz %0, %1 \n"
" .set pop \n"
: "=r" (num)
@@ -562,7 +562,7 @@ static inline int fls(int x)
if (__builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
__asm__(
" .set push \n"
- " .set mips32 \n"
+ " .set "MIPS_ISA_LEVEL" \n"
" clz %0, %1 \n"
" .set pop \n"
: "=r" (x)
diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h
index 3418c51e1151..5c585c5c1c3e 100644
--- a/arch/mips/include/asm/checksum.h
+++ b/arch/mips/include/asm/checksum.h
@@ -12,6 +12,10 @@
#ifndef _ASM_CHECKSUM_H
#define _ASM_CHECKSUM_H
+#ifdef CONFIG_GENERIC_CSUM
+#include <asm-generic/checksum.h>
+#else
+
#include <linux/in6.h>
#include <asm/uaccess.h>
@@ -99,27 +103,23 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
*/
__wsum csum_partial_copy_nocheck(const void *src, void *dst,
int len, __wsum sum);
+#define csum_partial_copy_nocheck csum_partial_copy_nocheck
/*
* Fold a partial checksum without adding pseudo headers
*/
-static inline __sum16 csum_fold(__wsum sum)
+static inline __sum16 csum_fold(__wsum csum)
{
- __asm__(
- " .set push # csum_fold\n"
- " .set noat \n"
- " sll $1, %0, 16 \n"
- " addu %0, $1 \n"
- " sltu $1, %0, $1 \n"
- " srl %0, %0, 16 \n"
- " addu %0, $1 \n"
- " xori %0, 0xffff \n"
- " .set pop"
- : "=r" (sum)
- : "0" (sum));
+ u32 sum = (__force u32)csum;
- return (__force __sum16)sum;
+ sum += (sum << 16);
+ csum = (sum < csum);
+ sum >>= 16;
+ sum += csum;
+
+ return (__force __sum16)~sum;
}
+#define csum_fold csum_fold
/*
* This is a version of ip_compute_csum() optimized for IP headers,
@@ -158,6 +158,7 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
return csum_fold(csum);
}
+#define ip_fast_csum ip_fast_csum
static inline __wsum csum_tcpudp_nofold(__be32 saddr,
__be32 daddr, unsigned short len, unsigned short proto,
@@ -200,18 +201,7 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr,
return sum;
}
-
-/*
- * computes the checksum of the TCP/UDP pseudo-header
- * returns a 16-bit checksum, already complemented
- */
-static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
- unsigned short len,
- unsigned short proto,
- __wsum sum)
-{
- return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
-}
+#define csum_tcpudp_nofold csum_tcpudp_nofold
/*
* this routine is used for miscellaneous IP-like checksums, mainly
@@ -287,4 +277,7 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
return csum_fold(sum);
}
+#include <asm-generic/checksum.h>
+#endif /* CONFIG_GENERIC_CSUM */
+
#endif /* _ASM_CHECKSUM_H */
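
The new csum_fold() drops the hand-written assembly in favour of plain C that the compiler can schedule for any ISA revision: add the low 16-bit half into the high half, propagate the end-around carry, then complement. A small stand-alone demonstration of the same arithmetic (hypothetical user-space code, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Same fold as the new csum_fold(), minus the __force annotations. */
static uint16_t fold16(uint32_t csum)
{
	uint32_t sum = csum;

	sum += (sum << 16);	/* add low half into high half       */
	csum = (sum < csum);	/* carry out of that 32-bit addition */
	sum >>= 16;		/* keep the high half...             */
	sum += csum;		/* ...plus the end-around carry      */

	return (uint16_t)~sum;	/* ones' complement result           */
}

int main(void)
{
	/* 0x1a2b + 0x3c4d = 0x5678, so the fold yields ~0x5678 = 0xa987 */
	printf("0x%04x\n", fold16(0x1a2b3c4dU));
	return 0;
}
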
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index 28b1edf19501..d0a2a68ca600 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -31,24 +31,24 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
" sc %2, %1 \n"
" beqzl %2, 1b \n"
" .set mips0 \n"
- : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), "=&r" (dummy)
- : GCC_OFF12_ASM() (*m), "Jr" (val)
+ : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), "=&r" (dummy)
+ : GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
: "memory");
} else if (kernel_uses_llsc) {
unsigned long dummy;
do {
__asm__ __volatile__(
- " .set arch=r4000 \n"
+ " .set "MIPS_ISA_ARCH_LEVEL" \n"
" ll %0, %3 # xchg_u32 \n"
" .set mips0 \n"
" move %2, %z4 \n"
- " .set arch=r4000 \n"
+ " .set "MIPS_ISA_ARCH_LEVEL" \n"
" sc %2, %1 \n"
" .set mips0 \n"
- : "=&r" (retval), "=" GCC_OFF12_ASM() (*m),
+ : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m),
"=&r" (dummy)
- : GCC_OFF12_ASM() (*m), "Jr" (val)
+ : GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
: "memory");
} while (unlikely(!dummy));
} else {
@@ -82,22 +82,22 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
" scd %2, %1 \n"
" beqzl %2, 1b \n"
" .set mips0 \n"
- : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), "=&r" (dummy)
- : GCC_OFF12_ASM() (*m), "Jr" (val)
+ : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), "=&r" (dummy)
+ : GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
: "memory");
} else if (kernel_uses_llsc) {
unsigned long dummy;
do {
__asm__ __volatile__(
- " .set arch=r4000 \n"
+ " .set "MIPS_ISA_ARCH_LEVEL" \n"
" lld %0, %3 # xchg_u64 \n"
" move %2, %z4 \n"
" scd %2, %1 \n"
" .set mips0 \n"
- : "=&r" (retval), "=" GCC_OFF12_ASM() (*m),
+ : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m),
"=&r" (dummy)
- : GCC_OFF12_ASM() (*m), "Jr" (val)
+ : GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
: "memory");
} while (unlikely(!dummy));
} else {
@@ -158,25 +158,25 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
" beqzl $1, 1b \n" \
"2: \n" \
" .set pop \n" \
- : "=&r" (__ret), "=" GCC_OFF12_ASM() (*m) \
- : GCC_OFF12_ASM() (*m), "Jr" (old), "Jr" (new) \
+ : "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m) \
+ : GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new) \
: "memory"); \
} else if (kernel_uses_llsc) { \
__asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
- " .set arch=r4000 \n" \
+ " .set "MIPS_ISA_ARCH_LEVEL" \n" \
"1: " ld " %0, %2 # __cmpxchg_asm \n" \
" bne %0, %z3, 2f \n" \
" .set mips0 \n" \
" move $1, %z4 \n" \
- " .set arch=r4000 \n" \
+ " .set "MIPS_ISA_ARCH_LEVEL" \n" \
" " st " $1, %1 \n" \
" beqz $1, 1b \n" \
" .set pop \n" \
"2: \n" \
- : "=&r" (__ret), "=" GCC_OFF12_ASM() (*m) \
- : GCC_OFF12_ASM() (*m), "Jr" (old), "Jr" (new) \
+ : "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m) \
+ : GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new) \
: "memory"); \
} else { \
unsigned long __flags; \
diff --git a/arch/mips/include/asm/compiler.h b/arch/mips/include/asm/compiler.h
index c73815e0123a..e081a265f422 100644
--- a/arch/mips/include/asm/compiler.h
+++ b/arch/mips/include/asm/compiler.h
@@ -16,12 +16,30 @@
#define GCC_REG_ACCUM "accum"
#endif
+#ifdef CONFIG_CPU_MIPSR6
+/* All MIPS R6 toolchains support the ZC constraint */
+#define GCC_OFF_SMALL_ASM() "ZC"
+#else
#ifndef CONFIG_CPU_MICROMIPS
-#define GCC_OFF12_ASM() "R"
+#define GCC_OFF_SMALL_ASM() "R"
#elif __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9)
-#define GCC_OFF12_ASM() "ZC"
+#define GCC_OFF_SMALL_ASM() "ZC"
#else
#error "microMIPS compilation unsupported with GCC older than 4.9"
-#endif
+#endif /* CONFIG_CPU_MICROMIPS */
+#endif /* CONFIG_CPU_MIPSR6 */
+
+#ifdef CONFIG_CPU_MIPSR6
+#define MIPS_ISA_LEVEL "mips64r6"
+#define MIPS_ISA_ARCH_LEVEL MIPS_ISA_LEVEL
+#define MIPS_ISA_LEVEL_RAW mips64r6
+#define MIPS_ISA_ARCH_LEVEL_RAW MIPS_ISA_LEVEL_RAW
+#else
+/* MIPS64 is a superset of MIPS32 */
+#define MIPS_ISA_LEVEL "mips64r2"
+#define MIPS_ISA_ARCH_LEVEL "arch=r4000"
+#define MIPS_ISA_LEVEL_RAW mips64r2
+#define MIPS_ISA_ARCH_LEVEL_RAW MIPS_ISA_LEVEL_RAW
+#endif /* CONFIG_CPU_MIPSR6 */
#endif /* _ASM_COMPILER_H */
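
These macros are what every other file in this series switches to: MIPS_ISA_LEVEL and MIPS_ISA_ARCH_LEVEL are strings spliced into ".set" directives from C inline assembly, the *_RAW variants are the same names as bare tokens for places that need to paste or stringify them, and GCC_OFF_SMALL_ASM() selects the "ZC" constraint where ll/sc offsets are smaller than the usual 16 bits (9 bits on R6, 12 on microMIPS). A minimal usage sketch, modelled on the __fls() change in bitops.h (the function name is illustrative):

#include <asm/compiler.h>

static inline unsigned int clz32_sketch(unsigned int word)
{
	unsigned int num;

	__asm__(
	"	.set	push			\n"
	"	.set	"MIPS_ISA_LEVEL"	\n"	/* "mips64r2" or "mips64r6" */
	"	clz	%0, %1			\n"
	"	.set	pop			\n"
	: "=r" (num)
	: "r" (word));

	return num;
}
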
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 2897cfafcaf0..0d8208de9a3f 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -38,6 +38,9 @@
#ifndef cpu_has_maar
#define cpu_has_maar (cpu_data[0].options & MIPS_CPU_MAAR)
#endif
+#ifndef cpu_has_rw_llb
+#define cpu_has_rw_llb (cpu_data[0].options & MIPS_CPU_RW_LLB)
+#endif
/*
* For the moment we don't consider R6000 and R8000 so we can assume that
@@ -171,6 +174,9 @@
#endif
#endif
+#ifndef cpu_has_mips_1
+# define cpu_has_mips_1 (!cpu_has_mips_r6)
+#endif
#ifndef cpu_has_mips_2
# define cpu_has_mips_2 (cpu_data[0].isa_level & MIPS_CPU_ISA_II)
#endif
@@ -189,12 +195,18 @@
#ifndef cpu_has_mips32r2
# define cpu_has_mips32r2 (cpu_data[0].isa_level & MIPS_CPU_ISA_M32R2)
#endif
+#ifndef cpu_has_mips32r6
+# define cpu_has_mips32r6 (cpu_data[0].isa_level & MIPS_CPU_ISA_M32R6)
+#endif
#ifndef cpu_has_mips64r1
# define cpu_has_mips64r1 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R1)
#endif
#ifndef cpu_has_mips64r2
# define cpu_has_mips64r2 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R2)
#endif
+#ifndef cpu_has_mips64r6
+# define cpu_has_mips64r6 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R6)
+#endif
/*
* Shortcuts ...
@@ -208,17 +220,23 @@
#define cpu_has_mips_4_5_r (cpu_has_mips_4 | cpu_has_mips_5_r)
#define cpu_has_mips_5_r (cpu_has_mips_5 | cpu_has_mips_r)
-#define cpu_has_mips_4_5_r2 (cpu_has_mips_4_5 | cpu_has_mips_r2)
+#define cpu_has_mips_4_5_r2_r6 (cpu_has_mips_4_5 | cpu_has_mips_r2 | \
+ cpu_has_mips_r6)
-#define cpu_has_mips32 (cpu_has_mips32r1 | cpu_has_mips32r2)
-#define cpu_has_mips64 (cpu_has_mips64r1 | cpu_has_mips64r2)
+#define cpu_has_mips32 (cpu_has_mips32r1 | cpu_has_mips32r2 | cpu_has_mips32r6)
+#define cpu_has_mips64 (cpu_has_mips64r1 | cpu_has_mips64r2 | cpu_has_mips64r6)
#define cpu_has_mips_r1 (cpu_has_mips32r1 | cpu_has_mips64r1)
#define cpu_has_mips_r2 (cpu_has_mips32r2 | cpu_has_mips64r2)
+#define cpu_has_mips_r6 (cpu_has_mips32r6 | cpu_has_mips64r6)
#define cpu_has_mips_r (cpu_has_mips32r1 | cpu_has_mips32r2 | \
- cpu_has_mips64r1 | cpu_has_mips64r2)
+ cpu_has_mips32r6 | cpu_has_mips64r1 | \
+ cpu_has_mips64r2 | cpu_has_mips64r6)
+
+/* MIPSR2 and MIPSR6 have a lot of similarities */
+#define cpu_has_mips_r2_r6 (cpu_has_mips_r2 | cpu_has_mips_r6)
#ifndef cpu_has_mips_r2_exec_hazard
-#define cpu_has_mips_r2_exec_hazard cpu_has_mips_r2
+#define cpu_has_mips_r2_exec_hazard (cpu_has_mips_r2 | cpu_has_mips_r6)
#endif
/*
diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
index a6c9ccb33c5c..c3f4f2d2e108 100644
--- a/arch/mips/include/asm/cpu-info.h
+++ b/arch/mips/include/asm/cpu-info.h
@@ -84,6 +84,11 @@ struct cpuinfo_mips {
* (shifted by _CACHE_SHIFT)
*/
unsigned int writecombine;
+ /*
+ * Simple counter to prevent enabling HTW in nested
+ * htw_start/htw_stop calls
+ */
+ unsigned int htw_seq;
} __attribute__((aligned(SMP_CACHE_BYTES)));
extern struct cpuinfo_mips cpu_data[];
diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h
index b4e2bd87df50..8245875f8b33 100644
--- a/arch/mips/include/asm/cpu-type.h
+++ b/arch/mips/include/asm/cpu-type.h
@@ -54,6 +54,13 @@ static inline int __pure __get_cpu_type(const int cpu_type)
case CPU_M5150:
#endif
+#if defined(CONFIG_SYS_HAS_CPU_MIPS32_R2) || \
+ defined(CONFIG_SYS_HAS_CPU_MIPS32_R6) || \
+ defined(CONFIG_SYS_HAS_CPU_MIPS64_R2) || \
+ defined(CONFIG_SYS_HAS_CPU_MIPS64_R6)
+ case CPU_QEMU_GENERIC:
+#endif
+
#ifdef CONFIG_SYS_HAS_CPU_MIPS64_R1
case CPU_5KC:
case CPU_5KE:
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index 33866fce4d63..15687234d70a 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -93,6 +93,7 @@
* These are the PRID's for when 23:16 == PRID_COMP_MIPS
*/
+#define PRID_IMP_QEMU_GENERIC 0x0000
#define PRID_IMP_4KC 0x8000
#define PRID_IMP_5KC 0x8100
#define PRID_IMP_20KC 0x8200
@@ -312,6 +313,8 @@ enum cpu_type_enum {
CPU_LOONGSON3, CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS,
CPU_CAVIUM_OCTEON2, CPU_CAVIUM_OCTEON3, CPU_XLR, CPU_XLP,
+ CPU_QEMU_GENERIC,
+
CPU_LAST
};
@@ -329,11 +332,14 @@ enum cpu_type_enum {
#define MIPS_CPU_ISA_M32R2 0x00000020
#define MIPS_CPU_ISA_M64R1 0x00000040
#define MIPS_CPU_ISA_M64R2 0x00000080
+#define MIPS_CPU_ISA_M32R6 0x00000100
+#define MIPS_CPU_ISA_M64R6 0x00000200
#define MIPS_CPU_ISA_32BIT (MIPS_CPU_ISA_II | MIPS_CPU_ISA_M32R1 | \
- MIPS_CPU_ISA_M32R2)
+ MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M32R6)
#define MIPS_CPU_ISA_64BIT (MIPS_CPU_ISA_III | MIPS_CPU_ISA_IV | \
- MIPS_CPU_ISA_V | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)
+ MIPS_CPU_ISA_V | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2 | \
+ MIPS_CPU_ISA_M64R6)
/*
* CPU Option encodings
@@ -370,6 +376,7 @@ enum cpu_type_enum {
#define MIPS_CPU_RIXIEX 0x200000000ull /* CPU has unique exception codes for {Read, Execute}-Inhibit exceptions */
#define MIPS_CPU_MAAR 0x400000000ull /* MAAR(I) registers are present */
#define MIPS_CPU_FRE 0x800000000ull /* FRE & UFE bits implemented */
+#define MIPS_CPU_RW_LLB 0x1000000000ull /* LLADDR/LLB writes are allowed */
/*
* CPU ASE encodings
diff --git a/arch/mips/include/asm/edac.h b/arch/mips/include/asm/edac.h
index ae6fedcb0060..94105d3f58f4 100644
--- a/arch/mips/include/asm/edac.h
+++ b/arch/mips/include/asm/edac.h
@@ -26,8 +26,8 @@ static inline void atomic_scrub(void *va, u32 size)
" sc %0, %1 \n"
" beqz %0, 1b \n"
" .set mips0 \n"
- : "=&r" (temp), "=" GCC_OFF12_ASM() (*virt_addr)
- : GCC_OFF12_ASM() (*virt_addr));
+ : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*virt_addr)
+ : GCC_OFF_SMALL_ASM() (*virt_addr));
virt_addr++;
}
diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
index eb4d95de619c..535f196ffe02 100644
--- a/arch/mips/include/asm/elf.h
+++ b/arch/mips/include/asm/elf.h
@@ -417,13 +417,15 @@ extern unsigned long arch_randomize_brk(struct mm_struct *mm);
struct arch_elf_state {
int fp_abi;
int interp_fp_abi;
- int overall_abi;
+ int overall_fp_mode;
};
+#define MIPS_ABI_FP_UNKNOWN (-1) /* Unknown FP ABI (kernel internal) */
+
#define INIT_ARCH_ELF_STATE { \
- .fp_abi = -1, \
- .interp_fp_abi = -1, \
- .overall_abi = -1, \
+ .fp_abi = MIPS_ABI_FP_UNKNOWN, \
+ .interp_fp_abi = MIPS_ABI_FP_UNKNOWN, \
+ .overall_fp_mode = -1, \
}
extern int arch_elf_pt_proc(void *ehdr, void *phdr, struct file *elf,
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index 994d21939676..dd083e999b08 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -64,18 +64,21 @@ static inline int __enable_fpu(enum fpu_mode mode)
return SIGFPE;
/* set FRE */
- write_c0_config5(read_c0_config5() | MIPS_CONF5_FRE);
+ set_c0_config5(MIPS_CONF5_FRE);
goto fr_common;
case FPU_64BIT:
-#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_64BIT))
+#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) \
+ || defined(CONFIG_64BIT))
/* we only have a 32-bit FPU */
return SIGFPE;
#endif
/* fall through */
case FPU_32BIT:
- /* clear FRE */
- write_c0_config5(read_c0_config5() & ~MIPS_CONF5_FRE);
+ if (cpu_has_fre) {
+ /* clear FRE */
+ clear_c0_config5(MIPS_CONF5_FRE);
+ }
fr_common:
/* set CU1 & change FR appropriately */
fr = (int)mode & FPU_FR_MASK;
@@ -182,25 +185,32 @@ static inline int init_fpu(void)
int ret = 0;
if (cpu_has_fpu) {
+ unsigned int config5;
+
ret = __own_fpu();
- if (!ret) {
- unsigned int config5 = read_c0_config5();
-
- /*
- * Ensure FRE is clear whilst running _init_fpu, since
- * single precision FP instructions are used. If FRE
- * was set then we'll just end up initialising all 32
- * 64b registers.
- */
- write_c0_config5(config5 & ~MIPS_CONF5_FRE);
- enable_fpu_hazard();
+ if (ret)
+ return ret;
+ if (!cpu_has_fre) {
_init_fpu();
- /* Restore FRE */
- write_c0_config5(config5);
- enable_fpu_hazard();
+ return 0;
}
+
+ /*
+ * Ensure FRE is clear whilst running _init_fpu, since
+ * single precision FP instructions are used. If FRE
+ * was set then we'll just end up initialising all 32
+ * 64b registers.
+ */
+ config5 = clear_c0_config5(MIPS_CONF5_FRE);
+ enable_fpu_hazard();
+
+ _init_fpu();
+
+ /* Restore FRE */
+ write_c0_config5(config5);
+ enable_fpu_hazard();
} else
fpu_emulator_init_fpu();
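
The rewritten init_fpu() leans on the fact that set_c0_config5()/clear_c0_config5() (generated by __BUILD_SET_C0(config5) in mipsregs.h) return the previous register value, which is why a plain write_c0_config5(config5) is enough to restore FRE afterwards. A compressed sketch of that pattern (illustrative, not the kernel function):

static inline void fre_save_clear_restore(void)
{
	unsigned int config5;

	config5 = clear_c0_config5(MIPS_CONF5_FRE);	/* returns old Config5 */
	enable_fpu_hazard();

	/* ...run single-precision FP setup with FRE clear... */

	write_c0_config5(config5);			/* put FRE back as found */
	enable_fpu_hazard();
}
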
diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h
index ef9987a61d88..1de190bdfb9c 100644
--- a/arch/mips/include/asm/futex.h
+++ b/arch/mips/include/asm/futex.h
@@ -45,19 +45,19 @@
" "__UA_ADDR "\t2b, 4b \n" \
" .previous \n" \
: "=r" (ret), "=&r" (oldval), \
- "=" GCC_OFF12_ASM() (*uaddr) \
- : "0" (0), GCC_OFF12_ASM() (*uaddr), "Jr" (oparg), \
+ "=" GCC_OFF_SMALL_ASM() (*uaddr) \
+ : "0" (0), GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oparg), \
"i" (-EFAULT) \
: "memory"); \
} else if (cpu_has_llsc) { \
__asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
- " .set arch=r4000 \n" \
+ " .set "MIPS_ISA_ARCH_LEVEL" \n" \
"1: "user_ll("%1", "%4")" # __futex_atomic_op\n" \
" .set mips0 \n" \
" " insn " \n" \
- " .set arch=r4000 \n" \
+ " .set "MIPS_ISA_ARCH_LEVEL" \n" \
"2: "user_sc("$1", "%2")" \n" \
" beqz $1, 1b \n" \
__WEAK_LLSC_MB \
@@ -74,8 +74,8 @@
" "__UA_ADDR "\t2b, 4b \n" \
" .previous \n" \
: "=r" (ret), "=&r" (oldval), \
- "=" GCC_OFF12_ASM() (*uaddr) \
- : "0" (0), GCC_OFF12_ASM() (*uaddr), "Jr" (oparg), \
+ "=" GCC_OFF_SMALL_ASM() (*uaddr) \
+ : "0" (0), GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oparg), \
"i" (-EFAULT) \
: "memory"); \
} else \
@@ -174,8 +174,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
" "__UA_ADDR "\t1b, 4b \n"
" "__UA_ADDR "\t2b, 4b \n"
" .previous \n"
- : "+r" (ret), "=&r" (val), "=" GCC_OFF12_ASM() (*uaddr)
- : GCC_OFF12_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
+ : "+r" (ret), "=&r" (val), "=" GCC_OFF_SMALL_ASM() (*uaddr)
+ : GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
"i" (-EFAULT)
: "memory");
} else if (cpu_has_llsc) {
@@ -183,12 +183,12 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
"# futex_atomic_cmpxchg_inatomic \n"
" .set push \n"
" .set noat \n"
- " .set arch=r4000 \n"
+ " .set "MIPS_ISA_ARCH_LEVEL" \n"
"1: "user_ll("%1", "%3")" \n"
" bne %1, %z4, 3f \n"
" .set mips0 \n"
" move $1, %z5 \n"
- " .set arch=r4000 \n"
+ " .set "MIPS_ISA_ARCH_LEVEL" \n"
"2: "user_sc("$1", "%2")" \n"
" beqz $1, 1b \n"
__WEAK_LLSC_MB
@@ -203,8 +203,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
" "__UA_ADDR "\t1b, 4b \n"
" "__UA_ADDR "\t2b, 4b \n"
" .previous \n"
- : "+r" (ret), "=&r" (val), "=" GCC_OFF12_ASM() (*uaddr)
- : GCC_OFF12_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
+ : "+r" (ret), "=&r" (val), "=" GCC_OFF_SMALL_ASM() (*uaddr)
+ : GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
"i" (-EFAULT)
: "memory");
} else
diff --git a/arch/mips/include/asm/fw/arc/hinv.h b/arch/mips/include/asm/fw/arc/hinv.h
index f8d37d1df5de..9fac64a26353 100644
--- a/arch/mips/include/asm/fw/arc/hinv.h
+++ b/arch/mips/include/asm/fw/arc/hinv.h
@@ -119,7 +119,7 @@ union key_u {
#define SGI_ARCS_REV 10 /* rev .10, 3/04/92 */
#endif
-typedef struct component {
+typedef struct {
CONFIGCLASS Class;
CONFIGTYPE Type;
IDENTIFIERFLAG Flags;
@@ -140,7 +140,7 @@ struct cfgdata {
};
/* System ID */
-typedef struct systemid {
+typedef struct {
CHAR VendorId[8];
CHAR ProductId[8];
} SYSTEMID;
@@ -166,7 +166,7 @@ typedef enum memorytype {
#endif /* _NT_PROM */
} MEMORYTYPE;
-typedef struct memorydescriptor {
+typedef struct {
MEMORYTYPE Type;
LONG BasePage;
LONG PageCount;
diff --git a/arch/mips/include/asm/gio_device.h b/arch/mips/include/asm/gio_device.h
index 4be1a57cdbb0..71a986e9b694 100644
--- a/arch/mips/include/asm/gio_device.h
+++ b/arch/mips/include/asm/gio_device.h
@@ -25,8 +25,6 @@ struct gio_driver {
int (*probe)(struct gio_device *, const struct gio_device_id *);
void (*remove)(struct gio_device *);
- int (*suspend)(struct gio_device *, pm_message_t);
- int (*resume)(struct gio_device *);
void (*shutdown)(struct gio_device *);
struct device_driver driver;
diff --git a/arch/mips/include/asm/hazards.h b/arch/mips/include/asm/hazards.h
index e3ee92d4dbe7..4087b47ad1cb 100644
--- a/arch/mips/include/asm/hazards.h
+++ b/arch/mips/include/asm/hazards.h
@@ -11,6 +11,7 @@
#define _ASM_HAZARDS_H
#include <linux/stringify.h>
+#include <asm/compiler.h>
#define ___ssnop \
sll $0, $0, 1
@@ -21,7 +22,7 @@
/*
* TLB hazards
*/
-#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_CAVIUM_OCTEON)
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) && !defined(CONFIG_CPU_CAVIUM_OCTEON)
/*
* MIPSR2 defines ehb for hazard avoidance
@@ -58,7 +59,7 @@ do { \
unsigned long tmp; \
\
__asm__ __volatile__( \
- " .set mips64r2 \n" \
+ " .set "MIPS_ISA_LEVEL" \n" \
" dla %0, 1f \n" \
" jr.hb %0 \n" \
" .set mips0 \n" \
@@ -132,7 +133,7 @@ do { \
#define instruction_hazard() \
do { \
- if (cpu_has_mips_r2) \
+ if (cpu_has_mips_r2_r6) \
__instruction_hazard(); \
} while (0)
@@ -240,7 +241,7 @@ do { \
#define __disable_fpu_hazard
-#elif defined(CONFIG_CPU_MIPSR2)
+#elif defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
#define __enable_fpu_hazard \
___ehb
diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h
index 0fa5fdcd1f01..d60cc68fa31e 100644
--- a/arch/mips/include/asm/irqflags.h
+++ b/arch/mips/include/asm/irqflags.h
@@ -15,9 +15,10 @@
#include <linux/compiler.h>
#include <linux/stringify.h>
+#include <asm/compiler.h>
#include <asm/hazards.h>
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined (CONFIG_CPU_MIPSR6)
static inline void arch_local_irq_disable(void)
{
@@ -118,7 +119,7 @@ void arch_local_irq_disable(void);
unsigned long arch_local_irq_save(void);
void arch_local_irq_restore(unsigned long flags);
void __arch_local_irq_restore(unsigned long flags);
-#endif /* CONFIG_CPU_MIPSR2 */
+#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
static inline void arch_local_irq_enable(void)
{
@@ -126,7 +127,7 @@ static inline void arch_local_irq_enable(void)
" .set push \n"
" .set reorder \n"
" .set noat \n"
-#if defined(CONFIG_CPU_MIPSR2)
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
" ei \n"
#else
" mfc0 $1,$12 \n"
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index f2c249796ea8..ac4fc716062b 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -120,6 +120,7 @@ struct kvm_vcpu_stat {
u32 resvd_inst_exits;
u32 break_inst_exits;
u32 flush_dcache_exits;
+ u32 halt_successful_poll;
u32 halt_wakeup;
};
diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
index 46dfc3c1fd49..8feaed62a2ab 100644
--- a/arch/mips/include/asm/local.h
+++ b/arch/mips/include/asm/local.h
@@ -5,6 +5,7 @@
#include <linux/bitops.h>
#include <linux/atomic.h>
#include <asm/cmpxchg.h>
+#include <asm/compiler.h>
#include <asm/war.h>
typedef struct
@@ -47,7 +48,7 @@ static __inline__ long local_add_return(long i, local_t * l)
unsigned long temp;
__asm__ __volatile__(
- " .set arch=r4000 \n"
+ " .set "MIPS_ISA_ARCH_LEVEL" \n"
"1:" __LL "%1, %2 # local_add_return \n"
" addu %0, %1, %3 \n"
__SC "%0, %2 \n"
@@ -92,7 +93,7 @@ static __inline__ long local_sub_return(long i, local_t * l)
unsigned long temp;
__asm__ __volatile__(
- " .set arch=r4000 \n"
+ " .set "MIPS_ISA_ARCH_LEVEL" \n"
"1:" __LL "%1, %2 # local_sub_return \n"
" subu %0, %1, %3 \n"
__SC "%0, %2 \n"
diff --git a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
index 1668ee57acb9..cf92fe733995 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
@@ -8,11 +8,10 @@
#ifndef __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H
#define __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H
-
-#define CP0_CYCLE_COUNTER $9, 6
#define CP0_CVMCTL_REG $9, 7
#define CP0_CVMMEMCTL_REG $11,7
#define CP0_PRID_REG $15, 0
+#define CP0_DCACHE_ERR_REG $27, 1
#define CP0_PRID_OCTEON_PASS1 0x000d0000
#define CP0_PRID_OCTEON_CN30XX 0x000d0200
@@ -38,36 +37,55 @@
# Needed for octeon specific memcpy
or v0, v0, 0x5001
xor v0, v0, 0x1001
- # Read the processor ID register
- mfc0 v1, CP0_PRID_REG
- # Disable instruction prefetching (Octeon Pass1 errata)
- or v0, v0, 0x2000
- # Skip reenable of prefetching for Octeon Pass1
- beq v1, CP0_PRID_OCTEON_PASS1, skip
- nop
- # Reenable instruction prefetching, not on Pass1
- xor v0, v0, 0x2000
- # Strip off pass number off of processor id
- srl v1, 8
- sll v1, 8
- # CN30XX needs some extra stuff turned off for better performance
- bne v1, CP0_PRID_OCTEON_CN30XX, skip
- nop
- # CN30XX Use random Icache replacement
- or v0, v0, 0x400
- # CN30XX Disable instruction prefetching
- or v0, v0, 0x2000
-skip:
# First clear off CvmCtl[IPPCI] bit and move the performance
# counters interrupt to IRQ 6
- li v1, ~(7 << 7)
+ dli v1, ~(7 << 7)
and v0, v0, v1
ori v0, v0, (6 << 7)
+
+ mfc0 v1, CP0_PRID_REG
+ and t1, v1, 0xfff8
+ xor t1, t1, 0x9000 # 63-P1
+ beqz t1, 4f
+ and t1, v1, 0xfff8
+ xor t1, t1, 0x9008 # 63-P2
+ beqz t1, 4f
+ and t1, v1, 0xfff8
+ xor t1, t1, 0x9100 # 68-P1
+ beqz t1, 4f
+ and t1, v1, 0xff00
+ xor t1, t1, 0x9200 # 66-PX
+ bnez t1, 5f # Skip WAR for others.
+ and t1, v1, 0x00ff
+ slti t1, t1, 2 # 66-P1.2 and later good.
+ beqz t1, 5f
+
+4: # core-16057 work around
+ or v0, v0, 0x2000 # Set IPREF bit.
+
+5: # No core-16057 work around
# Write the cavium control register
dmtc0 v0, CP0_CVMCTL_REG
sync
# Flush dcache after config change
cache 9, 0($0)
+ # Zero all of CVMSEG to make sure parity is correct
+ dli v0, CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE
+ dsll v0, 7
+ beqz v0, 2f
+1: dsubu v0, 8
+ sd $0, -32768(v0)
+ bnez v0, 1b
+2:
+ mfc0 v0, CP0_PRID_REG
+ bbit0 v0, 15, 1f
+ # OCTEON II or better have bit 15 set. Clear the error bits.
+ and t1, v0, 0xff00
+ dli v0, 0x9500
+ bge t1, v0, 1f # OCTEON III has no DCACHE_ERR_REG COP0
+ dli v0, 0x27
+ dmtc0 v0, CP0_DCACHE_ERR_REG
+1:
# Get my core id
rdhwr v0, $0
# Jump the master to kernel_entry
diff --git a/arch/mips/include/asm/mach-cavium-octeon/war.h b/arch/mips/include/asm/mach-cavium-octeon/war.h
index eb72b35cf04b..35c80be92207 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/war.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/war.h
@@ -22,4 +22,7 @@
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0
+#define CAVIUM_OCTEON_DCACHE_PREFETCH_WAR \
+ OCTEON_IS_MODEL(OCTEON_CN6XXX)
+
#endif /* __ASM_MIPS_MACH_CAVIUM_OCTEON_WAR_H */
diff --git a/arch/mips/include/asm/mach-jz4740/jz4740_nand.h b/arch/mips/include/asm/mach-jz4740/jz4740_nand.h
index 986982db7c38..79cff26d8b36 100644
--- a/arch/mips/include/asm/mach-jz4740/jz4740_nand.h
+++ b/arch/mips/include/asm/mach-jz4740/jz4740_nand.h
@@ -27,8 +27,6 @@ struct jz_nand_platform_data {
struct nand_ecclayout *ecc_layout;
- unsigned int busy_gpio;
-
unsigned char banks[JZ_NAND_NUM_BANKS];
void (*ident_callback)(struct platform_device *, struct nand_chip *,
diff --git a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
index 2e54b4bff5cf..90dbe43c8d27 100644
--- a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
+++ b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
@@ -85,8 +85,8 @@ static inline void set_value_reg32(volatile u32 *const addr,
" "__beqz"%0, 1b \n"
" nop \n"
" .set pop \n"
- : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr)
- : "ir" (~mask), "ir" (value), GCC_OFF12_ASM() (*addr));
+ : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr)
+ : "ir" (~mask), "ir" (value), GCC_OFF_SMALL_ASM() (*addr));
}
/*
@@ -106,8 +106,8 @@ static inline void set_reg32(volatile u32 *const addr,
" "__beqz"%0, 1b \n"
" nop \n"
" .set pop \n"
- : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr)
- : "ir" (mask), GCC_OFF12_ASM() (*addr));
+ : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr)
+ : "ir" (mask), GCC_OFF_SMALL_ASM() (*addr));
}
/*
@@ -127,8 +127,8 @@ static inline void clear_reg32(volatile u32 *const addr,
" "__beqz"%0, 1b \n"
" nop \n"
" .set pop \n"
- : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr)
- : "ir" (~mask), GCC_OFF12_ASM() (*addr));
+ : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr)
+ : "ir" (~mask), GCC_OFF_SMALL_ASM() (*addr));
}
/*
@@ -148,8 +148,8 @@ static inline void toggle_reg32(volatile u32 *const addr,
" "__beqz"%0, 1b \n"
" nop \n"
" .set pop \n"
- : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr)
- : "ir" (mask), GCC_OFF12_ASM() (*addr));
+ : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr)
+ : "ir" (mask), GCC_OFF_SMALL_ASM() (*addr));
}
/*
@@ -220,8 +220,8 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr)
" .set arch=r4000 \n" \
"1: ll %0, %1 #custom_read_reg32 \n" \
" .set pop \n" \
- : "=r" (tmp), "=" GCC_OFF12_ASM() (*address) \
- : GCC_OFF12_ASM() (*address))
+ : "=r" (tmp), "=" GCC_OFF_SMALL_ASM() (*address) \
+ : GCC_OFF_SMALL_ASM() (*address))
#define custom_write_reg32(address, tmp) \
__asm__ __volatile__( \
@@ -231,7 +231,7 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr)
" "__beqz"%0, 1b \n" \
" nop \n" \
" .set pop \n" \
- : "=&r" (tmp), "=" GCC_OFF12_ASM() (*address) \
- : "0" (tmp), GCC_OFF12_ASM() (*address))
+ : "=&r" (tmp), "=" GCC_OFF_SMALL_ASM() (*address) \
+ : "0" (tmp), GCC_OFF_SMALL_ASM() (*address))
#endif /* __ASM_REGOPS_H__ */
diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h
index b95a827d763e..59c0901bdd84 100644
--- a/arch/mips/include/asm/mips-cm.h
+++ b/arch/mips/include/asm/mips-cm.h
@@ -89,9 +89,9 @@ static inline bool mips_cm_has_l2sync(void)
/* Macros to ease the creation of register access functions */
#define BUILD_CM_R_(name, off) \
-static inline u32 *addr_gcr_##name(void) \
+static inline u32 __iomem *addr_gcr_##name(void) \
{ \
- return (u32 *)(mips_cm_base + (off)); \
+ return (u32 __iomem *)(mips_cm_base + (off)); \
} \
\
static inline u32 read_gcr_##name(void) \
diff --git a/arch/mips/include/asm/mips-r2-to-r6-emul.h b/arch/mips/include/asm/mips-r2-to-r6-emul.h
new file mode 100644
index 000000000000..60570f2c3ba2
--- /dev/null
+++ b/arch/mips/include/asm/mips-r2-to-r6-emul.h
@@ -0,0 +1,96 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2014 Imagination Technologies Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ */
+
+#ifndef __ASM_MIPS_R2_TO_R6_EMUL_H
+#define __ASM_MIPS_R2_TO_R6_EMUL_H
+
+struct mips_r2_emulator_stats {
+ u64 movs;
+ u64 hilo;
+ u64 muls;
+ u64 divs;
+ u64 dsps;
+ u64 bops;
+ u64 traps;
+ u64 fpus;
+ u64 loads;
+ u64 stores;
+ u64 llsc;
+ u64 dsemul;
+};
+
+struct mips_r2br_emulator_stats {
+ u64 jrs;
+ u64 bltzl;
+ u64 bgezl;
+ u64 bltzll;
+ u64 bgezll;
+ u64 bltzall;
+ u64 bgezall;
+ u64 bltzal;
+ u64 bgezal;
+ u64 beql;
+ u64 bnel;
+ u64 blezl;
+ u64 bgtzl;
+};
+
+#ifdef CONFIG_DEBUG_FS
+
+#define MIPS_R2_STATS(M) \
+do { \
+ u32 nir; \
+ int err; \
+ \
+ preempt_disable(); \
+ __this_cpu_inc(mipsr2emustats.M); \
+ err = __get_user(nir, (u32 __user *)regs->cp0_epc); \
+ if (!err) { \
+ if (nir == BREAK_MATH) \
+ __this_cpu_inc(mipsr2bdemustats.M); \
+ } \
+ preempt_enable(); \
+} while (0)
+
+#define MIPS_R2BR_STATS(M) \
+do { \
+ preempt_disable(); \
+ __this_cpu_inc(mipsr2bremustats.M); \
+ preempt_enable(); \
+} while (0)
+
+#else
+
+#define MIPS_R2_STATS(M) do { } while (0)
+#define MIPS_R2BR_STATS(M) do { } while (0)
+
+#endif /* CONFIG_DEBUG_FS */
+
+struct r2_decoder_table {
+ u32 mask;
+ u32 code;
+ int (*func)(struct pt_regs *regs, u32 inst);
+};
+
+
+extern void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
+ const char *str);
+
+#ifndef CONFIG_MIPSR2_TO_R6_EMULATOR
+static int mipsr2_emulation;
+static __maybe_unused int mipsr2_decoder(struct pt_regs *regs, u32 inst) { return 0; };
+#else
+/* MIPS R2 Emulator ON/OFF */
+extern int mipsr2_emulation;
+extern int mipsr2_decoder(struct pt_regs *regs, u32 inst);
+#endif /* CONFIG_MIPSR2_TO_R6_EMULATOR */
+
+#define NO_R6EMU (cpu_has_mips_r6 && !mipsr2_emulation)
+
+#endif /* __ASM_MIPS_R2_TO_R6_EMUL_H */
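
The new header gives the R2-on-R6 emulator its bookkeeping (per-instruction-class counters exposed via debugfs) and the r2_decoder_table format that mipsr2_decoder() walks: mask/code select an instruction encoding and func performs the emulation against pt_regs. A hypothetical table entry, purely to show the shape (the opcode values and handler are made up, not the kernel's real tables):

/* Hypothetical handler: emulate one R2-only encoding, bump the stats. */
static int emulate_example(struct pt_regs *regs, u32 inst)
{
	MIPS_R2_STATS(movs);		/* debugfs counter, if enabled */
	/* ...decode rs/rt/rd from inst and update regs->regs[]... */
	return 0;			/* 0 = handled; other returns are decoder-specific */
}

static const struct r2_decoder_table example_table[] = {
	{ 0xfc0007ff, 0x00000021, emulate_example },	/* made-up mask/code */
	{ }						/* sentinel */
};
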
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 5e4aef304b02..fef004434096 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -653,6 +653,7 @@
#define MIPS_CONF5_NF (_ULCAST_(1) << 0)
#define MIPS_CONF5_UFR (_ULCAST_(1) << 2)
#define MIPS_CONF5_MRP (_ULCAST_(1) << 3)
+#define MIPS_CONF5_LLB (_ULCAST_(1) << 4)
#define MIPS_CONF5_MVH (_ULCAST_(1) << 5)
#define MIPS_CONF5_FRE (_ULCAST_(1) << 8)
#define MIPS_CONF5_UFE (_ULCAST_(1) << 9)
@@ -1127,6 +1128,8 @@ do { \
#define write_c0_config6(val) __write_32bit_c0_register($16, 6, val)
#define write_c0_config7(val) __write_32bit_c0_register($16, 7, val)
+#define read_c0_lladdr() __read_ulong_c0_register($17, 0)
+#define write_c0_lladdr(val) __write_ulong_c0_register($17, 0, val)
#define read_c0_maar() __read_ulong_c0_register($17, 1)
#define write_c0_maar(val) __write_ulong_c0_register($17, 1, val)
#define read_c0_maari() __read_32bit_c0_register($17, 2)
@@ -1386,12 +1389,27 @@ do { \
__res; \
})
+#define _write_32bit_cp1_register(dest, val, gas_hardfloat) \
+do { \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set reorder \n" \
+ " "STR(gas_hardfloat)" \n" \
+ " ctc1 %0,"STR(dest)" \n" \
+ " .set pop \n" \
+ : : "r" (val)); \
+} while (0)
+
#ifdef GAS_HAS_SET_HARDFLOAT
#define read_32bit_cp1_register(source) \
_read_32bit_cp1_register(source, .set hardfloat)
+#define write_32bit_cp1_register(dest, val) \
+ _write_32bit_cp1_register(dest, val, .set hardfloat)
#else
#define read_32bit_cp1_register(source) \
_read_32bit_cp1_register(source, )
+#define write_32bit_cp1_register(dest, val) \
+ _write_32bit_cp1_register(dest, val, )
#endif
#ifdef HAVE_AS_DSP
@@ -1894,6 +1912,7 @@ __BUILD_SET_C0(config5)
__BUILD_SET_C0(intcontrol)
__BUILD_SET_C0(intctl)
__BUILD_SET_C0(srsmap)
+__BUILD_SET_C0(pagegrain)
__BUILD_SET_C0(brcm_config_0)
__BUILD_SET_C0(brcm_bus_pll)
__BUILD_SET_C0(brcm_reset)
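
The mipsregs.h additions are mostly plumbing for the rest of the series: read/write accessors for LLAddr (paired with the new MIPS_CONF5_LLB and MIPS_CPU_RW_LLB bits), write_32bit_cp1_register() as the ctc1 counterpart to the existing cfc1-based reader, and __BUILD_SET_C0(pagegrain) so callers get set/clear/change helpers that return the old PageGrain value. A usage sketch, assuming CP1_STATUS and PG_ELPA keep their existing mipsregs.h definitions:

static inline void cp1_and_pagegrain_sketch(void)
{
	unsigned int fcsr, old_pagegrain;

	fcsr = read_32bit_cp1_register(CP1_STATUS);	/* cfc1 $31 */
	write_32bit_cp1_register(CP1_STATUS, fcsr);	/* ctc1 $31, written back unchanged */

	old_pagegrain = set_c0_pagegrain(PG_ELPA);	/* returns previous PageGrain */
	(void)old_pagegrain;
}
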
diff --git a/arch/mips/include/asm/mmu.h b/arch/mips/include/asm/mmu.h
index c436138945a8..1afa1f986df8 100644
--- a/arch/mips/include/asm/mmu.h
+++ b/arch/mips/include/asm/mmu.h
@@ -1,9 +1,12 @@
#ifndef __ASM_MMU_H
#define __ASM_MMU_H
+#include <linux/atomic.h>
+
typedef struct {
unsigned long asid[NR_CPUS];
void *vdso;
+ atomic_t fp_mode_switching;
} mm_context_t;
#endif /* __ASM_MMU_H */
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index 2f82568a3ee4..45914b59824c 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -25,7 +25,6 @@ do { \
if (cpu_has_htw) { \
write_c0_pwbase(pgd); \
back_to_back_c0_hazard(); \
- htw_reset(); \
} \
} while (0)
@@ -132,6 +131,8 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
for_each_possible_cpu(i)
cpu_context(i, mm) = 0;
+ atomic_set(&mm->context.fp_mode_switching, 0);
+
return 0;
}
@@ -142,6 +143,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
unsigned long flags;
local_irq_save(flags);
+ htw_stop();
/* Check if our ASID is of an older version and thus invalid */
if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK)
get_new_mmu_context(next, cpu);
@@ -154,6 +156,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
*/
cpumask_clear_cpu(cpu, mm_cpumask(prev));
cpumask_set_cpu(cpu, mm_cpumask(next));
+ htw_start();
local_irq_restore(flags);
}
@@ -180,6 +183,7 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
local_irq_save(flags);
+ htw_stop();
/* Unconditionally get a new ASID. */
get_new_mmu_context(next, cpu);
@@ -189,6 +193,7 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
/* mark mmu ownership change */
cpumask_clear_cpu(cpu, mm_cpumask(prev));
cpumask_set_cpu(cpu, mm_cpumask(next));
+ htw_start();
local_irq_restore(flags);
}
@@ -203,6 +208,7 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu)
unsigned long flags;
local_irq_save(flags);
+ htw_stop();
if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
get_new_mmu_context(mm, cpu);
@@ -211,6 +217,7 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu)
/* will get a new context next time */
cpu_context(cpu, mm) = 0;
}
+ htw_start();
local_irq_restore(flags);
}
diff --git a/arch/mips/include/asm/module.h b/arch/mips/include/asm/module.h
index 800fe578dc99..0aaf9a01ea50 100644
--- a/arch/mips/include/asm/module.h
+++ b/arch/mips/include/asm/module.h
@@ -88,10 +88,14 @@ search_module_dbetables(unsigned long addr)
#define MODULE_PROC_FAMILY "MIPS32_R1 "
#elif defined CONFIG_CPU_MIPS32_R2
#define MODULE_PROC_FAMILY "MIPS32_R2 "
+#elif defined CONFIG_CPU_MIPS32_R6
+#define MODULE_PROC_FAMILY "MIPS32_R6 "
#elif defined CONFIG_CPU_MIPS64_R1
#define MODULE_PROC_FAMILY "MIPS64_R1 "
#elif defined CONFIG_CPU_MIPS64_R2
#define MODULE_PROC_FAMILY "MIPS64_R2 "
+#elif defined CONFIG_CPU_MIPS64_R6
+#define MODULE_PROC_FAMILY "MIPS64_R6 "
#elif defined CONFIG_CPU_R3000
#define MODULE_PROC_FAMILY "R3000 "
#elif defined CONFIG_CPU_TX39XX
diff --git a/arch/mips/include/asm/octeon/cvmx-cmd-queue.h b/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
index 75739c83f07e..8d05d9069823 100644
--- a/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
+++ b/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
@@ -275,7 +275,7 @@ static inline void __cvmx_cmd_queue_lock(cvmx_cmd_queue_id_t queue_id,
" lbu %[ticket], %[now_serving]\n"
"4:\n"
".set pop\n" :
- [ticket_ptr] "=" GCC_OFF12_ASM()(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]),
+ [ticket_ptr] "=" GCC_OFF_SMALL_ASM()(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]),
[now_serving] "=m"(qptr->now_serving), [ticket] "=r"(tmp),
[my_ticket] "=r"(my_ticket)
);
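
The constraint rename is the only change here; the surrounding asm implements a byte-sized ticket lock over the command-queue state. For readers unfamiliar with the idiom, a plain-C sketch of the same discipline (illustrative only, using GCC atomics rather than the ll/sc sequence above):

#include <stdint.h>

struct ticket_lock {
	uint8_t ticket;		/* next ticket to hand out             */
	uint8_t now_serving;	/* ticket currently allowed to proceed */
};

static void ticket_lock_acquire(struct ticket_lock *l)
{
	uint8_t mine = __atomic_fetch_add(&l->ticket, 1, __ATOMIC_RELAXED);

	while (__atomic_load_n(&l->now_serving, __ATOMIC_ACQUIRE) != mine)
		;	/* spin until our number comes up */
}

static void ticket_lock_release(struct ticket_lock *l)
{
	__atomic_fetch_add(&l->now_serving, 1, __ATOMIC_RELEASE);
}
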
diff --git a/arch/mips/include/asm/octeon/cvmx-rst-defs.h b/arch/mips/include/asm/octeon/cvmx-rst-defs.h
new file mode 100644
index 000000000000..0c9c3e74d4ae
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-rst-defs.h
@@ -0,0 +1,306 @@
+/***********************license start***************
+ * Author: Cavium Inc.
+ *
+ * Contact: support@cavium.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2014 Cavium Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Inc. for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_RST_DEFS_H__
+#define __CVMX_RST_DEFS_H__
+
+#define CVMX_RST_BOOT (CVMX_ADD_IO_SEG(0x0001180006001600ull))
+#define CVMX_RST_CFG (CVMX_ADD_IO_SEG(0x0001180006001610ull))
+#define CVMX_RST_CKILL (CVMX_ADD_IO_SEG(0x0001180006001638ull))
+#define CVMX_RST_CTLX(offset) (CVMX_ADD_IO_SEG(0x0001180006001640ull) + ((offset) & 3) * 8)
+#define CVMX_RST_DELAY (CVMX_ADD_IO_SEG(0x0001180006001608ull))
+#define CVMX_RST_ECO (CVMX_ADD_IO_SEG(0x00011800060017B8ull))
+#define CVMX_RST_INT (CVMX_ADD_IO_SEG(0x0001180006001628ull))
+#define CVMX_RST_OCX (CVMX_ADD_IO_SEG(0x0001180006001618ull))
+#define CVMX_RST_POWER_DBG (CVMX_ADD_IO_SEG(0x0001180006001708ull))
+#define CVMX_RST_PP_POWER (CVMX_ADD_IO_SEG(0x0001180006001700ull))
+#define CVMX_RST_SOFT_PRSTX(offset) (CVMX_ADD_IO_SEG(0x00011800060016C0ull) + ((offset) & 3) * 8)
+#define CVMX_RST_SOFT_RST (CVMX_ADD_IO_SEG(0x0001180006001680ull))
+
+union cvmx_rst_boot {
+ uint64_t u64;
+ struct cvmx_rst_boot_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t chipkill:1;
+ uint64_t jtcsrdis:1;
+ uint64_t ejtagdis:1;
+ uint64_t romen:1;
+ uint64_t ckill_ppdis:1;
+ uint64_t jt_tstmode:1;
+ uint64_t vrm_err:1;
+ uint64_t reserved_37_56:20;
+ uint64_t c_mul:7;
+ uint64_t pnr_mul:6;
+ uint64_t reserved_21_23:3;
+ uint64_t lboot_oci:3;
+ uint64_t lboot_ext:6;
+ uint64_t lboot:10;
+ uint64_t rboot:1;
+ uint64_t rboot_pin:1;
+#else
+ uint64_t rboot_pin:1;
+ uint64_t rboot:1;
+ uint64_t lboot:10;
+ uint64_t lboot_ext:6;
+ uint64_t lboot_oci:3;
+ uint64_t reserved_21_23:3;
+ uint64_t pnr_mul:6;
+ uint64_t c_mul:7;
+ uint64_t reserved_37_56:20;
+ uint64_t vrm_err:1;
+ uint64_t jt_tstmode:1;
+ uint64_t ckill_ppdis:1;
+ uint64_t romen:1;
+ uint64_t ejtagdis:1;
+ uint64_t jtcsrdis:1;
+ uint64_t chipkill:1;
+#endif
+ } s;
+ struct cvmx_rst_boot_s cn70xx;
+ struct cvmx_rst_boot_s cn70xxp1;
+ struct cvmx_rst_boot_s cn78xx;
+};
+
+union cvmx_rst_cfg {
+ uint64_t u64;
+ struct cvmx_rst_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t bist_delay:58;
+ uint64_t reserved_3_5:3;
+ uint64_t cntl_clr_bist:1;
+ uint64_t warm_clr_bist:1;
+ uint64_t soft_clr_bist:1;
+#else
+ uint64_t soft_clr_bist:1;
+ uint64_t warm_clr_bist:1;
+ uint64_t cntl_clr_bist:1;
+ uint64_t reserved_3_5:3;
+ uint64_t bist_delay:58;
+#endif
+ } s;
+ struct cvmx_rst_cfg_s cn70xx;
+ struct cvmx_rst_cfg_s cn70xxp1;
+ struct cvmx_rst_cfg_s cn78xx;
+};
+
+union cvmx_rst_ckill {
+ uint64_t u64;
+ struct cvmx_rst_ckill_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_47_63:17;
+ uint64_t timer:47;
+#else
+ uint64_t timer:47;
+ uint64_t reserved_47_63:17;
+#endif
+ } s;
+ struct cvmx_rst_ckill_s cn70xx;
+ struct cvmx_rst_ckill_s cn70xxp1;
+ struct cvmx_rst_ckill_s cn78xx;
+};
+
+union cvmx_rst_ctlx {
+ uint64_t u64;
+ struct cvmx_rst_ctlx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_10_63:54;
+ uint64_t prst_link:1;
+ uint64_t rst_done:1;
+ uint64_t rst_link:1;
+ uint64_t host_mode:1;
+ uint64_t reserved_4_5:2;
+ uint64_t rst_drv:1;
+ uint64_t rst_rcv:1;
+ uint64_t rst_chip:1;
+ uint64_t rst_val:1;
+#else
+ uint64_t rst_val:1;
+ uint64_t rst_chip:1;
+ uint64_t rst_rcv:1;
+ uint64_t rst_drv:1;
+ uint64_t reserved_4_5:2;
+ uint64_t host_mode:1;
+ uint64_t rst_link:1;
+ uint64_t rst_done:1;
+ uint64_t prst_link:1;
+ uint64_t reserved_10_63:54;
+#endif
+ } s;
+ struct cvmx_rst_ctlx_s cn70xx;
+ struct cvmx_rst_ctlx_s cn70xxp1;
+ struct cvmx_rst_ctlx_s cn78xx;
+};
+
+union cvmx_rst_delay {
+ uint64_t u64;
+ struct cvmx_rst_delay_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t warm_rst_dly:16;
+ uint64_t soft_rst_dly:16;
+#else
+ uint64_t soft_rst_dly:16;
+ uint64_t warm_rst_dly:16;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+ struct cvmx_rst_delay_s cn70xx;
+ struct cvmx_rst_delay_s cn70xxp1;
+ struct cvmx_rst_delay_s cn78xx;
+};
+
+union cvmx_rst_eco {
+ uint64_t u64;
+ struct cvmx_rst_eco_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_32_63:32;
+ uint64_t eco_rw:32;
+#else
+ uint64_t eco_rw:32;
+ uint64_t reserved_32_63:32;
+#endif
+ } s;
+ struct cvmx_rst_eco_s cn78xx;
+};
+
+union cvmx_rst_int {
+ uint64_t u64;
+ struct cvmx_rst_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_12_63:52;
+ uint64_t perst:4;
+ uint64_t reserved_4_7:4;
+ uint64_t rst_link:4;
+#else
+ uint64_t rst_link:4;
+ uint64_t reserved_4_7:4;
+ uint64_t perst:4;
+ uint64_t reserved_12_63:52;
+#endif
+ } s;
+ struct cvmx_rst_int_cn70xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_11_63:53;
+ uint64_t perst:3;
+ uint64_t reserved_3_7:5;
+ uint64_t rst_link:3;
+#else
+ uint64_t rst_link:3;
+ uint64_t reserved_3_7:5;
+ uint64_t perst:3;
+ uint64_t reserved_11_63:53;
+#endif
+ } cn70xx;
+ struct cvmx_rst_int_cn70xx cn70xxp1;
+ struct cvmx_rst_int_s cn78xx;
+};
+
+union cvmx_rst_ocx {
+ uint64_t u64;
+ struct cvmx_rst_ocx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63:61;
+ uint64_t rst_link:3;
+#else
+ uint64_t rst_link:3;
+ uint64_t reserved_3_63:61;
+#endif
+ } s;
+ struct cvmx_rst_ocx_s cn78xx;
+};
+
+union cvmx_rst_power_dbg {
+ uint64_t u64;
+ struct cvmx_rst_power_dbg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63:61;
+ uint64_t str:3;
+#else
+ uint64_t str:3;
+ uint64_t reserved_3_63:61;
+#endif
+ } s;
+ struct cvmx_rst_power_dbg_s cn78xx;
+};
+
+union cvmx_rst_pp_power {
+ uint64_t u64;
+ struct cvmx_rst_pp_power_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63:16;
+ uint64_t gate:48;
+#else
+ uint64_t gate:48;
+ uint64_t reserved_48_63:16;
+#endif
+ } s;
+ struct cvmx_rst_pp_power_cn70xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_4_63:60;
+ uint64_t gate:4;
+#else
+ uint64_t gate:4;
+ uint64_t reserved_4_63:60;
+#endif
+ } cn70xx;
+ struct cvmx_rst_pp_power_cn70xx cn70xxp1;
+ struct cvmx_rst_pp_power_s cn78xx;
+};
+
+union cvmx_rst_soft_prstx {
+ uint64_t u64;
+ struct cvmx_rst_soft_prstx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63:63;
+ uint64_t soft_prst:1;
+#else
+ uint64_t soft_prst:1;
+ uint64_t reserved_1_63:63;
+#endif
+ } s;
+ struct cvmx_rst_soft_prstx_s cn70xx;
+ struct cvmx_rst_soft_prstx_s cn70xxp1;
+ struct cvmx_rst_soft_prstx_s cn78xx;
+};
+
+union cvmx_rst_soft_rst {
+ uint64_t u64;
+ struct cvmx_rst_soft_rst_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63:63;
+ uint64_t soft_rst:1;
+#else
+ uint64_t soft_rst:1;
+ uint64_t reserved_1_63:63;
+#endif
+ } s;
+ struct cvmx_rst_soft_rst_s cn70xx;
+ struct cvmx_rst_soft_rst_s cn70xxp1;
+ struct cvmx_rst_soft_rst_s cn78xx;
+};
+
+#endif
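
These unions all follow the usual Octeon CSR idiom: read the whole 64-bit register into .u64, then pick fields out of the endian-aware bitfield view. A minimal usage sketch, assuming cvmx_read_csr() and <linux/printk.h> from the surrounding Octeon code (the helper below is illustrative, not part of this patch):

	/* Illustrative only: report the last-boot state using the
	 * RST_BOOT layout defined above. */
	static void octeon3_show_rst_boot(void)
	{
		union cvmx_rst_boot rst_boot;

		rst_boot.u64 = cvmx_read_csr(CVMX_RST_BOOT);
		pr_info("rboot=%u lboot=%#x pnr_mul=%u\n",
			(unsigned int)rst_boot.s.rboot,
			(unsigned int)rst_boot.s.lboot,
			(unsigned int)rst_boot.s.pnr_mul);
	}

Note that CVMX_RST_CTLX(offset) folds the index into the low address bits, so CVMX_RST_CTLX(2) points at register offset 0x0001180006001650ull (before the I/O segment bits are added).
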
diff --git a/arch/mips/include/asm/octeon/octeon-feature.h b/arch/mips/include/asm/octeon/octeon-feature.h
index c4fe81f47f53..8ebd3f579b84 100644
--- a/arch/mips/include/asm/octeon/octeon-feature.h
+++ b/arch/mips/include/asm/octeon/octeon-feature.h
@@ -46,8 +46,6 @@ enum octeon_feature {
OCTEON_FEATURE_SAAD,
/* Does this Octeon support the ZIP offload engine? */
OCTEON_FEATURE_ZIP,
- /* Does this Octeon support crypto acceleration using COP2? */
- OCTEON_FEATURE_CRYPTO,
OCTEON_FEATURE_DORM_CRYPTO,
/* Does this Octeon support PCI express? */
OCTEON_FEATURE_PCIE,
@@ -86,6 +84,21 @@ enum octeon_feature {
OCTEON_MAX_FEATURE
};
+enum octeon_feature_bits {
+ OCTEON_HAS_CRYPTO = 0x0001, /* Crypto acceleration using COP2 */
+};
+extern enum octeon_feature_bits __octeon_feature_bits;
+
+/**
+ * octeon_has_crypto() - Check if this OCTEON has crypto acceleration support.
+ *
+ * Returns: Non-zero if the feature exists. Zero if the feature does not exist.
+ */
+static inline int octeon_has_crypto(void)
+{
+ return __octeon_feature_bits & OCTEON_HAS_CRYPTO;
+}
+
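
A hedged sketch of how a caller is expected to use the new runtime bit; the init function and error handling here are illustrative, not taken from this patch:

	/* Hypothetical module init: refuse to load on parts without COP2 crypto. */
	static int __init my_octeon_crypto_init(void)
	{
		if (!octeon_has_crypto())
			return -ENODEV;
		/* ... register the COP2-backed algorithms here ... */
		return 0;
	}

Keeping the check behind a single feature bit avoids sprinkling OCTEON_IS_MODEL() model lists through every crypto driver.
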
/**
* Determine if the current Octeon supports a specific feature. These
* checks have been optimized to be fairly quick, but they should still
diff --git a/arch/mips/include/asm/octeon/octeon-model.h b/arch/mips/include/asm/octeon/octeon-model.h
index e8a1c2fd52cd..92b377e36dac 100644
--- a/arch/mips/include/asm/octeon/octeon-model.h
+++ b/arch/mips/include/asm/octeon/octeon-model.h
@@ -45,6 +45,7 @@
*/
#define OCTEON_FAMILY_MASK 0x00ffff00
+#define OCTEON_PRID_MASK 0x00ffffff
/* Flag bits in top byte */
/* Ignores revision in model checks */
@@ -63,11 +64,52 @@
#define OM_MATCH_6XXX_FAMILY_MODELS 0x40000000
/* Match all cnf7XXX Octeon models. */
#define OM_MATCH_F7XXX_FAMILY_MODELS 0x80000000
+/* Match all cn7XXX Octeon models. */
+#define OM_MATCH_7XXX_FAMILY_MODELS 0x10000000
+#define OM_MATCH_FAMILY_MODELS (OM_MATCH_5XXX_FAMILY_MODELS | \
+ OM_MATCH_6XXX_FAMILY_MODELS | \
+ OM_MATCH_F7XXX_FAMILY_MODELS | \
+ OM_MATCH_7XXX_FAMILY_MODELS)
+/*
+ * CN7XXX models with new revision encoding
+ */
+
+#define OCTEON_CN73XX_PASS1_0 0x000d9700
+#define OCTEON_CN73XX (OCTEON_CN73XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN73XX_PASS1_X (OCTEON_CN73XX_PASS1_0 | \
+ OM_IGNORE_MINOR_REVISION)
+
+#define OCTEON_CN70XX_PASS1_0 0x000d9600
+#define OCTEON_CN70XX_PASS1_1 0x000d9601
+#define OCTEON_CN70XX_PASS1_2 0x000d9602
+
+#define OCTEON_CN70XX_PASS2_0 0x000d9608
+
+#define OCTEON_CN70XX (OCTEON_CN70XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN70XX_PASS1_X (OCTEON_CN70XX_PASS1_0 | \
+ OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN70XX_PASS2_X (OCTEON_CN70XX_PASS2_0 | \
+ OM_IGNORE_MINOR_REVISION)
+
+#define OCTEON_CN71XX OCTEON_CN70XX
+
+#define OCTEON_CN78XX_PASS1_0 0x000d9500
+#define OCTEON_CN78XX_PASS1_1 0x000d9501
+#define OCTEON_CN78XX_PASS2_0 0x000d9508
+
+#define OCTEON_CN78XX (OCTEON_CN78XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN78XX_PASS1_X (OCTEON_CN78XX_PASS1_0 | \
+ OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN78XX_PASS2_X (OCTEON_CN78XX_PASS2_0 | \
+ OM_IGNORE_MINOR_REVISION)
+
+#define OCTEON_CN76XX (0x000d9540 | OM_CHECK_SUBMODEL)
/*
* CNF7XXX models with new revision encoding
*/
#define OCTEON_CNF71XX_PASS1_0 0x000d9400
+#define OCTEON_CNF71XX_PASS1_1 0x000d9401
#define OCTEON_CNF71XX (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_REVISION)
#define OCTEON_CNF71XX_PASS1_X (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
@@ -79,6 +121,8 @@
#define OCTEON_CN68XX_PASS1_1 0x000d9101
#define OCTEON_CN68XX_PASS1_2 0x000d9102
#define OCTEON_CN68XX_PASS2_0 0x000d9108
+#define OCTEON_CN68XX_PASS2_1 0x000d9109
+#define OCTEON_CN68XX_PASS2_2 0x000d910a
#define OCTEON_CN68XX (OCTEON_CN68XX_PASS2_0 | OM_IGNORE_REVISION)
#define OCTEON_CN68XX_PASS1_X (OCTEON_CN68XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
@@ -104,11 +148,18 @@
#define OCTEON_CN63XX_PASS1_X (OCTEON_CN63XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
#define OCTEON_CN63XX_PASS2_X (OCTEON_CN63XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
+/* CN62XX is the same as CN63XX with 1 MB cache */
+#define OCTEON_CN62XX OCTEON_CN63XX
+
#define OCTEON_CN61XX_PASS1_0 0x000d9300
+#define OCTEON_CN61XX_PASS1_1 0x000d9301
#define OCTEON_CN61XX (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_REVISION)
#define OCTEON_CN61XX_PASS1_X (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+/* CN60XX is the same as CN61XX with 512 KB cache */
+#define OCTEON_CN60XX OCTEON_CN61XX
+
/*
* CN5XXX models with new revision encoding
*/
@@ -120,7 +171,7 @@
#define OCTEON_CN58XX_PASS2_2 0x000d030a
#define OCTEON_CN58XX_PASS2_3 0x000d030b
-#define OCTEON_CN58XX (OCTEON_CN58XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN58XX (OCTEON_CN58XX_PASS2_0 | OM_IGNORE_REVISION)
#define OCTEON_CN58XX_PASS1_X (OCTEON_CN58XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
#define OCTEON_CN58XX_PASS2_X (OCTEON_CN58XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
#define OCTEON_CN58XX_PASS1 OCTEON_CN58XX_PASS1_X
@@ -217,12 +268,10 @@
#define OCTEON_CN3XXX (OCTEON_CN58XX_PASS1_0 | OM_MATCH_PREVIOUS_MODELS | OM_IGNORE_REVISION)
#define OCTEON_CN5XXX (OCTEON_CN58XX_PASS1_0 | OM_MATCH_5XXX_FAMILY_MODELS)
#define OCTEON_CN6XXX (OCTEON_CN63XX_PASS1_0 | OM_MATCH_6XXX_FAMILY_MODELS)
-
-/* These are used to cover entire families of OCTEON processors */
-#define OCTEON_FAM_1 (OCTEON_CN3XXX)
-#define OCTEON_FAM_PLUS (OCTEON_CN5XXX)
-#define OCTEON_FAM_1_PLUS (OCTEON_FAM_PLUS | OM_MATCH_PREVIOUS_MODELS)
-#define OCTEON_FAM_2 (OCTEON_CN6XXX)
+#define OCTEON_CNF7XXX (OCTEON_CNF71XX_PASS1_0 | \
+ OM_MATCH_F7XXX_FAMILY_MODELS)
+#define OCTEON_CN7XXX (OCTEON_CN78XX_PASS1_0 | \
+ OM_MATCH_7XXX_FAMILY_MODELS)
/* The revision byte (low byte) has two different encodings.
* CN3XXX:
@@ -232,7 +281,7 @@
* <4>: alternate package
* <3:0>: revision
*
- * CN5XXX:
+ * CN5XXX and newer models:
*
* bits
* <7>: reserved (0)
@@ -251,17 +300,21 @@
/* CN5XXX and later use different layout of bits in the revision ID field */
#define OCTEON_58XX_FAMILY_MASK OCTEON_38XX_FAMILY_MASK
#define OCTEON_58XX_FAMILY_REV_MASK 0x00ffff3f
-#define OCTEON_58XX_MODEL_MASK 0x00ffffc0
+#define OCTEON_58XX_MODEL_MASK 0x00ffff40
#define OCTEON_58XX_MODEL_REV_MASK (OCTEON_58XX_FAMILY_REV_MASK | OCTEON_58XX_MODEL_MASK)
-#define OCTEON_58XX_MODEL_MINOR_REV_MASK (OCTEON_58XX_MODEL_REV_MASK & 0x00fffff8)
+#define OCTEON_58XX_MODEL_MINOR_REV_MASK (OCTEON_58XX_MODEL_REV_MASK & 0x00ffff38)
#define OCTEON_5XXX_MODEL_MASK 0x00ff0fc0
-/* forward declarations */
static inline uint32_t cvmx_get_proc_id(void) __attribute__ ((pure));
static inline uint64_t cvmx_read_csr(uint64_t csr_addr);
#define __OCTEON_MATCH_MASK__(x, y, z) (((x) & (z)) == ((y) & (z)))
+/*
+ * __OCTEON_IS_MODEL_COMPILE__(arg_model, chip_model)
+ * returns true if chip_model is identical to, or belongs to, the OCTEON
+ * model group specified in arg_model.
+ */
/* NOTE: This is for internal use only! */
#define __OCTEON_IS_MODEL_COMPILE__(arg_model, chip_model) \
((((arg_model & OCTEON_38XX_FAMILY_MASK) < OCTEON_CN58XX_PASS1_0) && ( \
@@ -286,11 +339,18 @@ static inline uint64_t cvmx_read_csr(uint64_t csr_addr);
((((arg_model) & (OM_FLAG_MASK)) == OM_IGNORE_REVISION) \
&& __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_FAMILY_MASK)) || \
((((arg_model) & (OM_FLAG_MASK)) == OM_CHECK_SUBMODEL) \
- && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_MODEL_REV_MASK)) || \
+ && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_MODEL_MASK)) || \
((((arg_model) & (OM_MATCH_5XXX_FAMILY_MODELS)) == OM_MATCH_5XXX_FAMILY_MODELS) \
- && ((chip_model) >= OCTEON_CN58XX_PASS1_0) && ((chip_model) < OCTEON_CN63XX_PASS1_0)) || \
+ && ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CN58XX_PASS1_0) \
+ && ((chip_model & OCTEON_PRID_MASK) < OCTEON_CN63XX_PASS1_0)) || \
((((arg_model) & (OM_MATCH_6XXX_FAMILY_MODELS)) == OM_MATCH_6XXX_FAMILY_MODELS) \
- && ((chip_model) >= OCTEON_CN63XX_PASS1_0)) || \
+ && ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CN63XX_PASS1_0) \
+ && ((chip_model & OCTEON_PRID_MASK) < OCTEON_CNF71XX_PASS1_0)) || \
+ ((((arg_model) & (OM_MATCH_F7XXX_FAMILY_MODELS)) == OM_MATCH_F7XXX_FAMILY_MODELS) \
+ && ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CNF71XX_PASS1_0) \
+ && ((chip_model & OCTEON_PRID_MASK) < OCTEON_CN78XX_PASS1_0)) || \
+ ((((arg_model) & (OM_MATCH_7XXX_FAMILY_MODELS)) == OM_MATCH_7XXX_FAMILY_MODELS) \
+ && ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CN78XX_PASS1_0)) || \
((((arg_model) & (OM_MATCH_PREVIOUS_MODELS)) == OM_MATCH_PREVIOUS_MODELS) \
&& (((chip_model) & OCTEON_58XX_MODEL_MASK) < ((arg_model) & OCTEON_58XX_MODEL_MASK))) \
)))
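
A worked example of how the new family arms resolve, using only the constants defined above (the pass number is made up for illustration):

	/*
	 *   chip_model = 0x000d9703      (a CN73XX part, some pass 1.x)
	 *   arg_model  = OCTEON_CN7XXX
	 *              = OCTEON_CN78XX_PASS1_0 | OM_MATCH_7XXX_FAMILY_MODELS
	 *
	 * The OM_MATCH_7XXX arm masks chip_model with OCTEON_PRID_MASK and tests
	 *   0x000d9703 >= OCTEON_CN78XX_PASS1_0 (0x000d9500)   ->  true
	 * so OCTEON_IS_MODEL(OCTEON_CN7XXX) (and OCTEON_IS_OCTEON3(), defined
	 * further down) is true for any CN7XXX part, independent of its revision.
	 */
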
@@ -300,14 +360,6 @@ static inline int __octeon_is_model_runtime__(uint32_t model)
{
uint32_t cpuid = cvmx_get_proc_id();
- /*
- * Check for special case of mismarked 3005 samples. We only
- * need to check if the sub model isn't being ignored
- */
- if ((model & OM_CHECK_SUBMODEL) == OM_CHECK_SUBMODEL) {
- if (cpuid == OCTEON_CN3010_PASS1 && (cvmx_read_csr(0x80011800800007B8ull) & (1ull << 34)))
- cpuid |= 0x10;
- }
return __OCTEON_IS_MODEL_COMPILE__(model, cpuid);
}
@@ -326,10 +378,21 @@ static inline int __octeon_is_model_runtime__(uint32_t model)
#define OCTEON_IS_COMMON_BINARY() 1
#undef OCTEON_MODEL
+#define OCTEON_IS_OCTEON1() OCTEON_IS_MODEL(OCTEON_CN3XXX)
+#define OCTEON_IS_OCTEONPLUS() OCTEON_IS_MODEL(OCTEON_CN5XXX)
+#define OCTEON_IS_OCTEON2() \
+ (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))
+
+#define OCTEON_IS_OCTEON3() OCTEON_IS_MODEL(OCTEON_CN7XXX)
+
+#define OCTEON_IS_OCTEON1PLUS() (OCTEON_IS_OCTEON1() || OCTEON_IS_OCTEONPLUS())
+
const char *__init octeon_model_get_string(uint32_t chip_id);
/*
* Return the octeon family, i.e., ProcessorID of the PrID register.
+ *
+ * @return the octeon family on success, ((uint32_t)-1) on error.
*/
static inline uint32_t cvmx_get_octeon_family(void)
{
diff --git a/arch/mips/include/asm/octeon/octeon.h b/arch/mips/include/asm/octeon/octeon.h
index d781f9e66884..041596570856 100644
--- a/arch/mips/include/asm/octeon/octeon.h
+++ b/arch/mips/include/asm/octeon/octeon.h
@@ -9,6 +9,7 @@
#define __ASM_OCTEON_OCTEON_H
#include <asm/octeon/cvmx.h>
+#include <asm/bitfield.h>
extern uint64_t octeon_bootmem_alloc_range_phys(uint64_t size,
uint64_t alignment,
@@ -44,11 +45,6 @@ extern int octeon_get_boot_num_arguments(void);
extern const char *octeon_get_boot_argument(int arg);
extern void octeon_hal_setup_reserved32(void);
extern void octeon_user_io_init(void);
-struct octeon_cop2_state;
-extern unsigned long octeon_crypto_enable(struct octeon_cop2_state *state);
-extern void octeon_crypto_disable(struct octeon_cop2_state *state,
- unsigned long flags);
-extern asmlinkage void octeon_cop2_restore(struct octeon_cop2_state *task);
extern void octeon_init_cvmcount(void);
extern void octeon_setup_delays(void);
@@ -58,6 +54,7 @@ extern void octeon_io_clk_delay(unsigned long);
#define OCTOEN_SERIAL_LEN 20
struct octeon_boot_descriptor {
+#ifdef __BIG_ENDIAN_BITFIELD
/* Start of block referenced by assembly code - do not change! */
uint32_t desc_version;
uint32_t desc_size;
@@ -109,77 +106,149 @@ struct octeon_boot_descriptor {
uint8_t mac_addr_base[6];
uint8_t mac_addr_count;
uint64_t cvmx_desc_vaddr;
+#else
+ uint32_t desc_size;
+ uint32_t desc_version;
+ uint64_t stack_top;
+ uint64_t heap_base;
+ uint64_t heap_end;
+ /* Only used by bootloader */
+ uint64_t entry_point;
+ uint64_t desc_vaddr;
+ /* End of this block referenced by assembly code - do not change! */
+ uint32_t stack_size;
+ uint32_t exception_base_addr;
+ uint32_t argc;
+ uint32_t heap_size;
+ /*
+ * Argv for application.
+ * Warning: low bit scrambled in little-endian.
+ */
+ uint32_t argv[OCTEON_ARGV_MAX_ARGS];
+
+#define BOOT_FLAG_INIT_CORE (1 << 0)
+#define OCTEON_BL_FLAG_DEBUG (1 << 1)
+#define OCTEON_BL_FLAG_NO_MAGIC (1 << 2)
+ /* If set, use uart1 for console */
+#define OCTEON_BL_FLAG_CONSOLE_UART1 (1 << 3)
+ /* If set, use PCI console */
+#define OCTEON_BL_FLAG_CONSOLE_PCI (1 << 4)
+ /* Call exit on break on serial port */
+#define OCTEON_BL_FLAG_BREAK (1 << 5)
+
+ uint32_t core_mask;
+ uint32_t flags;
+ /* physical address of free memory descriptor block. */
+ uint32_t phy_mem_desc_addr;
+ /* DRAM size in megabytes. */
+ uint32_t dram_size;
+ /* CPU clock speed, in hz. */
+ uint32_t eclock_hz;
+ /* used to pass flags from app to debugger. */
+ uint32_t debugger_flags_base_addr;
+ /* SPI4 clock in hz. */
+ uint32_t spi_clock_hz;
+ /* DRAM clock speed, in hz. */
+ uint32_t dclock_hz;
+ uint8_t chip_rev_minor;
+ uint8_t chip_rev_major;
+ uint16_t chip_type;
+ uint8_t board_rev_minor;
+ uint8_t board_rev_major;
+ uint16_t board_type;
+
+ uint64_t unused1[4]; /* Not even filled in by bootloader. */
+
+ uint64_t cvmx_desc_vaddr;
+#endif
};
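
The bootloader flag bits sit in the middle of the structure so they are shared by both layouts. A small sketch of how platform code might consult them (the helper name is made up; the real console setup lives elsewhere):

	/* Illustrative: pick the console UART requested by the bootloader. */
	static int octeon_boot_console_uart(const struct octeon_boot_descriptor *desc)
	{
		return (desc->flags & OCTEON_BL_FLAG_CONSOLE_UART1) ? 1 : 0;
	}
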
union octeon_cvmemctl {
uint64_t u64;
struct {
/* RO 1 = BIST fail, 0 = BIST pass */
- uint64_t tlbbist:1;
+ __BITFIELD_FIELD(uint64_t tlbbist:1,
/* RO 1 = BIST fail, 0 = BIST pass */
- uint64_t l1cbist:1;
+ __BITFIELD_FIELD(uint64_t l1cbist:1,
/* RO 1 = BIST fail, 0 = BIST pass */
- uint64_t l1dbist:1;
+ __BITFIELD_FIELD(uint64_t l1dbist:1,
/* RO 1 = BIST fail, 0 = BIST pass */
- uint64_t dcmbist:1;
+ __BITFIELD_FIELD(uint64_t dcmbist:1,
/* RO 1 = BIST fail, 0 = BIST pass */
- uint64_t ptgbist:1;
+ __BITFIELD_FIELD(uint64_t ptgbist:1,
/* RO 1 = BIST fail, 0 = BIST pass */
- uint64_t wbfbist:1;
+ __BITFIELD_FIELD(uint64_t wbfbist:1,
/* Reserved */
- uint64_t reserved:22;
+ __BITFIELD_FIELD(uint64_t reserved:17,
+ /* OCTEON II - TLB replacement policy: 0 = bitmask LRU; 1 = NLU.
+ * This field selects between the TLB replacement policies:
+ * bitmask LRU or NLU. Bitmask LRU maintains a mask of
+ * recently used TLB entries and avoids them as new entries
+ * are allocated. NLU simply guarantees that the next
+ * allocation is not the last used TLB entry. */
+ __BITFIELD_FIELD(uint64_t tlbnlu:1,
+ /* OCTEON II - Selects the bit in the counter used for
+ * releasing a PAUSE. This counter trips every 2^(8+PAUSETIME)
+ * cycles. If not already released, the cnMIPS II core will
+ * always release a given PAUSE instruction within
+ * 2^(8+PAUSETIME) cycles. If the counter trip happens to line up,
+ * the cnMIPS II core may release the PAUSE instantly. */
+ __BITFIELD_FIELD(uint64_t pausetime:3,
+ /* OCTEON II - This field is an extension of
+ * CvmMemCtl[DIDTTO] */
+ __BITFIELD_FIELD(uint64_t didtto2:1,
/* R/W If set, marked write-buffer entries time out
* the same as other entries; if clear, marked
* write-buffer entries use the maximum timeout. */
- uint64_t dismarkwblongto:1;
+ __BITFIELD_FIELD(uint64_t dismarkwblongto:1,
/* R/W If set, a merged store does not clear the
* write-buffer entry timeout state. */
- uint64_t dismrgclrwbto:1;
+ __BITFIELD_FIELD(uint64_t dismrgclrwbto:1,
/* R/W Two bits that are the MSBs of the resultant
* CVMSEG LM word location for an IOBDMA. The other 8
* bits come from the SCRADDR field of the IOBDMA. */
- uint64_t iobdmascrmsb:2;
+ __BITFIELD_FIELD(uint64_t iobdmascrmsb:2,
/* R/W If set, SYNCWS and SYNCS only order marked
* stores; if clear, SYNCWS and SYNCS only order
* unmarked stores. SYNCWSMARKED has no effect when
* DISSYNCWS is set. */
- uint64_t syncwsmarked:1;
+ __BITFIELD_FIELD(uint64_t syncwsmarked:1,
/* R/W If set, SYNCWS acts as SYNCW and SYNCS acts as
* SYNC. */
- uint64_t dissyncws:1;
+ __BITFIELD_FIELD(uint64_t dissyncws:1,
/* R/W If set, no stall happens on write buffer
* full. */
- uint64_t diswbfst:1;
+ __BITFIELD_FIELD(uint64_t diswbfst:1,
/* R/W If set (and SX set), supervisor-level
* loads/stores can use XKPHYS addresses with
* VA<48>==0 */
- uint64_t xkmemenas:1;
+ __BITFIELD_FIELD(uint64_t xkmemenas:1,
/* R/W If set (and UX set), user-level loads/stores
* can use XKPHYS addresses with VA<48>==0 */
- uint64_t xkmemenau:1;
+ __BITFIELD_FIELD(uint64_t xkmemenau:1,
/* R/W If set (and SX set), supervisor-level
* loads/stores can use XKPHYS addresses with
* VA<48>==1 */
- uint64_t xkioenas:1;
+ __BITFIELD_FIELD(uint64_t xkioenas:1,
/* R/W If set (and UX set), user-level loads/stores
* can use XKPHYS addresses with VA<48>==1 */
- uint64_t xkioenau:1;
+ __BITFIELD_FIELD(uint64_t xkioenau:1,
/* R/W If set, all stores act as SYNCW (NOMERGE must
* be set when this is set) RW, reset to 0. */
- uint64_t allsyncw:1;
+ __BITFIELD_FIELD(uint64_t allsyncw:1,
/* R/W If set, no stores merge, and all stores reach
* the coherent bus in order. */
- uint64_t nomerge:1;
+ __BITFIELD_FIELD(uint64_t nomerge:1,
/* R/W Selects the bit in the counter used for DID
* time-outs 0 = 2^31, 1 = 2^30, 2 = 2^29, 3 =
* 2^14. Actual time-out is between 1x and 2x this
* interval. For example, with DIDTTO=3, expiration
* interval is between 16K and 32K cycles. */
- uint64_t didtto:2;
+ __BITFIELD_FIELD(uint64_t didtto:2,
/* R/W If set, the (mem) CSR clock never turns off. */
- uint64_t csrckalwys:1;
+ __BITFIELD_FIELD(uint64_t csrckalwys:1,
/* R/W If set, mclk never turns off. */
- uint64_t mclkalwys:1;
+ __BITFIELD_FIELD(uint64_t mclkalwys:1,
/* R/W Selects the bit in the counter used for write
* buffer flush time-outs. (WBFLT+11) is the bit
* position in an internal counter used to determine
@@ -187,25 +256,26 @@ union octeon_cvmemctl {
* 2x this interval. For example, with WBFLT = 0, a
* write buffer expires between 2K and 4K cycles after
* the write buffer entry is allocated. */
- uint64_t wbfltime:3;
+ __BITFIELD_FIELD(uint64_t wbfltime:3,
/* R/W If set, do not put Istream in the L2 cache. */
- uint64_t istrnol2:1;
+ __BITFIELD_FIELD(uint64_t istrnol2:1,
/* R/W The write buffer threshold. */
- uint64_t wbthresh:4;
+ __BITFIELD_FIELD(uint64_t wbthresh:4,
/* Reserved */
- uint64_t reserved2:2;
+ __BITFIELD_FIELD(uint64_t reserved2:2,
/* R/W If set, CVMSEG is available for loads/stores in
* kernel/debug mode. */
- uint64_t cvmsegenak:1;
+ __BITFIELD_FIELD(uint64_t cvmsegenak:1,
/* R/W If set, CVMSEG is available for loads/stores in
* supervisor mode. */
- uint64_t cvmsegenas:1;
+ __BITFIELD_FIELD(uint64_t cvmsegenas:1,
/* R/W If set, CVMSEG is available for loads/stores in
* user mode. */
- uint64_t cvmsegenau:1;
+ __BITFIELD_FIELD(uint64_t cvmsegenau:1,
/* R/W Size of local memory in cache blocks, 54 (6912
* bytes) is max legal value. */
- uint64_t lmemsz:6;
+ __BITFIELD_FIELD(uint64_t lmemsz:6,
+ ;)))))))))))))))))))))))))))))))))
} s;
};
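
The __BITFIELD_FIELD() helper (pulled in through the new asm/bitfield.h include above) emits its first argument before the rest on big-endian and after it on little-endian, which is why the whole register is written as one deeply nested invocation closed by the lone ';' and the run of parentheses. A simplified model of the idea, not the verbatim header (which may key off a different endianness macro):

	#ifdef __BIG_ENDIAN_BITFIELD
	#define __BITFIELD_FIELD(field, more)	field; more
	#else
	#define __BITFIELD_FIELD(field, more)	more field;
	#endif

With that expansion the big-endian struct lists tlbbist first and lmemsz last, while the little-endian struct comes out in the reverse order, matching what the two hand-written copies used to spell out.
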
@@ -229,6 +299,19 @@ static inline void octeon_npi_write32(uint64_t address, uint32_t val)
cvmx_read64_uint32(address ^ 4);
}
+/* Octeon multiplier save/restore routines from octeon_switch.S */
+void octeon_mult_save(void);
+void octeon_mult_restore(void);
+void octeon_mult_save_end(void);
+void octeon_mult_restore_end(void);
+void octeon_mult_save3(void);
+void octeon_mult_save3_end(void);
+void octeon_mult_save2(void);
+void octeon_mult_save2_end(void);
+void octeon_mult_restore3(void);
+void octeon_mult_restore3_end(void);
+void octeon_mult_restore2(void);
+void octeon_mult_restore2_end(void);
/**
* Read a 32bit value from the Octeon NPI register space
diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h
index 69529624a005..193b4c6b7541 100644
--- a/arch/mips/include/asm/pci.h
+++ b/arch/mips/include/asm/pci.h
@@ -121,6 +121,7 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,
}
#endif
+#ifdef CONFIG_PCI_DOMAINS
#define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index
static inline int pci_proc_domain(struct pci_bus *bus)
@@ -128,6 +129,7 @@ static inline int pci_proc_domain(struct pci_bus *bus)
struct pci_controller *hose = bus->sysdata;
return hose->need_domain_info;
}
+#endif /* CONFIG_PCI_DOMAINS */
#endif /* __KERNEL__ */
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index 68984b612f9d..a6be006b6f75 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -57,7 +57,7 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
#define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
#define VMALLOC_START MAP_BASE
@@ -161,22 +161,6 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-/*
- * Encode and decode a nonlinear file mapping entry
- */
-#define pte_to_pgoff(_pte) ((((_pte).pte >> 1 ) & 0x07) | \
- (((_pte).pte >> 2 ) & 0x38) | \
- (((_pte).pte >> 10) << 6 ))
-
-#define pgoff_to_pte(off) ((pte_t) { (((off) & 0x07) << 1 ) | \
- (((off) & 0x38) << 2 ) | \
- (((off) >> 6 ) << 10) | \
- _PAGE_FILE })
-
-/*
- * Bits 0, 4, 8, and 9 are taken, split up 28 bits of offset into this range:
- */
-#define PTE_FILE_MAX_BITS 28
#else
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
@@ -188,13 +172,6 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_high })
#define __swp_entry_to_pte(x) ((pte_t) { 0, (x).val })
-/*
- * Bits 0 and 1 of pte_high are taken, use the rest for the page offset...
- */
-#define pte_to_pgoff(_pte) ((_pte).pte_high >> 2)
-#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) << 2 })
-
-#define PTE_FILE_MAX_BITS 30
#else
/*
* Constraints:
@@ -209,19 +186,6 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-/*
- * Encode and decode a nonlinear file mapping entry
- */
-#define pte_to_pgoff(_pte) ((((_pte).pte >> 1) & 0x7) | \
- (((_pte).pte >> 2) & 0x8) | \
- (((_pte).pte >> 8) << 4))
-
-#define pgoff_to_pte(off) ((pte_t) { (((off) & 0x7) << 1) | \
- (((off) & 0x8) << 2) | \
- (((off) >> 4) << 8) | \
- _PAGE_FILE })
-
-#define PTE_FILE_MAX_BITS 28
#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */
#endif /* defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) */
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index e1c49a96807d..1659bb91ae21 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -291,13 +291,4 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-/*
- * Bits 0, 4, 6, and 7 are taken. Let's leave bits 1, 2, 3, and 5 alone to
- * make things easier, and only use the upper 56 bits for the page offset...
- */
-#define PTE_FILE_MAX_BITS 56
-
-#define pte_to_pgoff(_pte) ((_pte).pte >> 8)
-#define pgoff_to_pte(off) ((pte_t) { ((off) << 8) | _PAGE_FILE })
-
#endif /* _ASM_PGTABLE_64_H */
diff --git a/arch/mips/include/asm/pgtable-bits.h b/arch/mips/include/asm/pgtable-bits.h
index ca11f14f40a3..91747c282bb3 100644
--- a/arch/mips/include/asm/pgtable-bits.h
+++ b/arch/mips/include/asm/pgtable-bits.h
@@ -35,7 +35,7 @@
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
/*
- * The following bits are directly used by the TLB hardware
+ * The following bits are implemented by the TLB hardware
*/
#define _PAGE_GLOBAL_SHIFT 0
#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT)
@@ -48,8 +48,6 @@
/*
* The following bits are implemented in software
- *
- * _PAGE_FILE semantics: set:pagecache unset:swap
*/
#define _PAGE_PRESENT_SHIFT (_CACHE_SHIFT + 3)
#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
@@ -62,48 +60,40 @@
#define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1)
#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT)
-#define _PAGE_SILENT_READ _PAGE_VALID
-#define _PAGE_SILENT_WRITE _PAGE_DIRTY
-#define _PAGE_FILE _PAGE_MODIFIED
-
#define _PFN_SHIFT (PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)
#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
/*
- * The following are implemented by software
- *
- * _PAGE_FILE semantics: set:pagecache unset:swap
+ * The following bits are implemented in software
*/
-#define _PAGE_PRESENT_SHIFT 0
-#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
-#define _PAGE_READ_SHIFT 1
-#define _PAGE_READ (1 << _PAGE_READ_SHIFT)
-#define _PAGE_WRITE_SHIFT 2
-#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT)
-#define _PAGE_ACCESSED_SHIFT 3
-#define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT)
-#define _PAGE_MODIFIED_SHIFT 4
-#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT)
-#define _PAGE_FILE_SHIFT 4
-#define _PAGE_FILE (1 << _PAGE_FILE_SHIFT)
+#define _PAGE_PRESENT_SHIFT (0)
+#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
+#define _PAGE_READ_SHIFT (_PAGE_PRESENT_SHIFT + 1)
+#define _PAGE_READ (1 << _PAGE_READ_SHIFT)
+#define _PAGE_WRITE_SHIFT (_PAGE_READ_SHIFT + 1)
+#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT)
+#define _PAGE_ACCESSED_SHIFT (_PAGE_WRITE_SHIFT + 1)
+#define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT)
+#define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1)
+#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT)
/*
- * And these are the hardware TLB bits
+ * The following bits are implemented by the TLB hardware
*/
-#define _PAGE_GLOBAL_SHIFT 8
-#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT)
-#define _PAGE_VALID_SHIFT 9
-#define _PAGE_VALID (1 << _PAGE_VALID_SHIFT)
-#define _PAGE_SILENT_READ (1 << _PAGE_VALID_SHIFT) /* synonym */
-#define _PAGE_DIRTY_SHIFT 10
+#define _PAGE_GLOBAL_SHIFT (_PAGE_MODIFIED_SHIFT + 4)
+#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT)
+#define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1)
+#define _PAGE_VALID (1 << _PAGE_VALID_SHIFT)
+#define _PAGE_DIRTY_SHIFT (_PAGE_VALID_SHIFT + 1)
#define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT)
-#define _PAGE_SILENT_WRITE (1 << _PAGE_DIRTY_SHIFT)
-#define _CACHE_UNCACHED_SHIFT 11
+#define _CACHE_UNCACHED_SHIFT (_PAGE_DIRTY_SHIFT + 1)
#define _CACHE_UNCACHED (1 << _CACHE_UNCACHED_SHIFT)
-#define _CACHE_MASK (1 << _CACHE_UNCACHED_SHIFT)
+#define _CACHE_MASK _CACHE_UNCACHED
+
+#define _PFN_SHIFT PAGE_SHIFT
-#else /* 'Normal' r4K case */
+#else
/*
* When using the RI/XI bit support, we have 13 bits of flags below
* the physical address. The RI/XI bits are placed such that a SRL 5
@@ -114,11 +104,8 @@
/*
* The following bits are implemented in software
- *
- * _PAGE_READ / _PAGE_READ_SHIFT should be unused if cpu_has_rixi.
- * _PAGE_FILE semantics: set:pagecache unset:swap
*/
-#define _PAGE_PRESENT_SHIFT (0)
+#define _PAGE_PRESENT_SHIFT 0
#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
#define _PAGE_READ_SHIFT (cpu_has_rixi ? _PAGE_PRESENT_SHIFT : _PAGE_PRESENT_SHIFT + 1)
#define _PAGE_READ ({BUG_ON(cpu_has_rixi); 1 << _PAGE_READ_SHIFT; })
@@ -128,22 +115,16 @@
#define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT)
#define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1)
#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT)
-#define _PAGE_FILE (_PAGE_MODIFIED)
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
/* huge tlb page */
#define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT + 1)
#define _PAGE_HUGE (1 << _PAGE_HUGE_SHIFT)
-#else
-#define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT)
-#define _PAGE_HUGE ({BUG(); 1; }) /* Dummy value */
-#endif
-
-#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
-/* huge tlb page */
#define _PAGE_SPLITTING_SHIFT (_PAGE_HUGE_SHIFT + 1)
#define _PAGE_SPLITTING (1 << _PAGE_SPLITTING_SHIFT)
#else
+#define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT)
+#define _PAGE_HUGE ({BUG(); 1; }) /* Dummy value */
#define _PAGE_SPLITTING_SHIFT (_PAGE_HUGE_SHIFT)
#define _PAGE_SPLITTING ({BUG(); 1; }) /* Dummy value */
#endif
@@ -158,17 +139,10 @@
#define _PAGE_GLOBAL_SHIFT (_PAGE_NO_READ_SHIFT + 1)
#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT)
-
#define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1)
#define _PAGE_VALID (1 << _PAGE_VALID_SHIFT)
-/* synonym */
-#define _PAGE_SILENT_READ (_PAGE_VALID)
-
-/* The MIPS dirty bit */
#define _PAGE_DIRTY_SHIFT (_PAGE_VALID_SHIFT + 1)
#define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT)
-#define _PAGE_SILENT_WRITE (_PAGE_DIRTY)
-
#define _CACHE_SHIFT (_PAGE_DIRTY_SHIFT + 1)
#define _CACHE_MASK (7 << _CACHE_SHIFT)
@@ -176,9 +150,9 @@
#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */
-#ifndef _PFN_SHIFT
-#define _PFN_SHIFT PAGE_SHIFT
-#endif
+#define _PAGE_SILENT_READ _PAGE_VALID
+#define _PAGE_SILENT_WRITE _PAGE_DIRTY
+
#define _PFN_MASK (~((1 << (_PFN_SHIFT)) - 1))
#ifndef _PAGE_NO_READ
@@ -188,9 +162,6 @@
#ifndef _PAGE_NO_EXEC
#define _PAGE_NO_EXEC ({BUG(); 0; })
#endif
-#ifndef _PAGE_GLOBAL_SHIFT
-#define _PAGE_GLOBAL_SHIFT ilog2(_PAGE_GLOBAL)
-#endif
#ifndef __ASSEMBLY__
@@ -275,8 +246,9 @@ static inline uint64_t pte_to_entrylo(unsigned long pte_val)
#endif
#define __READABLE (_PAGE_SILENT_READ | _PAGE_ACCESSED | (cpu_has_rixi ? 0 : _PAGE_READ))
-#define __WRITEABLE (_PAGE_WRITE | _PAGE_SILENT_WRITE | _PAGE_MODIFIED)
+#define __WRITEABLE (_PAGE_SILENT_WRITE | _PAGE_WRITE | _PAGE_MODIFIED)
-#define _PAGE_CHG_MASK (_PFN_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | _CACHE_MASK)
+#define _PAGE_CHG_MASK (_PAGE_ACCESSED | _PAGE_MODIFIED | \
+ _PFN_MASK | _CACHE_MASK)
#endif /* _ASM_PGTABLE_BITS_H */
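
The relative shift definitions for the R3000/TX39 case expand to the same bit positions the deleted literals used, which is worth checking once by hand:

	/* Expansion check (matches the removed constants):
	 *   _PAGE_PRESENT_SHIFT   = 0
	 *   _PAGE_READ_SHIFT      = 1
	 *   _PAGE_WRITE_SHIFT     = 2
	 *   _PAGE_ACCESSED_SHIFT  = 3
	 *   _PAGE_MODIFIED_SHIFT  = 4
	 *   _PAGE_GLOBAL_SHIFT    = 4 + 4 = 8
	 *   _PAGE_VALID_SHIFT     = 9
	 *   _PAGE_DIRTY_SHIFT     = 10
	 *   _CACHE_UNCACHED_SHIFT = 11
	 */
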
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 62a6ba383d4f..bef782c4a44b 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -99,29 +99,35 @@ extern void paging_init(void);
#define htw_stop() \
do { \
- if (cpu_has_htw) \
- write_c0_pwctl(read_c0_pwctl() & \
- ~(1 << MIPS_PWCTL_PWEN_SHIFT)); \
+ unsigned long flags; \
+ \
+ if (cpu_has_htw) { \
+ local_irq_save(flags); \
+ if (!raw_current_cpu_data.htw_seq++) { \
+ write_c0_pwctl(read_c0_pwctl() & \
+ ~(1 << MIPS_PWCTL_PWEN_SHIFT)); \
+ back_to_back_c0_hazard(); \
+ } \
+ local_irq_restore(flags); \
+ } \
} while(0)
#define htw_start() \
do { \
- if (cpu_has_htw) \
- write_c0_pwctl(read_c0_pwctl() | \
- (1 << MIPS_PWCTL_PWEN_SHIFT)); \
-} while(0)
-
-
-#define htw_reset() \
-do { \
+ unsigned long flags; \
+ \
if (cpu_has_htw) { \
- htw_stop(); \
- back_to_back_c0_hazard(); \
- htw_start(); \
- back_to_back_c0_hazard(); \
+ local_irq_save(flags); \
+ if (!--raw_current_cpu_data.htw_seq) { \
+ write_c0_pwctl(read_c0_pwctl() | \
+ (1 << MIPS_PWCTL_PWEN_SHIFT)); \
+ back_to_back_c0_hazard(); \
+ } \
+ local_irq_restore(flags); \
} \
} while(0)
+
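
The htw_seq counter is what makes the pair safe to nest: only the outermost htw_stop() clears PWEN and only the matching outermost htw_start() sets it again, so a pte_clear() that internally brackets itself with stop/start no longer re-enables the walker inside a caller's stopped region (which is why htw_reset() could go away). A minimal illustration, assuming a caller in the kernel's usual pte manipulation path:

	/* Illustrative nesting: the inner stop/start inside pte_clear() only
	 * moves htw_seq between 1 and 2, so the walker stays off until the
	 * outer htw_start() drops it back to 0. */
	static void example_clear_one_pte(struct mm_struct *mm,
					  unsigned long addr, pte_t *ptep)
	{
		htw_stop();		/* 0 -> 1: PWEN cleared */
		pte_clear(mm, addr, ptep);
		htw_start();		/* 1 -> 0: PWEN set */
	}
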
extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
pte_t pteval);
@@ -153,12 +159,13 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
{
pte_t null = __pte(0);
+ htw_stop();
/* Preserve global status for the pair */
if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
null.pte_low = null.pte_high = _PAGE_GLOBAL;
set_pte_at(mm, addr, ptep, null);
- htw_reset();
+ htw_start();
}
#else
@@ -188,6 +195,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
+ htw_stop();
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
/* Preserve global status for the pair */
if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
@@ -195,7 +203,7 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
else
#endif
set_pte_at(mm, addr, ptep, __pte(0));
- htw_reset();
+ htw_start();
}
#endif
@@ -231,7 +239,6 @@ extern pgd_t swapper_pg_dir[];
static inline int pte_write(pte_t pte) { return pte.pte_low & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte) { return pte.pte_low & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte) { return pte.pte_low & _PAGE_ACCESSED; }
-static inline int pte_file(pte_t pte) { return pte.pte_low & _PAGE_FILE; }
static inline pte_t pte_wrprotect(pte_t pte)
{
@@ -287,7 +294,6 @@ static inline pte_t pte_mkyoung(pte_t pte)
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
-static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
static inline pte_t pte_wrprotect(pte_t pte)
{
@@ -336,7 +342,7 @@ static inline pte_t pte_mkyoung(pte_t pte)
return pte;
}
-#ifdef _PAGE_HUGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_HUGE; }
static inline pte_t pte_mkhuge(pte_t pte)
@@ -344,7 +350,7 @@ static inline pte_t pte_mkhuge(pte_t pte)
pte_val(pte) |= _PAGE_HUGE;
return pte;
}
-#endif /* _PAGE_HUGE */
+#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
#endif
static inline int pte_special(pte_t pte) { return 0; }
static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index f1df4cb4a286..b5dcbee01fd7 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -54,9 +54,7 @@ extern unsigned int vced_count, vcei_count;
#define TASK_SIZE 0x7fff8000UL
#endif
-#ifdef __KERNEL__
#define STACK_TOP_MAX TASK_SIZE
-#endif
#define TASK_IS_32BIT_ADDR 1
@@ -73,11 +71,7 @@ extern unsigned int vced_count, vcei_count;
#define TASK_SIZE32 0x7fff8000UL
#define TASK_SIZE64 0x10000000000UL
#define TASK_SIZE (test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)
-
-#ifdef __KERNEL__
#define STACK_TOP_MAX TASK_SIZE64
-#endif
-
#define TASK_SIZE_OF(tsk) \
(test_tsk_thread_flag(tsk, TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)
@@ -211,6 +205,8 @@ struct octeon_cop2_state {
unsigned long cop2_gfm_poly;
/* DMFC2 rt, 0x025A; DMFC2 rt, 0x025B - Pass2 */
unsigned long cop2_gfm_result[2];
+ /* DMFC2 rt, 0x24F, DMFC2 rt, 0x50, OCTEON III */
+ unsigned long cop2_sha3[2];
};
#define COP2_INIT \
.cp2 = {0,},
@@ -399,4 +395,15 @@ unsigned long get_wchan(struct task_struct *p);
#endif
+/*
+ * Functions & macros implementing the PR_GET_FP_MODE & PR_SET_FP_MODE options
+ * to the prctl syscall.
+ */
+extern int mips_get_process_fp_mode(struct task_struct *task);
+extern int mips_set_process_fp_mode(struct task_struct *task,
+ unsigned int value);
+
+#define GET_FP_MODE(task) mips_get_process_fp_mode(task)
+#define SET_FP_MODE(task,value) mips_set_process_fp_mode(task, value)
+
#endif /* _ASM_PROCESSOR_H */
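
These hooks back the new PR_SET_FP_MODE / PR_GET_FP_MODE prctl(2) options. From user space the mode is queried roughly as below; the PR_* constants come from the matching uapi update and are spelled out here only as a fallback in case older headers lack them:

	#include <stdio.h>
	#include <sys/prctl.h>

	#ifndef PR_GET_FP_MODE
	#define PR_SET_FP_MODE	45
	#define PR_GET_FP_MODE	46
	#define PR_FP_MODE_FR	(1 << 0)	/* 64-bit FP registers (FR=1) */
	#define PR_FP_MODE_FRE	(1 << 1)	/* 32-bit compatibility mode  */
	#endif

	int main(void)
	{
		int mode = prctl(PR_GET_FP_MODE, 0, 0, 0, 0);

		if (mode < 0) {
			perror("PR_GET_FP_MODE");
			return 1;
		}
		printf("FR=%d FRE=%d\n", !!(mode & PR_FP_MODE_FR),
		       !!(mode & PR_FP_MODE_FRE));
		return 0;
	}
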
diff --git a/arch/mips/include/asm/prom.h b/arch/mips/include/asm/prom.h
index eaa26270a5e5..8ebc2aa5f3e1 100644
--- a/arch/mips/include/asm/prom.h
+++ b/arch/mips/include/asm/prom.h
@@ -24,13 +24,6 @@ struct boot_param_header;
extern void __dt_setup_arch(void *bph);
extern int __dt_register_buses(const char *bus0, const char *bus1);
-#define dt_setup_arch(sym) \
-({ \
- extern char __dtb_##sym##_begin[]; \
- \
- __dt_setup_arch(__dtb_##sym##_begin); \
-})
-
#else /* CONFIG_OF */
static inline void device_tree_init(void) { }
#endif /* CONFIG_OF */
diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
index fc783f843bdc..ffc320389f40 100644
--- a/arch/mips/include/asm/ptrace.h
+++ b/arch/mips/include/asm/ptrace.h
@@ -40,8 +40,8 @@ struct pt_regs {
unsigned long cp0_cause;
unsigned long cp0_epc;
#ifdef CONFIG_CPU_CAVIUM_OCTEON
- unsigned long long mpl[3]; /* MTM{0,1,2} */
- unsigned long long mtp[3]; /* MTP{0,1,2} */
+ unsigned long long mpl[6]; /* MTM{0-5} */
+ unsigned long long mtp[6]; /* MTP{0-5} */
#endif
} __aligned(8);
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index e293a8d89a6d..1b22d2da88a1 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -14,6 +14,7 @@
#include <asm/asm.h>
#include <asm/cacheops.h>
+#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/mipsmtregs.h>
@@ -39,7 +40,7 @@ extern void (*r4k_blast_icache)(void);
__asm__ __volatile__( \
" .set push \n" \
" .set noreorder \n" \
- " .set arch=r4000 \n" \
+ " .set "MIPS_ISA_ARCH_LEVEL" \n" \
" cache %0, %1 \n" \
" .set pop \n" \
: \
@@ -147,7 +148,7 @@ static inline void flush_scache_line(unsigned long addr)
__asm__ __volatile__( \
" .set push \n" \
" .set noreorder \n" \
- " .set arch=r4000 \n" \
+ " .set "MIPS_ISA_ARCH_LEVEL" \n" \
"1: cache %0, (%1) \n" \
"2: .set pop \n" \
" .section __ex_table,\"a\" \n" \
@@ -218,6 +219,7 @@ static inline void invalidate_tcache_page(unsigned long addr)
cache_op(Page_Invalidate_T, addr);
}
+#ifndef CONFIG_CPU_MIPSR6
#define cache16_unroll32(base,op) \
__asm__ __volatile__( \
" .set push \n" \
@@ -322,6 +324,150 @@ static inline void invalidate_tcache_page(unsigned long addr)
: "r" (base), \
"i" (op));
+#else
+/*
+ * MIPS R6 changed the cache opcode and moved to an 8-bit offset field.
+ * This means we now need to increment the base register before we flush
+ * more cache lines.
+ */
+#define cache16_unroll32(base,op) \
+ __asm__ __volatile__( \
+ " .set push\n" \
+ " .set noreorder\n" \
+ " .set mips64r6\n" \
+ " .set noat\n" \
+ " cache %1, 0x000(%0); cache %1, 0x010(%0)\n" \
+ " cache %1, 0x020(%0); cache %1, 0x030(%0)\n" \
+ " cache %1, 0x040(%0); cache %1, 0x050(%0)\n" \
+ " cache %1, 0x060(%0); cache %1, 0x070(%0)\n" \
+ " cache %1, 0x080(%0); cache %1, 0x090(%0)\n" \
+ " cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)\n" \
+ " cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)\n" \
+ " cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)\n" \
+ " addiu $1, $0, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x010($1)\n" \
+ " cache %1, 0x020($1); cache %1, 0x030($1)\n" \
+ " cache %1, 0x040($1); cache %1, 0x050($1)\n" \
+ " cache %1, 0x060($1); cache %1, 0x070($1)\n" \
+ " cache %1, 0x080($1); cache %1, 0x090($1)\n" \
+ " cache %1, 0x0a0($1); cache %1, 0x0b0($1)\n" \
+ " cache %1, 0x0c0($1); cache %1, 0x0d0($1)\n" \
+ " cache %1, 0x0e0($1); cache %1, 0x0f0($1)\n" \
+ " .set pop\n" \
+ : \
+ : "r" (base), \
+ "i" (op));
+
+#define cache32_unroll32(base,op) \
+ __asm__ __volatile__( \
+ " .set push\n" \
+ " .set noreorder\n" \
+ " .set mips64r6\n" \
+ " .set noat\n" \
+ " cache %1, 0x000(%0); cache %1, 0x020(%0)\n" \
+ " cache %1, 0x040(%0); cache %1, 0x060(%0)\n" \
+ " cache %1, 0x080(%0); cache %1, 0x0a0(%0)\n" \
+ " cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)\n" \
+ " addiu $1, %0, 0x100\n" \
+ " cache %1, 0x000($1); cache %1, 0x020($1)\n" \
+ " cache %1, 0x040($1); cache %1, 0x060($1)\n" \
+ " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \
+ " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \
+ " addiu $1, $1, 0x100\n" \
+ " cache %1, 0x000($1); cache %1, 0x020($1)\n" \
+ " cache %1, 0x040($1); cache %1, 0x060($1)\n" \
+ " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \
+ " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \
+ " addiu $1, $1, 0x100\n" \
+ " cache %1, 0x000($1); cache %1, 0x020($1)\n" \
+ " cache %1, 0x040($1); cache %1, 0x060($1)\n" \
+ " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \
+ " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \
+ " .set pop\n" \
+ : \
+ : "r" (base), \
+ "i" (op));
+
+#define cache64_unroll32(base,op) \
+ __asm__ __volatile__( \
+ " .set push\n" \
+ " .set noreorder\n" \
+ " .set mips64r6\n" \
+ " .set noat\n" \
+ " cache %1, 0x000(%0); cache %1, 0x040(%0)\n" \
+ " cache %1, 0x080(%0); cache %1, 0x0c0(%0)\n" \
+ " addiu $1, %0, 0x100\n" \
+ " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
+ " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
+ " addiu $1, %0, 0x100\n" \
+ " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
+ " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
+ " addiu $1, %0, 0x100\n" \
+ " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
+ " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
+ " addiu $1, %0, 0x100\n" \
+ " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
+ " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
+ " addiu $1, %0, 0x100\n" \
+ " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
+ " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
+ " addiu $1, %0, 0x100\n" \
+ " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
+ " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
+ " addiu $1, %0, 0x100\n" \
+ " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
+ " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
+ " .set pop\n" \
+ : \
+ : "r" (base), \
+ "i" (op));
+
+#define cache128_unroll32(base,op) \
+ __asm__ __volatile__( \
+ " .set push\n" \
+ " .set noreorder\n" \
+ " .set mips64r6\n" \
+ " .set noat\n" \
+ " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
+ " addiu $1, %0, 0x100\n" \
+ " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
+ " addiu $1, %0, 0x100\n" \
+ " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
+ " addiu $1, %0, 0x100\n" \
+ " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
+ " addiu $1, %0, 0x100\n" \
+ " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
+ " addiu $1, %0, 0x100\n" \
+ " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
+ " addiu $1, %0, 0x100\n" \
+ " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
+ " addiu $1, %0, 0x100\n" \
+ " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
+ " addiu $1, %0, 0x100\n" \
+ " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
+ " addiu $1, %0, 0x100\n" \
+ " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
+ " addiu $1, %0, 0x100\n" \
+ " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
+ " addiu $1, %0, 0x100\n" \
+ " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
+ " addiu $1, %0, 0x100\n" \
+ " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
+ " addiu $1, %0, 0x100\n" \
+ " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
+ " addiu $1, %0, 0x100\n" \
+ " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
+ " addiu $1, %0, 0x100\n" \
+ " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
+ " addiu $1, %0, 0x100\n" \
+ " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
+ " addiu $1, %0, 0x100\n" \
+ " .set pop\n" \
+ : \
+ : "r" (base), \
+ "i" (op));
+#endif /* CONFIG_CPU_MIPSR6 */
+
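
The R6 variants above step the $1 scratch register forward because the R6 cache op's much smaller offset field cannot cover the whole unrolled range from a single base. The unroll macros themselves are consumed by the blast generators later in this header; the loop shape is roughly the following (illustrative only, the real routines are produced by __BUILD_BLAST_CACHE):

	/* Sketch: writeback-invalidate a D-cache index range using the
	 * 16-byte-line unroll; 32 ops per call -> 0x200 bytes per step. */
	static inline void example_blast_dcache16_range(unsigned long start,
							unsigned long end)
	{
		unsigned long addr;

		for (addr = start; addr < end; addr += 16 * 32)
			cache16_unroll32(addr, Index_Writeback_Inv_D);
	}
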
/*
* Perform the cache operation specified by op using a user mode virtual
* address while in kernel mode.
diff --git a/arch/mips/include/asm/sgialib.h b/arch/mips/include/asm/sgialib.h
index 753275accd18..195db5045ae5 100644
--- a/arch/mips/include/asm/sgialib.h
+++ b/arch/mips/include/asm/sgialib.h
@@ -11,6 +11,7 @@
#ifndef _ASM_SGIALIB_H
#define _ASM_SGIALIB_H
+#include <linux/compiler.h>
#include <asm/sgiarcs.h>
extern struct linux_romvec *romvec;
@@ -70,8 +71,11 @@ extern LONG ArcRead(ULONG fd, PVOID buf, ULONG num, PULONG cnt);
extern LONG ArcWrite(ULONG fd, PVOID buf, ULONG num, PULONG cnt);
/* Misc. routines. */
-extern VOID ArcReboot(VOID) __attribute__((noreturn));
-extern VOID ArcEnterInteractiveMode(VOID) __attribute__((noreturn));
+extern VOID ArcHalt(VOID) __noreturn;
+extern VOID ArcPowerDown(VOID) __noreturn;
+extern VOID ArcRestart(VOID) __noreturn;
+extern VOID ArcReboot(VOID) __noreturn;
+extern VOID ArcEnterInteractiveMode(VOID) __noreturn;
extern VOID ArcFlushAllCaches(VOID);
extern DISPLAY_STATUS *ArcGetDisplayStatus(ULONG FileID);
diff --git a/arch/mips/include/asm/siginfo.h b/arch/mips/include/asm/siginfo.h
deleted file mode 100644
index dd9a762646fc..000000000000
--- a/arch/mips/include/asm/siginfo.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1998, 1999, 2001, 2003 Ralf Baechle
- * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
- */
-#ifndef _ASM_SIGINFO_H
-#define _ASM_SIGINFO_H
-
-#include <uapi/asm/siginfo.h>
-
-
-/*
- * Duplicated here because of <asm-generic/siginfo.h> braindamage ...
- */
-#include <linux/string.h>
-
-static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
-{
- if (from->si_code < 0)
- memcpy(to, from, sizeof(*to));
- else
- /* _sigchld is currently the largest know union member */
- memcpy(to, from, 3*sizeof(int) + sizeof(from->_sifields._sigchld));
-}
-
-#endif /* _ASM_SIGINFO_H */
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index c6d06d383ef9..b4548690ade9 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -89,7 +89,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
" subu %[ticket], %[ticket], 1 \n"
" .previous \n"
" .set pop \n"
- : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock),
+ : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
[serving_now_ptr] "+m" (lock->h.serving_now),
[ticket] "=&r" (tmp),
[my_ticket] "=&r" (my_ticket)
@@ -122,7 +122,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
" subu %[ticket], %[ticket], 1 \n"
" .previous \n"
" .set pop \n"
- : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock),
+ : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
[serving_now_ptr] "+m" (lock->h.serving_now),
[ticket] "=&r" (tmp),
[my_ticket] "=&r" (my_ticket)
@@ -164,7 +164,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
" li %[ticket], 0 \n"
" .previous \n"
" .set pop \n"
- : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock),
+ : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
[ticket] "=&r" (tmp),
[my_ticket] "=&r" (tmp2),
[now_serving] "=&r" (tmp3)
@@ -188,7 +188,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
" li %[ticket], 0 \n"
" .previous \n"
" .set pop \n"
- : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock),
+ : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
[ticket] "=&r" (tmp),
[my_ticket] "=&r" (tmp2),
[now_serving] "=&r" (tmp3)
@@ -235,8 +235,8 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
" beqzl %1, 1b \n"
" nop \n"
" .set reorder \n"
- : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
- : GCC_OFF12_ASM() (rw->lock)
+ : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
+ : GCC_OFF_SMALL_ASM() (rw->lock)
: "memory");
} else {
do {
@@ -245,8 +245,8 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
" bltz %1, 1b \n"
" addu %1, 1 \n"
"2: sc %1, %0 \n"
- : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
- : GCC_OFF12_ASM() (rw->lock)
+ : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
+ : GCC_OFF_SMALL_ASM() (rw->lock)
: "memory");
} while (unlikely(!tmp));
}
@@ -254,9 +254,6 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
smp_llsc_mb();
}
-/* Note the use of sub, not subu which will make the kernel die with an
- overflow exception if we ever try to unlock an rwlock that is already
- unlocked or is being held by a writer. */
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
unsigned int tmp;
@@ -266,20 +263,20 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
if (R10000_LLSC_WAR) {
__asm__ __volatile__(
"1: ll %1, %2 # arch_read_unlock \n"
- " sub %1, 1 \n"
+ " addiu %1, 1 \n"
" sc %1, %0 \n"
" beqzl %1, 1b \n"
- : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
- : GCC_OFF12_ASM() (rw->lock)
+ : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
+ : GCC_OFF_SMALL_ASM() (rw->lock)
: "memory");
} else {
do {
__asm__ __volatile__(
"1: ll %1, %2 # arch_read_unlock \n"
- " sub %1, 1 \n"
+ " addiu %1, -1 \n"
" sc %1, %0 \n"
- : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
- : GCC_OFF12_ASM() (rw->lock)
+ : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
+ : GCC_OFF_SMALL_ASM() (rw->lock)
: "memory");
} while (unlikely(!tmp));
}
@@ -299,8 +296,8 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
" beqzl %1, 1b \n"
" nop \n"
" .set reorder \n"
- : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
- : GCC_OFF12_ASM() (rw->lock)
+ : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
+ : GCC_OFF_SMALL_ASM() (rw->lock)
: "memory");
} else {
do {
@@ -309,8 +306,8 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
" bnez %1, 1b \n"
" lui %1, 0x8000 \n"
"2: sc %1, %0 \n"
- : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
- : GCC_OFF12_ASM() (rw->lock)
+ : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
+ : GCC_OFF_SMALL_ASM() (rw->lock)
: "memory");
} while (unlikely(!tmp));
}
@@ -349,8 +346,8 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
__WEAK_LLSC_MB
" li %2, 1 \n"
"2: \n"
- : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
- : GCC_OFF12_ASM() (rw->lock)
+ : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
+ : GCC_OFF_SMALL_ASM() (rw->lock)
: "memory");
} else {
__asm__ __volatile__(
@@ -366,8 +363,8 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
__WEAK_LLSC_MB
" li %2, 1 \n"
"2: \n"
- : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
- : GCC_OFF12_ASM() (rw->lock)
+ : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
+ : GCC_OFF_SMALL_ASM() (rw->lock)
: "memory");
}
@@ -393,8 +390,8 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
" li %2, 1 \n"
" .set reorder \n"
"2: \n"
- : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
- : GCC_OFF12_ASM() (rw->lock)
+ : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
+ : GCC_OFF_SMALL_ASM() (rw->lock)
: "memory");
} else {
do {
@@ -406,9 +403,9 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
" sc %1, %0 \n"
" li %2, 1 \n"
"2: \n"
- : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp),
+ : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp),
"=&r" (ret)
- : GCC_OFF12_ASM() (rw->lock)
+ : GCC_OFF_SMALL_ASM() (rw->lock)
: "memory");
} while (unlikely(!tmp));
diff --git a/arch/mips/include/asm/spram.h b/arch/mips/include/asm/spram.h
index 0b89006e4907..0f90d88e464d 100644
--- a/arch/mips/include/asm/spram.h
+++ b/arch/mips/include/asm/spram.h
@@ -1,10 +1,10 @@
#ifndef _MIPS_SPRAM_H
#define _MIPS_SPRAM_H
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_MIPS_SPRAM)
extern __init void spram_config(void);
#else
static inline void spram_config(void) { };
-#endif /* CONFIG_CPU_MIPSR2 */
+#endif /* CONFIG_MIPS_SPRAM */
#endif /* _MIPS_SPRAM_H */
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index b188c797565c..28d6d9364bd1 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -40,7 +40,7 @@
LONG_S v1, PT_HI(sp)
mflhxu v1
LONG_S v1, PT_ACX(sp)
-#else
+#elif !defined(CONFIG_CPU_MIPSR6)
mfhi v1
#endif
#ifdef CONFIG_32BIT
@@ -50,7 +50,7 @@
LONG_S $10, PT_R10(sp)
LONG_S $11, PT_R11(sp)
LONG_S $12, PT_R12(sp)
-#ifndef CONFIG_CPU_HAS_SMARTMIPS
+#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
LONG_S v1, PT_HI(sp)
mflo v1
#endif
@@ -58,7 +58,7 @@
LONG_S $14, PT_R14(sp)
LONG_S $15, PT_R15(sp)
LONG_S $24, PT_R24(sp)
-#ifndef CONFIG_CPU_HAS_SMARTMIPS
+#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
LONG_S v1, PT_LO(sp)
#endif
#ifdef CONFIG_CPU_CAVIUM_OCTEON
@@ -226,7 +226,7 @@
mtlhx $24
LONG_L $24, PT_LO(sp)
mtlhx $24
-#else
+#elif !defined(CONFIG_CPU_MIPSR6)
LONG_L $24, PT_LO(sp)
mtlo $24
LONG_L $24, PT_HI(sp)
diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h
index b928b6f898cd..e92d6c4b5ed1 100644
--- a/arch/mips/include/asm/switch_to.h
+++ b/arch/mips/include/asm/switch_to.h
@@ -75,9 +75,12 @@ do { \
#endif
#define __clear_software_ll_bit() \
-do { \
- if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc) \
- ll_bit = 0; \
+do { if (cpu_has_rw_llb) { \
+ write_c0_lladdr(0); \
+ } else { \
+ if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc)\
+ ll_bit = 0; \
+ } \
} while (0)
#define switch_to(prev, next, last) \
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
index bb7963753730..6499d93ae68d 100644
--- a/arch/mips/include/asm/syscall.h
+++ b/arch/mips/include/asm/syscall.h
@@ -29,13 +29,7 @@
static inline long syscall_get_nr(struct task_struct *task,
struct pt_regs *regs)
{
- /* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */
- if ((config_enabled(CONFIG_32BIT) ||
- test_tsk_thread_flag(task, TIF_32BIT_REGS)) &&
- (regs->regs[2] == __NR_syscall))
- return regs->regs[4];
- else
- return regs->regs[2];
+ return current_thread_info()->syscall;
}
static inline unsigned long mips_get_syscall_arg(unsigned long *arg,
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 99eea59604e9..55ed6602204c 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -28,14 +28,14 @@ struct thread_info {
unsigned long tp_value; /* thread pointer */
__u32 cpu; /* current CPU */
int preempt_count; /* 0 => preemptable, <0 => BUG */
-
+ int r2_emul_return; /* 1 => Returning from R2 emulator */
mm_segment_t addr_limit; /*
* thread address space limit:
* 0x7fffffff for user-thread
* 0xffffffff for kernel-thread
*/
- struct restart_block restart_block;
struct pt_regs *regs;
+ long syscall; /* syscall number */
};
/*
@@ -49,9 +49,6 @@ struct thread_info {
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
}
#define init_thread_info (init_thread_union.thread_info)