author     Max Filippov <jcmvbkbc@gmail.com>          2012-12-03 12:01:43 +0100
committer  Chris Zankel <chris@zankel.net>            2013-05-09 10:07:09 +0200
commit     e85e335f8ff615f74e29e09cc2599f095600114b (patch)
tree       8f09bbab5ca6a37f027fef17bf1de523ab574f10 /arch/xtensa
parent     xtensa: fix ibreakenable register update (diff)
xtensa: add MMU v3 support
MMUv3 comes out of reset with an identity vaddr -> paddr mapping in TLB way 6:

Way 6 (512 MB)
        Vaddr       Paddr       ASID  Attr RWX Cache
        ----------  ----------  ----  ---- --- -------
        0x00000000  0x00000000  0x01  0x03 RWX Bypass
        0x20000000  0x20000000  0x01  0x03 RWX Bypass
        0x40000000  0x40000000  0x01  0x03 RWX Bypass
        0x60000000  0x60000000  0x01  0x03 RWX Bypass
        0x80000000  0x80000000  0x01  0x03 RWX Bypass
        0xa0000000  0xa0000000  0x01  0x03 RWX Bypass
        0xc0000000  0xc0000000  0x01  0x03 RWX Bypass
        0xe0000000  0xe0000000  0x01  0x03 RWX Bypass

This patch adds remapping code at the reset vector or at the kernel _start
(depending on CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX) that reconfigures
MMUv3 as MMUv2:

Way 5 (128 MB)
        Vaddr       Paddr       ASID  Attr RWX Cache
        ----------  ----------  ----  ---- --- -------
        0xd0000000  0x00000000  0x01  0x07 RWX WB
        0xd8000000  0x00000000  0x01  0x03 RWX Bypass

Way 6 (256 MB)
        Vaddr       Paddr       ASID  Attr RWX Cache
        ----------  ----------  ----  ---- --- -------
        0xe0000000  0xf0000000  0x01  0x07 RWX WB
        0xf0000000  0xf0000000  0x01  0x03 RWX Bypass

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Chris Zankel <chris@zankel.net>
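For orientation only (not part of the patch): a minimal C sketch of the
virtual-to-physical translation implied by the MMUv2-style layout above,
i.e. the cached/uncached KSEG aliases at 0xd0000000/0xd8000000 and the KIO
aliases at 0xe0000000/0xf0000000. The constants come from the tables above;
the helper name and program are hypothetical illustration.

    #include <stdint.h>
    #include <stdio.h>

    /* MMUv2-style layout from the tables above (128 MB ways at
     * 0xd0000000/0xd8000000, 256 MB ways at 0xe0000000/0xf0000000). */
    static uint32_t kseg_virt_to_phys(uint32_t vaddr)
    {
            if (vaddr >= 0xd0000000u && vaddr < 0xd8000000u) /* KSEG, cached (WB) */
                    return vaddr - 0xd0000000u;
            if (vaddr >= 0xd8000000u && vaddr < 0xe0000000u) /* KSEG, bypass */
                    return vaddr - 0xd8000000u;
            if (vaddr >= 0xe0000000u && vaddr < 0xf0000000u) /* KIO, cached (WB) */
                    return vaddr - 0xe0000000u + 0xf0000000u;
            if (vaddr >= 0xf0000000u)                        /* KIO, bypass: identity */
                    return vaddr;
            return vaddr; /* below KSEG: handled via page tables, not modeled here */
    }

    int main(void)
    {
            printf("0xd0003000 -> 0x%08x\n", kseg_virt_to_phys(0xd0003000u)); /* 0x00003000 */
            printf("0xe0001000 -> 0x%08x\n", kseg_virt_to_phys(0xe0001000u)); /* 0xf0001000 */
            return 0;
    }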
Diffstat (limited to 'arch/xtensa')
-rw-r--r--  arch/xtensa/Kconfig                          29
-rw-r--r--  arch/xtensa/boot/boot-elf/Makefile            1
-rw-r--r--  arch/xtensa/boot/boot-elf/boot.lds.S         64
-rw-r--r--  arch/xtensa/boot/boot-elf/bootstrap.S       101
-rw-r--r--  arch/xtensa/boot/boot-uboot/Makefile          6
-rw-r--r--  arch/xtensa/include/asm/initialize_mmu.h    107
-rw-r--r--  arch/xtensa/include/asm/vectors.h           125
-rw-r--r--  arch/xtensa/kernel/Makefile                   2
-rw-r--r--  arch/xtensa/kernel/head.S                    37
-rw-r--r--  arch/xtensa/kernel/vectors.S                  3
-rw-r--r--  arch/xtensa/kernel/vmlinux.lds.S             48
-rw-r--r--  arch/xtensa/mm/mmu.c                         14
12 files changed, 451 insertions, 86 deletions
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index b09de49dbec5..213ac3d9f950 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -103,6 +103,35 @@ config MATH_EMULATION
help
Can we use information of configuration file?
+config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+ bool "Initialize Xtensa MMU inside the Linux kernel code"
+ default y
+ help
+ Earlier versions initialized the MMU in the exception vector
+ before jumping to _startup in head.S, with the advantage that
+ it was possible to place a software breakpoint at 'reset' and
+ then use normal kernel breakpoints once the MMU was switched
+ to the kernel mappings (0xD0000000).
+
+ This unfortunately doesn't work for U-Boot and likely also won't
+ work when using KEXEC to keep a hot kernel ready for a KDUMP.
+
+ So now the MMU is initialized in head.S, but it's necessary to
+ use hardware breakpoints (gdb 'hbreak' cmd) to break at _startup.
+ xt-gdb can't place a software breakpoint in the 0xD0000000 region
+ prior to mapping the MMU, and after mapping, even if low memory
+ were mapped, gdb wouldn't remove the breakpoint on hitting it
+ because the PC wouldn't match. Since hardware breakpoints are
+ recommended for Linux configurations, it seems reasonable to
+ assume they exist and leave this older mechanism for the
+ unfortunate souls who choose not to follow Tensilica's
+ recommendation.
+
+ Selecting this will cause U-Boot to set the kernel load and entry
+ address to 0x00003000 instead of the standard mapped address of
+ 0xD0003000.
+
+ If in doubt, say Y.
+
endmenu
config XTENSA_CALIBRATE_CCOUNT
diff --git a/arch/xtensa/boot/boot-elf/Makefile b/arch/xtensa/boot/boot-elf/Makefile
index 1fe01b78c124..89db089f5a12 100644
--- a/arch/xtensa/boot/boot-elf/Makefile
+++ b/arch/xtensa/boot/boot-elf/Makefile
@@ -12,6 +12,7 @@ endif
export OBJCOPY_ARGS
export CPPFLAGS_boot.lds += -P -C
+export KBUILD_AFLAGS += -mtext-section-literals
boot-y := bootstrap.o
diff --git a/arch/xtensa/boot/boot-elf/boot.lds.S b/arch/xtensa/boot/boot-elf/boot.lds.S
index 7b646e0a6486..932b58ef33d4 100644
--- a/arch/xtensa/boot/boot-elf/boot.lds.S
+++ b/arch/xtensa/boot/boot-elf/boot.lds.S
@@ -1,41 +1,29 @@
-#include <variant/core.h>
+/*
+ * linux/arch/xtensa/boot/boot-elf/boot.lds.S
+ *
+ * Copyright (C) 2008 - 2013 by Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Marc Gauthier <marc@tensilica.com>
+ * Pete Delaney <piet@tensilica.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/vectors.h>
OUTPUT_ARCH(xtensa)
ENTRY(_ResetVector)
SECTIONS
{
- .start 0xD0000000 : { *(.start) }
-
- .text 0xD0000000:
- {
- __reloc_start = . ;
- _text_start = . ;
- *(.literal .text.literal .text)
- _text_end = . ;
- }
-
- .rodata ALIGN(0x04):
- {
- *(.rodata)
- *(.rodata1)
- }
-
- .data ALIGN(0x04):
+ .ResetVector.text XCHAL_RESET_VECTOR_VADDR :
{
- *(.data)
- *(.data1)
- *(.sdata)
- *(.sdata2)
- *(.got.plt)
- *(.got)
- *(.dynamic)
+ *(.ResetVector.text)
}
- __reloc_end = . ;
-
- . = ALIGN(0x10);
- __image_load = . ;
- .image 0xd0001000:
+ .image KERNELOFFSET: AT (LOAD_MEMORY_ADDRESS)
{
_image_start = .;
*(image)
@@ -43,7 +31,6 @@ SECTIONS
_image_end = . ;
}
-
.bss ((LOADADDR(.image) + SIZEOF(.image) + 3) & ~ 3):
{
__bss_start = .;
@@ -53,14 +40,15 @@ SECTIONS
*(.bss)
__bss_end = .;
}
- _end = .;
- _param_start = .;
- .ResetVector.text XCHAL_RESET_VECTOR_VADDR :
+ /*
+ * This is a remapped copy of the Reset Vector Code.
+ * It keeps gdb in sync with the PC after switching
+ * to the temporary mapping used while setting up
+ * the V2 MMU mappings for Linux.
+ */
+ .ResetVector.remapped_text 0x46000000 (INFO):
{
- *(.ResetVector.text)
+ *(.ResetVector.remapped_text)
}
-
-
- PROVIDE (end = .);
}
diff --git a/arch/xtensa/boot/boot-elf/bootstrap.S b/arch/xtensa/boot/boot-elf/bootstrap.S
index 464298bc348b..1388a499753b 100644
--- a/arch/xtensa/boot/boot-elf/bootstrap.S
+++ b/arch/xtensa/boot/boot-elf/bootstrap.S
@@ -1,18 +1,77 @@
+/*
+ * arch/xtensa/boot/boot-elf/bootstrap.S
+ *
+ * Low-level exception handling
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004 - 2013 by Tensilica Inc.
+ *
+ * Chris Zankel <chris@zankel.net>
+ * Marc Gauthier <marc@tensilica.com>
+ * Piet Delaney <piet@tensilica.com>
+ */
#include <asm/bootparam.h>
+#include <asm/processor.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/cacheasm.h>
+#include <asm/initialize_mmu.h>
+#include <linux/linkage.h>
-
-/* ResetVector
- */
- .section .ResetVector.text, "ax"
+ .section .ResetVector.text, "ax"
.global _ResetVector
+ .global reset
+
_ResetVector:
- _j reset
+ _j _SetupMMU
+
+ .begin no-absolute-literals
+ .literal_position
+
.align 4
RomInitAddr:
- .word 0xd0001000
+#if defined(CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX) && \
+ XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
+ .word 0x00003000
+#else
+ .word 0xd0003000
+#endif
RomBootParam:
.word _bootparam
+_bootparam:
+ .short BP_TAG_FIRST
+ .short 4
+ .long BP_VERSION
+ .short BP_TAG_LAST
+ .short 0
+ .long 0
+
+ .align 4
+_SetupMMU:
+ movi a0, 0
+ wsr a0, windowbase
+ rsync
+ movi a0, 1
+ wsr a0, windowstart
+ rsync
+ movi a0, 0x1F
+ wsr a0, ps
+ rsync
+
+ Offset = _SetupMMU - _ResetVector
+
+#ifndef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+ initialize_mmu
+#endif
+
+ .end no-absolute-literals
+
+ rsil a0, XCHAL_DEBUGLEVEL-1
+ rsync
reset:
l32r a0, RomInitAddr
l32r a2, RomBootParam
@@ -21,13 +80,25 @@ reset:
jx a0
.align 4
- .section .bootstrap.data, "aw"
- .globl _bootparam
-_bootparam:
- .short BP_TAG_FIRST
- .short 4
- .long BP_VERSION
- .short BP_TAG_LAST
- .short 0
- .long 0
+ .section .ResetVector.remapped_text, "x"
+ .global _RemappedResetVector
+
+ /* Do org before literals */
+ .org 0
+
+_RemappedResetVector:
+ .begin no-absolute-literals
+ .literal_position
+
+ _j _RemappedSetupMMU
+
+ /* Position Remapped code at the same location as the original code */
+ . = _RemappedResetVector + Offset
+
+_RemappedSetupMMU:
+#ifndef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+ initialize_mmu
+#endif
+
+ .end no-absolute-literals
diff --git a/arch/xtensa/boot/boot-uboot/Makefile b/arch/xtensa/boot/boot-uboot/Makefile
index bfbf8af582f1..545759819ef9 100644
--- a/arch/xtensa/boot/boot-uboot/Makefile
+++ b/arch/xtensa/boot/boot-uboot/Makefile
@@ -4,7 +4,11 @@
# for more details.
#
-UIMAGE_LOADADDR = 0xd0001000
+ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+UIMAGE_LOADADDR = 0x00003000
+else
+UIMAGE_LOADADDR = 0xd0003000
+endif
UIMAGE_COMPRESSION = gzip
$(obj)/../uImage: vmlinux.bin.gz FORCE
diff --git a/arch/xtensa/include/asm/initialize_mmu.h b/arch/xtensa/include/asm/initialize_mmu.h
index e1f8ba4061ed..722553f17db3 100644
--- a/arch/xtensa/include/asm/initialize_mmu.h
+++ b/arch/xtensa/include/asm/initialize_mmu.h
@@ -23,6 +23,9 @@
#ifndef _XTENSA_INITIALIZE_MMU_H
#define _XTENSA_INITIALIZE_MMU_H
+#include <asm/pgtable.h>
+#include <asm/vectors.h>
+
#ifdef __ASSEMBLY__
#define XTENSA_HWVERSION_RC_2009_0 230000
@@ -48,6 +51,110 @@
* (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
*/
+#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
+/*
+ * Have MMU v3
+ */
+
+#if !XCHAL_HAVE_VECBASE
+# error "MMU v3 requires reloc vectors"
+#endif
+
+ movi a1, 0
+ _call0 1f
+ _j 2f
+
+ .align 4
+1: movi a2, 0x10000000
+ movi a3, 0x18000000
+ add a2, a2, a0
+9: bgeu a2, a3, 9b /* PC is out of the expected range */
+
+ /* Step 1: invalidate mapping at 0x40000000..0x5FFFFFFF. */
+
+ movi a2, 0x40000006
+ idtlb a2
+ iitlb a2
+ isync
+
+ /* Step 2: map 0x40000000..0x47FFFFFF to paddr containing this code
+ * and jump to the new mapping.
+ */
+#define CA_BYPASS (_PAGE_CA_BYPASS | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
+#define CA_WRITEBACK (_PAGE_CA_WB | _PAGE_HW_WRITE | _PAGE_HW_EXEC)
+
+ srli a3, a0, 27
+ slli a3, a3, 27
+ addi a3, a3, CA_BYPASS
+ addi a7, a2, -1
+ wdtlb a3, a7
+ witlb a3, a7
+ isync
+
+ slli a4, a0, 5
+ srli a4, a4, 5
+ addi a5, a2, -6
+ add a4, a4, a5
+ jx a4
+
+ /* Step 3: unmap everything other than current area.
+ * Start at 0x60000000, wrap around, and end with 0x20000000
+ */
+2: movi a4, 0x20000000
+ add a5, a2, a4
+3: idtlb a5
+ iitlb a5
+ add a5, a5, a4
+ bne a5, a2, 3b
+
+ /* Step 4: Setup MMU with the old V2 mappings. */
+ movi a6, 0x01000000
+ wsr a6, ITLBCFG
+ wsr a6, DTLBCFG
+ isync
+
+ movi a5, 0xd0000005
+ movi a4, CA_WRITEBACK
+ wdtlb a4, a5
+ witlb a4, a5
+
+ movi a5, 0xd8000005
+ movi a4, CA_BYPASS
+ wdtlb a4, a5
+ witlb a4, a5
+
+ movi a5, 0xe0000006
+ movi a4, 0xf0000000 + CA_WRITEBACK
+ wdtlb a4, a5
+ witlb a4, a5
+
+ movi a5, 0xf0000006
+ movi a4, 0xf0000000 + CA_BYPASS
+ wdtlb a4, a5
+ witlb a4, a5
+
+ isync
+
+ /* Jump to self, using MMU v2 mappings. */
+ movi a4, 1f
+ jx a4
+
+1:
+ movi a2, VECBASE_RESET_VADDR
+ wsr a2, vecbase
+
+ /* Step 5: remove temporary mapping. */
+ idtlb a7
+ iitlb a7
+ isync
+
+ movi a0, 0
+ wsr a0, ptevaddr
+ rsync
+
+#endif /* defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU &&
+ XCHAL_HAVE_SPANNING_WAY */
+
.endm
#endif /*__ASSEMBLY__*/
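
A reading aid, not part of the patch: Step 4 above writes four TLB entries
with wdtlb/witlb, where (by the usual Xtensa TLB-write convention, assumed
here) the address operand carries the virtual page plus the way index and
the data operand carries the physical page plus the cache-attribute bits;
the attribute values 0x03/0x07 match the Attr column of the commit-message
tables. A small C sketch that reproduces those operand pairs:

    #include <stdint.h>
    #include <stdio.h>

    /* Attribute values as shown in the commit-message tables (assumed to be
     * what the CA_BYPASS/CA_WRITEBACK macros in the patch expand to). */
    #define CA_BYPASS       0x03u   /* uncached, +write +exec */
    #define CA_WRITEBACK    0x07u   /* write-back cached, +write +exec */

    struct tlb_write {
            uint32_t addr_operand;  /* vaddr | way */
            uint32_t data_operand;  /* paddr | cache attribute */
    };

    static const struct tlb_write step4[] = {
            { 0xd0000000u | 5, 0x00000000u | CA_WRITEBACK }, /* KSEG cached   */
            { 0xd8000000u | 5, 0x00000000u | CA_BYPASS    }, /* KSEG uncached */
            { 0xe0000000u | 6, 0xf0000000u | CA_WRITEBACK }, /* KIO cached    */
            { 0xf0000000u | 6, 0xf0000000u | CA_BYPASS    }, /* KIO uncached  */
    };

    int main(void)
    {
            /* Each pair mirrors one wdtlb/witlb pair in Step 4 of the macro. */
            for (unsigned i = 0; i < sizeof(step4) / sizeof(step4[0]); i++)
                    printf("wdtlb/witlb  data=0x%08x  addr=0x%08x\n",
                           step4[i].data_operand, step4[i].addr_operand);
            return 0;
    }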
diff --git a/arch/xtensa/include/asm/vectors.h b/arch/xtensa/include/asm/vectors.h
new file mode 100644
index 000000000000..c52b656d0310
--- /dev/null
+++ b/arch/xtensa/include/asm/vectors.h
@@ -0,0 +1,125 @@
+/*
+ * arch/xtensa/include/asm/vectors.h
+ *
+ * Xtensa macros for MMU V3 Support. Deals with re-mapping the Virtual
+ * Memory Addresses from "Virtual == Physical" to their previous V2 MMU
+ * mappings (KSEG at 0xD0000000 and KIO at 0XF0000000).
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 - 2012 Tensilica Inc.
+ *
+ * Pete Delaney <piet@tensilica.com>
+ * Marc Gauthier <marc@tensilica.com>
+ */
+
+#ifndef _XTENSA_VECTORS_H
+#define _XTENSA_VECTORS_H
+
+#include <variant/core.h>
+
+#if defined(CONFIG_MMU)
+
+/* Will Become VECBASE */
+#define VIRTUAL_MEMORY_ADDRESS 0xD0000000
+
+/* Image Virtual Start Address */
+#define KERNELOFFSET 0xD0003000
+
+#if defined(XCHAL_HAVE_PTP_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
+ /* MMU v3 - XCHAL_HAVE_PTP_MMU == 1 */
+ #define PHYSICAL_MEMORY_ADDRESS 0x00000000
+ #define LOAD_MEMORY_ADDRESS 0x00003000
+#else
+ /* MMU V2 - XCHAL_HAVE_PTP_MMU == 0 */
+ #define PHYSICAL_MEMORY_ADDRESS 0xD0000000
+ #define LOAD_MEMORY_ADDRESS 0xD0003000
+#endif
+
+#else /* !defined(CONFIG_MMU) */
+ /* MMU Not being used - Virtual == Physical */
+
+ /* VECBASE */
+ #define VIRTUAL_MEMORY_ADDRESS 0x00002000
+
+ /* Location of the start of the kernel text, _start */
+ #define KERNELOFFSET 0x00003000
+ #define PHYSICAL_MEMORY_ADDRESS 0x00000000
+
+ /* Loaded just above possibly live vectors */
+ #define LOAD_MEMORY_ADDRESS 0x00003000
+
+#endif /* CONFIG_MMU */
+
+#define XC_VADDR(offset) (VIRTUAL_MEMORY_ADDRESS + offset)
+#define XC_PADDR(offset) (PHYSICAL_MEMORY_ADDRESS + offset)
+
+/* Used to set VECBASE register */
+#define VECBASE_RESET_VADDR VIRTUAL_MEMORY_ADDRESS
+
+#define RESET_VECTOR_VECOFS (XCHAL_RESET_VECTOR_VADDR - \
+ VECBASE_RESET_VADDR)
+#define RESET_VECTOR_VADDR XC_VADDR(RESET_VECTOR_VECOFS)
+
+#define RESET_VECTOR1_VECOFS (XCHAL_RESET_VECTOR1_VADDR - \
+ VECBASE_RESET_VADDR)
+#define RESET_VECTOR1_VADDR XC_VADDR(RESET_VECTOR1_VECOFS)
+
+#if XCHAL_HAVE_VECBASE
+
+#define USER_VECTOR_VADDR XC_VADDR(XCHAL_USER_VECOFS)
+#define KERNEL_VECTOR_VADDR XC_VADDR(XCHAL_KERNEL_VECOFS)
+#define DOUBLEEXC_VECTOR_VADDR XC_VADDR(XCHAL_DOUBLEEXC_VECOFS)
+#define WINDOW_VECTORS_VADDR XC_VADDR(XCHAL_WINDOW_OF4_VECOFS)
+#define INTLEVEL2_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL2_VECOFS)
+#define INTLEVEL3_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL3_VECOFS)
+#define INTLEVEL4_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL4_VECOFS)
+#define INTLEVEL5_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL5_VECOFS)
+#define INTLEVEL6_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL6_VECOFS)
+
+#define DEBUG_VECTOR_VADDR XC_VADDR(XCHAL_DEBUG_VECOFS)
+
+#undef XCHAL_NMI_VECTOR_VADDR
+#define XCHAL_NMI_VECTOR_VADDR XC_VADDR(XCHAL_NMI_VECOFS)
+
+#undef XCHAL_INTLEVEL7_VECTOR_VADDR
+#define XCHAL_INTLEVEL7_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL7_VECOFS)
+
+/*
+ * These XCHAL_* #defines from variant/core.h
+ * are not valid to use with V3 MMU. Non-XCHAL
+ * constants are defined above and should be used.
+ */
+#undef XCHAL_VECBASE_RESET_VADDR
+#undef XCHAL_RESET_VECTOR0_VADDR
+#undef XCHAL_USER_VECTOR_VADDR
+#undef XCHAL_KERNEL_VECTOR_VADDR
+#undef XCHAL_DOUBLEEXC_VECTOR_VADDR
+#undef XCHAL_WINDOW_VECTORS_VADDR
+#undef XCHAL_INTLEVEL2_VECTOR_VADDR
+#undef XCHAL_INTLEVEL3_VECTOR_VADDR
+#undef XCHAL_INTLEVEL4_VECTOR_VADDR
+#undef XCHAL_INTLEVEL5_VECTOR_VADDR
+#undef XCHAL_INTLEVEL6_VECTOR_VADDR
+#undef XCHAL_DEBUG_VECTOR_VADDR
+#undef XCHAL_NMI_VECTOR_VADDR
+#undef XCHAL_INTLEVEL7_VECTOR_VADDR
+
+#else
+
+#define USER_VECTOR_VADDR XCHAL_USER_VECTOR_VADDR
+#define KERNEL_VECTOR_VADDR XCHAL_KERNEL_VECTOR_VADDR
+#define DOUBLEEXC_VECTOR_VADDR XCHAL_DOUBLEEXC_VECTOR_VADDR
+#define WINDOW_VECTORS_VADDR XCHAL_WINDOW_VECTORS_VADDR
+#define INTLEVEL2_VECTOR_VADDR XCHAL_INTLEVEL2_VECTOR_VADDR
+#define INTLEVEL3_VECTOR_VADDR XCHAL_INTLEVEL3_VECTOR_VADDR
+#define INTLEVEL4_VECTOR_VADDR XCHAL_INTLEVEL4_VECTOR_VADDR
+#define INTLEVEL5_VECTOR_VADDR XCHAL_INTLEVEL5_VECTOR_VADDR
+#define INTLEVEL6_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR
+#define DEBUG_VECTOR_VADDR XCHAL_DEBUG_VECTOR_VADDR
+
+#endif
+
+#endif /* _XTENSA_VECTORS_H */
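
Also not part of the patch: a tiny C model of the XC_VADDR() relocation
scheme the header uses for CONFIG_MMU configurations, showing how
configuration-defined vector offsets land on the 0xD0000000 VECBASE. The
XCHAL_*_VECOFS values below are hypothetical placeholders; real ones come
from variant/core.h.

    #include <stdio.h>

    #define VIRTUAL_MEMORY_ADDRESS  0xD0000000u              /* becomes VECBASE */
    #define XC_VADDR(offset)        (VIRTUAL_MEMORY_ADDRESS + (offset))

    /* Hypothetical per-variant offsets; real values come from variant/core.h. */
    #define XCHAL_KERNEL_VECOFS     0x00000300u
    #define XCHAL_USER_VECOFS       0x00000340u
    #define XCHAL_DOUBLEEXC_VECOFS  0x000003C0u

    int main(void)
    {
            printf("VECBASE_RESET_VADDR    = 0x%08x\n", VIRTUAL_MEMORY_ADDRESS);
            printf("KERNEL_VECTOR_VADDR    = 0x%08x\n", XC_VADDR(XCHAL_KERNEL_VECOFS));
            printf("USER_VECTOR_VADDR      = 0x%08x\n", XC_VADDR(XCHAL_USER_VECOFS));
            printf("DOUBLEEXC_VECTOR_VADDR = 0x%08x\n", XC_VADDR(XCHAL_DOUBLEEXC_VECOFS));
            return 0;
    }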
diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile
index c3a59d992ac0..c433a56fbff9 100644
--- a/arch/xtensa/kernel/Makefile
+++ b/arch/xtensa/kernel/Makefile
@@ -12,6 +12,8 @@ obj-$(CONFIG_KGDB) += xtensa-stub.o
obj-$(CONFIG_PCI) += pci.o
obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o
+AFLAGS_head.o += -mtext-section-literals
+
# In the Xtensa architecture, assembly generates literals which must always
# precede the L32R instruction with a relative offset less than 256 kB.
# Therefore, the .text and .literal section must be combined in parenthesis
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index 4566683abc8d..ef12c0e6fa25 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -48,17 +48,36 @@
*/
__HEAD
+ .begin no-absolute-literals
+
ENTRY(_start)
- _j 2f
+ /* Preserve the pointer to the boot parameter list in EXCSAVE_1 */
+ wsr a2, excsave1
+ _j _SetupMMU
+
+ .align 4
+ .literal_position
+.Lstartup:
+ .word _startup
+
.align 4
-1: .word _startup
-2: l32r a0, 1b
+ .global _SetupMMU
+_SetupMMU:
+ Offset = _SetupMMU - _start
+
+#ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+ initialize_mmu
+#endif
+ .end no-absolute-literals
+
+ l32r a0, .Lstartup
jx a0
ENDPROC(_start)
- .section .init.text, "ax"
+ __INIT
+ .literal_position
ENTRY(_startup)
@@ -67,10 +86,6 @@ ENTRY(_startup)
movi a0, LOCKLEVEL
wsr a0, ps
- /* Preserve the pointer to the boot parameter list in EXCSAVE_1 */
-
- wsr a2, excsave1
-
/* Start with a fresh windowbase and windowstart. */
movi a1, 1
@@ -158,8 +173,6 @@ ENTRY(_startup)
isync
- initialize_mmu
-
/* Unpack data sections
*
* The linker script used to build the Linux kernel image
@@ -207,6 +220,10 @@ ENTRY(_startup)
___flush_dcache_all a2 a3
#endif
+ memw
+ isync
+ ___invalidate_icache_all a2 a3
+ isync
/* Setup stack and enable window exceptions (keep irqs disabled) */
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
index 82109b42e240..a7e1d0834c68 100644
--- a/arch/xtensa/kernel/vectors.S
+++ b/arch/xtensa/kernel/vectors.S
@@ -50,6 +50,7 @@
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/thread_info.h>
+#include <asm/vectors.h>
#define WINDOW_VECTORS_SIZE 0x180
@@ -220,7 +221,7 @@ ENTRY(_DoubleExceptionVector)
xsr a0, depc # get DEPC, save a0
- movi a3, XCHAL_WINDOW_VECTORS_VADDR
+ movi a3, WINDOW_VECTORS_VADDR
_bltu a0, a3, .Lfixup
addi a3, a3, WINDOW_VECTORS_SIZE
_bgeu a0, a3, .Lfixup
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index 14695240536d..21acd11b5df2 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -18,6 +18,7 @@
#include <asm/page.h>
#include <asm/thread_info.h>
+#include <asm/vectors.h>
#include <variant/core.h>
#include <platform/hardware.h>
OUTPUT_ARCH(xtensa)
@@ -30,7 +31,7 @@ jiffies = jiffies_64;
#endif
#ifndef KERNELOFFSET
-#define KERNELOFFSET 0xd0001000
+#define KERNELOFFSET 0xd0003000
#endif
/* Note: In the following macros, it would be nice to specify only the
@@ -185,16 +186,16 @@ SECTIONS
SECTION_VECTOR (_WindowVectors_text,
.WindowVectors.text,
- XCHAL_WINDOW_VECTORS_VADDR, 4,
+ WINDOW_VECTORS_VADDR, 4,
.dummy)
SECTION_VECTOR (_DebugInterruptVector_literal,
.DebugInterruptVector.literal,
- XCHAL_DEBUG_VECTOR_VADDR - 4,
+ DEBUG_VECTOR_VADDR - 4,
SIZEOF(.WindowVectors.text),
.WindowVectors.text)
SECTION_VECTOR (_DebugInterruptVector_text,
.DebugInterruptVector.text,
- XCHAL_DEBUG_VECTOR_VADDR,
+ DEBUG_VECTOR_VADDR,
4,
.DebugInterruptVector.literal)
#undef LAST
@@ -202,7 +203,7 @@ SECTIONS
#if XCHAL_EXCM_LEVEL >= 2
SECTION_VECTOR (_Level2InterruptVector_text,
.Level2InterruptVector.text,
- XCHAL_INTLEVEL2_VECTOR_VADDR,
+ INTLEVEL2_VECTOR_VADDR,
SIZEOF(LAST), LAST)
# undef LAST
# define LAST .Level2InterruptVector.text
@@ -210,7 +211,7 @@ SECTIONS
#if XCHAL_EXCM_LEVEL >= 3
SECTION_VECTOR (_Level3InterruptVector_text,
.Level3InterruptVector.text,
- XCHAL_INTLEVEL3_VECTOR_VADDR,
+ INTLEVEL3_VECTOR_VADDR,
SIZEOF(LAST), LAST)
# undef LAST
# define LAST .Level3InterruptVector.text
@@ -218,7 +219,7 @@ SECTIONS
#if XCHAL_EXCM_LEVEL >= 4
SECTION_VECTOR (_Level4InterruptVector_text,
.Level4InterruptVector.text,
- XCHAL_INTLEVEL4_VECTOR_VADDR,
+ INTLEVEL4_VECTOR_VADDR,
SIZEOF(LAST), LAST)
# undef LAST
# define LAST .Level4InterruptVector.text
@@ -226,7 +227,7 @@ SECTIONS
#if XCHAL_EXCM_LEVEL >= 5
SECTION_VECTOR (_Level5InterruptVector_text,
.Level5InterruptVector.text,
- XCHAL_INTLEVEL5_VECTOR_VADDR,
+ INTLEVEL5_VECTOR_VADDR,
SIZEOF(LAST), LAST)
# undef LAST
# define LAST .Level5InterruptVector.text
@@ -234,39 +235,39 @@ SECTIONS
#if XCHAL_EXCM_LEVEL >= 6
SECTION_VECTOR (_Level6InterruptVector_text,
.Level6InterruptVector.text,
- XCHAL_INTLEVEL6_VECTOR_VADDR,
+ INTLEVEL6_VECTOR_VADDR,
SIZEOF(LAST), LAST)
# undef LAST
# define LAST .Level6InterruptVector.text
#endif
SECTION_VECTOR (_KernelExceptionVector_literal,
.KernelExceptionVector.literal,
- XCHAL_KERNEL_VECTOR_VADDR - 4,
+ KERNEL_VECTOR_VADDR - 4,
SIZEOF(LAST), LAST)
#undef LAST
SECTION_VECTOR (_KernelExceptionVector_text,
.KernelExceptionVector.text,
- XCHAL_KERNEL_VECTOR_VADDR,
+ KERNEL_VECTOR_VADDR,
4,
.KernelExceptionVector.literal)
SECTION_VECTOR (_UserExceptionVector_literal,
.UserExceptionVector.literal,
- XCHAL_USER_VECTOR_VADDR - 4,
+ USER_VECTOR_VADDR - 4,
SIZEOF(.KernelExceptionVector.text),
.KernelExceptionVector.text)
SECTION_VECTOR (_UserExceptionVector_text,
.UserExceptionVector.text,
- XCHAL_USER_VECTOR_VADDR,
+ USER_VECTOR_VADDR,
4,
.UserExceptionVector.literal)
SECTION_VECTOR (_DoubleExceptionVector_literal,
.DoubleExceptionVector.literal,
- XCHAL_DOUBLEEXC_VECTOR_VADDR - 16,
+ DOUBLEEXC_VECTOR_VADDR - 16,
SIZEOF(.UserExceptionVector.text),
.UserExceptionVector.text)
SECTION_VECTOR (_DoubleExceptionVector_text,
.DoubleExceptionVector.text,
- XCHAL_DOUBLEEXC_VECTOR_VADDR,
+ DOUBLEEXC_VECTOR_VADDR,
32,
.DoubleExceptionVector.literal)
@@ -284,11 +285,26 @@ SECTIONS
. = ALIGN(0x10);
.bootstrap : { *(.bootstrap.literal .bootstrap.text .bootstrap.data) }
- .ResetVector.text XCHAL_RESET_VECTOR_VADDR :
+ .ResetVector.text RESET_VECTOR_VADDR :
{
*(.ResetVector.text)
}
+
+ /*
+ * This is a remapped copy of the Secondary Reset Vector Code.
+ * It keeps gdb in sync with the PC after switching
+ * to the temporary mapping used while setting up
+ * the V2 MMU mappings for Linux.
+ *
+ * Only debug information about this section is put in the kernel image.
+ */
+ .SecondaryResetVector.remapped_text 0x46000000 (INFO):
+ {
+ *(.SecondaryResetVector.remapped_text)
+ }
+
+
.xt.lit : { *(.xt.lit) }
.xt.prop : { *(.xt.prop) }
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
index 0f77f9d3bb8b..a1077570e383 100644
--- a/arch/xtensa/mm/mmu.c
+++ b/arch/xtensa/mm/mmu.c
@@ -24,15 +24,19 @@ void __init paging_init(void)
*/
void __init init_mmu(void)
{
- /* Writing zeros to the <t>TLBCFG special registers ensure
- * that valid values exist in the register. For existing
- * PGSZID<w> fields, zero selects the first element of the
- * page-size array. For nonexistent PGSZID<w> fields, zero is
- * the best value to write. Also, when changing PGSZID<w>
+#if !(XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY)
+ /*
+ * Writing zeros to the instruction and data TLBCFG special
+ * registers ensure that valid values exist in the register.
+ *
+ * For existing PGSZID<w> fields, zero selects the first element
+ * of the page-size array. For nonexistent PGSZID<w> fields,
+ * zero is the best value to write. Also, when changing PGSZID<w>
* fields, the corresponding TLB must be flushed.
*/
set_itlbcfg_register(0);
set_dtlbcfg_register(0);
+#endif
flush_tlb_all();
/* Set rasid register to a known value. */