| author | Ard Biesheuvel <ardb@kernel.org> | 2023-01-25 19:59:10 +0100 |
| --- | --- | --- |
| committer | Catalin Marinas <catalin.marinas@arm.com> | 2023-01-26 18:42:58 +0100 |
| commit | 2ced0f30a426c7301350681f838344d5aea711e3 (patch) | |
| tree | 5405b26c5d824a28a017765cb85bf06f87941e4f /arch/arm64/kernel | |
| parent | efi: arm64: enter with MMU and caches enabled (diff) | |
| download | linux-2ced0f30a426c7301350681f838344d5aea711e3.tar.xz linux-2ced0f30a426c7301350681f838344d5aea711e3.zip | |
arm64: head: Switch endianness before populating the ID map
Ensure that the endianness used for populating the ID map matches the
endianness that the running kernel will be using, as this is no longer
guaranteed now that create_idmap() is invoked before init_kernel_el().
Note that doing so is only safe if the MMU is off, as switching the
endianness with the MMU on would render the active ID map invalid. So
also clear the M bit when toggling the EE bit in SCTLR, and mark the
MMU as disabled at boot.
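The fix therefore boils down to one bit manipulation on the SCTLR_ELx value:
flip EE and clear M in a single write, so the data endianness changes and the
MMU is guaranteed to be off when it does. Below is a minimal C sketch of that
transformation, illustrative only: the helper name is made up, and the bit
positions mirror the architectural SCTLR_ELx layout (M at bit 0, EE at bit 25),
which the kernel also defines as SCTLR_ELx_M and SCTLR_ELx_EE.

#include <stdint.h>
#include <stdio.h>

/*
 * SCTLR_ELx bit positions per the Arm architecture; the kernel mirrors
 * these as SCTLR_ELx_M and SCTLR_ELx_EE in asm/sysreg.h.
 */
#define SCTLR_ELx_M	(1UL << 0)	/* MMU enable */
#define SCTLR_ELx_EE	(1UL << 25)	/* data endianness at ELx */

/*
 * Hypothetical model of what record_mmu_state() now does to the value it
 * writes back: toggle the endianness and force the MMU off.
 */
static uint64_t sctlr_switch_endianness(uint64_t sctlr)
{
	sctlr ^= SCTLR_ELx_EE;	/* flip EE: switch data accesses LE<->BE */
	sctlr &= ~SCTLR_ELx_M;	/* clear M: the active ID map is unusable now */
	return sctlr;
}

int main(void)
{
	uint64_t sctlr = SCTLR_ELx_EE | SCTLR_ELx_M;	/* e.g. BE with MMU on */

	printf("before: 0x%llx\n", (unsigned long long)sctlr);
	printf("after:  0x%llx\n",
	       (unsigned long long)sctlr_switch_endianness(sctlr));
	return 0;
}

The actual change below performs the same eor/bic on the value read from
SCTLR_EL1 or SCTLR_EL2, follows the write with an isb, and returns zero to
record the MMU as disabled.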
Note that the same issue has resulted in preserve_boot_args() recording
the contents of registers X0 ... X3 in the wrong byte order, although
this is arguably a very minor concern.
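The preserve_boot_args() effect can be illustrated with ordinary C: a 64-bit
value stored before the endianness switch reads back byte-reversed afterwards.
Userspace cannot flip SCTLR_ELx.EE, so the sketch below models the switch with
a 64-bit byte reversal, and the example value is an arbitrary stand-in for
whatever the bootloader left in x0.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Stand-in for a register recorded by preserve_boot_args(), e.g. x0. */
	uint64_t recorded = 0x0000000048000000ULL;

	/*
	 * The store happened before the endianness switch, so once the kernel
	 * reads it back in its native byte order the bytes appear reversed.
	 * The switch itself is modelled here with a byte reversal.
	 */
	uint64_t as_read_back = __builtin_bswap64(recorded);

	printf("value stored: 0x%016llx\n", (unsigned long long)recorded);
	printf("value read:   0x%016llx\n", (unsigned long long)as_read_back);
	return 0;
}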
Fixes: 32b135a7fafe ("arm64: head: avoid cache invalidation when entering with the MMU on")
Reported-by: Nathan Chancellor <nathan@kernel.org>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Tested-by: Nathan Chancellor <nathan@kernel.org>
Link: https://lore.kernel.org/r/20230125185910.962733-1-ardb@kernel.org
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r-- | arch/arm64/kernel/head.S | 23
1 file changed, 22 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index dc56e1d8f36e..107a2b87577c 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -129,10 +129,31 @@ SYM_CODE_START_LOCAL(record_mmu_state)
 	mrs	x19, sctlr_el1
 	b.ne	0f
 	mrs	x19, sctlr_el2
-0:	tst	x19, #SCTLR_ELx_C		// Z := (C == 0)
+0:
+CPU_LE( tbnz	x19, #SCTLR_ELx_EE_SHIFT, 1f	)
+CPU_BE( tbz	x19, #SCTLR_ELx_EE_SHIFT, 1f	)
+	tst	x19, #SCTLR_ELx_C		// Z := (C == 0)
 	and	x19, x19, #SCTLR_ELx_M		// isolate M bit
 	csel	x19, xzr, x19, eq		// clear x19 if Z
 	ret
+
+	/*
+	 * Set the correct endianness early so all memory accesses issued
+	 * before init_kernel_el() occur in the correct byte order. Note that
+	 * this means the MMU must be disabled, or the active ID map will end
+	 * up getting interpreted with the wrong byte order.
+	 */
+1:	eor	x19, x19, #SCTLR_ELx_EE
+	bic	x19, x19, #SCTLR_ELx_M
+	b.ne	2f
+	pre_disable_mmu_workaround
+	msr	sctlr_el2, x19
+	b	3f
+	pre_disable_mmu_workaround
+2:	msr	sctlr_el1, x19
+3:	isb
+	mov	x19, xzr
+	ret
 SYM_CODE_END(record_mmu_state)
 
 /*