author     Linus Torvalds <torvalds@linux-foundation.org>  2015-04-16 20:58:29 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-04-16 20:58:29 +0200
commit     714d8e7e27197dd39b2550e762a6a6fcf397a471 (patch)
tree       bc989a2a0e14f21912943e56d0002a26a2b7793e /arch/arm64/kvm
parent     Merge tag 'powerpc-4.1-1' of git://git.kernel.org/pub/scm/linux/kernel/git/mp... (diff)
parent     arm64: fix midr range for Cortex-A57 erratum 832075 (diff)
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Will Deacon:
"Here are the core arm64 updates for 4.1.
Highlights include a significant rework to head.S (allowing us to boot
on machines with physical memory at a really high address), an AES
performance boost on Cortex-A57 and the ability to run a 32-bit
userspace with 64k pages (although this requires said userspace to be
built with a recent binutils).
The head.S rework spilt over into KVM, so there are some changes under
arch/arm/ which have been acked by Marc Zyngier (KVM co-maintainer).
In particular, the linker script changes caused us some issues in
-next, so there are a few merge commits where we had to apply fixes on
top of a stable branch.
Other changes include:
- AES performance boost for Cortex-A57
- AArch32 (compat) userspace with 64k pages
- Cortex-A53 erratum workaround for #845719
- defconfig updates (new platforms, PCI, ...)"
* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (39 commits)
arm64: fix midr range for Cortex-A57 erratum 832075
arm64: errata: add workaround for cortex-a53 erratum #845719
arm64: Use bool function return values of true/false not 1/0
arm64: defconfig: updates for 4.1
arm64: Extract feature parsing code from cpu_errata.c
arm64: alternative: Allow immediate branch as alternative instruction
arm64: insn: Add aarch64_insn_decode_immediate
ARM: kvm: round HYP section to page size instead of log2 upper bound
ARM: kvm: assert on HYP section boundaries not actual code size
arm64: head.S: ensure idmap_t0sz is visible
arm64: pmu: add support for interrupt-affinity property
dt: pmu: extend ARM PMU binding to allow for explicit interrupt affinity
arm64: head.S: ensure visibility of page tables
arm64: KVM: use ID map with increased VA range if required
arm64: mm: increase VA range of identity map
ARM: kvm: implement replacement for ld's LOG2CEIL()
arm64: proc: remove unused cpu_get_pgd macro
arm64: enforce x1|x2|x3 == 0 upon kernel entry as per boot protocol
arm64: remove __calc_phys_offset
arm64: merge __enable_mmu and __turn_mmu_on
...
Diffstat (limited to 'arch/arm64/kvm')
-rw-r--r--  arch/arm64/kvm/hyp-init.S | 25 +++++++++++++++++++++++++
1 file changed, 25 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index c3191168a994..178ba2248a98 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -20,6 +20,7 @@
 #include <asm/assembler.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_mmu.h>
+#include <asm/pgtable-hwdef.h>

 	.text
 	.pushsection	.hyp.idmap.text, "ax"
@@ -65,6 +66,25 @@ __do_hyp_init:
 	and	x4, x4, x5
 	ldr	x5, =TCR_EL2_FLAGS
 	orr	x4, x4, x5
+
+#ifndef CONFIG_ARM64_VA_BITS_48
+	/*
+	 * If we are running with VA_BITS < 48, we may be running with an extra
+	 * level of translation in the ID map. This is only the case if system
+	 * RAM is out of range for the currently configured page size and number
+	 * of translation levels, in which case we will also need the extra
+	 * level for the HYP ID map, or we won't be able to enable the EL2 MMU.
+	 *
+	 * However, at EL2, there is only one TTBR register, and we can't switch
+	 * between translation tables *and* update TCR_EL2.T0SZ at the same
+	 * time. Bottom line: we need the extra level in *both* our translation
+	 * tables.
+	 *
+	 * So use the same T0SZ value we use for the ID map.
+	 */
+	ldr_l	x5, idmap_t0sz
+	bfi	x4, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
+#endif
 	msr	tcr_el2, x4

 	ldr	x4, =VTCR_EL2_FLAGS
@@ -91,6 +111,10 @@ __do_hyp_init:
 	msr	sctlr_el2, x4
 	isb

+	/* Skip the trampoline dance if we merged the boot and runtime PGDs */
+	cmp	x0, x1
+	b.eq	merged
+
 	/* MMU is now enabled. Get ready for the trampoline dance */
 	ldr	x4, =TRAMPOLINE_VA
 	adr	x5, target
@@ -105,6 +129,7 @@ target: /* We're now in the trampoline code, switch page tables */
 	tlbi	alle2
 	dsb	sy

+merged:
 	/* Set the stack and new vectors */
 	kern_hyp_va	x2
 	mov	sp, x2
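As background for the hunk above: T0SZ is the low six bits of TCR_EL2, and a
table programmed with a given T0SZ covers a 2^(64 - T0SZ) byte VA range, so
reusing idmap_t0sz gives the HYP table the same reach as the kernel's ID map.
Below is a minimal C sketch of that relationship and of what the bfi bitfield
insert does; the two TCR_* constants mirror pgtable-hwdef.h, but set_t0sz(),
va_bits() and the sample numbers in main() are hypothetical, for illustration
only, not kernel code.

	/* Illustrative sketch, not kernel code: models the effect of
	 * "bfi x4, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH" on the TCR_EL2
	 * image, plus the T0SZ -> VA-range relationship. */
	#include <stdint.h>
	#include <stdio.h>

	#define TCR_T0SZ_OFFSET	0	/* T0SZ occupies TCR bits [5:0] */
	#define TCR_TxSZ_WIDTH	6

	/* A table programmed with t0sz covers 2^(64 - t0sz) bytes of VA. */
	static unsigned int va_bits(uint64_t t0sz)
	{
		return 64 - (unsigned int)t0sz;
	}

	/* C equivalent of the bfi: insert t0sz into the T0SZ field of tcr. */
	static uint64_t set_t0sz(uint64_t tcr, uint64_t t0sz)
	{
		uint64_t mask = ((UINT64_C(1) << TCR_TxSZ_WIDTH) - 1)
				<< TCR_T0SZ_OFFSET;

		return (tcr & ~mask) | ((t0sz << TCR_T0SZ_OFFSET) & mask);
	}

	int main(void)
	{
		/* Hypothetical example: a 39-bit VA kernel whose RAM
		 * location forced the ID map up to 48 VA bits, so
		 * idmap_t0sz = 64 - 48 = 16. */
		uint64_t idmap_t0sz = 64 - 48;
		uint64_t tcr_el2 = set_t0sz(0, idmap_t0sz);

		printf("T0SZ=%u -> %u VA bits\n",
		       (unsigned int)(tcr_el2 & 0x3f),
		       va_bits(tcr_el2 & 0x3f));
		return 0;
	}

The design point made by the comment in the hunk is that EL2 has a single
TTBR and no way to switch tables and update T0SZ atomically, which is why the
same T0SZ (and hence the extra translation level) has to be used on both
sides of the trampoline switch.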