author     Marc Zyngier <marc.zyngier@arm.com>   2018-02-27 18:38:08 +0100
committer  Marc Zyngier <marc.zyngier@arm.com>   2018-03-19 14:06:01 +0100
commit     71dcb8be6d29cffff3f4a4463232f38786e97797
tree       bb065a9d3f86c74f0324e3deeaa2f12e301ed70e /arch/arm64/kernel
parent     arm64: KVM: Reserve 4 additional instructions in the BPI template
arm64: KVM: Allow far branches from vector slots to the main vectors
So far, the branch from the vector slots to the main vectors can at
most be 4GB from the main vectors (the reach of ADRP), and this
distance is known at compile time. If we were to remap the slots to
an unrelated VA, things would break badly.

A way to achieve VA independence would be to load the absolute
address of the vectors (__kvm_hyp_vector), either using a constant
pool or a series of movs, followed by an indirect branch.

This patch implements the latter solution, using another instance of
a patching callback. Note that since we have to save a register pair
on the stack, we branch to the *second* instruction in the vectors
in order to compensate for it. This also results in having to adjust
this balance in the invalid vector entry point.

Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
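[Editor's note] The callback named below, kvm_patch_vector_branch, lives under
arch/arm64/kvm (va_layout.c), so it is outside this diffstat. For illustration
only, here is a minimal sketch of a callback of this shape, built on the
kernel's aarch64_insn_gen_*() helpers from <asm/insn.h>. hyp_va_of() is a
hypothetical stand-in for the hyp VA computation, and the offset handling is
an assumption, not the commit's verbatim code:

#include <linux/bits.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/insn.h>

extern char __kvm_hyp_vector[];

/* Hypothetical helper standing in for the kern_hyp_va() computation */
u64 hyp_va_of(const void *kern_va);

void kvm_patch_vector_branch(struct alt_instr *alt,
			     __le32 *origptr, __le32 *updptr, int nr_inst)
{
	u64 addr;
	u32 insn;

	/* The hyp_ventry template reserves exactly 5 instruction slots */
	BUG_ON(nr_inst != 5);

	/*
	 * addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + 4.
	 * Each vector entry is 128 bytes (.align 7), so bits [10:7] of
	 * the slot's own address give the vector-offset; the final +4
	 * skips the target's first instruction, balancing the stp below.
	 */
	addr = hyp_va_of(__kvm_hyp_vector);
	addr |= (u64)origptr & GENMASK_ULL(10, 7);
	addr += AARCH64_INSN_SIZE;

	/* stp x0, x1, [sp, #-16]! -- save a pair to free x0 as scratch */
	insn = aarch64_insn_gen_load_store_pair(AARCH64_INSN_REG_0,
						AARCH64_INSN_REG_1,
						AARCH64_INSN_REG_SP,
						-16,
						AARCH64_INSN_VARIANT_64BIT,
						AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX);
	*updptr++ = cpu_to_le32(insn);

	/* movz x0, #(addr & 0xffff) */
	insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0, (u16)addr, 0,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_ZERO);
	*updptr++ = cpu_to_le32(insn);

	/* movk x0, #((addr >> 16) & 0xffff), lsl #16 */
	insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0, (u16)(addr >> 16),
					 16, AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_KEEP);
	*updptr++ = cpu_to_le32(insn);

	/* movk x0, #((addr >> 32) & 0xffff), lsl #32 */
	insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0, (u16)(addr >> 32),
					 32, AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_KEEP);
	*updptr++ = cpu_to_le32(insn);

	/* br x0 */
	insn = aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_0,
					   AARCH64_INSN_BRANCH_NOLINK);
	*updptr++ = cpu_to_le32(insn);
}

The five generated instructions overwrite the "b + 4 nops" template one slot
each, which is why the parent commit reserves exactly four extra instructions
in the BPI template.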
Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--  arch/arm64/kernel/bpi.S  |  21 +++++++++++++++++++++
1 file changed, 21 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/kernel/bpi.S b/arch/arm64/kernel/bpi.S
index ce1cfe3b24e6..dc51ef2ce98a 100644
--- a/arch/arm64/kernel/bpi.S
+++ b/arch/arm64/kernel/bpi.S
@@ -19,16 +19,37 @@
 #include <linux/linkage.h>
 #include <linux/arm-smccc.h>
 
+#include <asm/alternative.h>
+
 .macro hyp_ventry
 	.align 7
 1:	.rept 27
 	nop
 	.endr
+/*
+ * The default sequence is to directly branch to the KVM vectors,
+ * using the computed offset. This applies for VHE as well as
+ * !ARM64_HARDEN_EL2_VECTORS.
+ *
+ * For ARM64_HARDEN_EL2_VECTORS configurations, this gets replaced
+ * with:
+ *
+ * stp	x0, x1, [sp, #-16]!
+ * movz	x0, #(addr & 0xffff)
+ * movk	x0, #((addr >> 16) & 0xffff), lsl #16
+ * movk	x0, #((addr >> 32) & 0xffff), lsl #32
+ * br	x0
+ *
+ * Where addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + 4.
+ * See kvm_patch_vector_branch for details.
+ */
+alternative_cb	kvm_patch_vector_branch
 	b	__kvm_hyp_vector + (1b - 0b)
 	nop
 	nop
 	nop
 	nop
+alternative_cb_end
 .endm
 
 .macro generate_vectors
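[Editor's note] A side note on the patched sequence documented in the comment
above: only three move-wide instructions are emitted, so the branch target is
materialised from bits [47:0] alone. That is sufficient because, on the
kernels this patch targets, a hyp VA fits in 48 bits. A small standalone
sketch of the decomposition (the address value is made up for illustration):

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Made-up stand-in for kern_hyp_va(__kvm_hyp_vector) + offset + 4 */
	uint64_t addr = 0x0000a1b2c3d4e604ULL;

	/* No "movk ..., lsl #48" slot exists, so bits [63:48] must be zero */
	assert((addr >> 48) == 0);

	/* The three immediates patched into movz/movk/movk */
	printf("movz x0, #0x%04" PRIx64 "\n", addr & 0xffff);
	printf("movk x0, #0x%04" PRIx64 ", lsl #16\n", (addr >> 16) & 0xffff);
	printf("movk x0, #0x%04" PRIx64 ", lsl #32\n", (addr >> 32) & 0xffff);

	return 0;
}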