author     Marc Zyngier <marc.zyngier@arm.com>              2017-10-23 18:11:21 +0200
committer  Christoffer Dall <christoffer.dall@linaro.org>   2018-01-08 15:20:46 +0100
commit     7a3796d2ef5bb948f709467eef1bf96edbfc67a0 (patch)
tree       8b33f893ac17f71126f7960a7909d18be397353d /arch/arm
parent     KVM: arm/arm64: Only clean the dcache on translation fault (diff)
KVM: arm/arm64: Preserve Exec permission across R/W permission faults
So far, we lose the Exec property whenever we take permission faults, as
we always reconstruct the PTE/PMD from scratch. This can be
counterproductive, as we can end up with the following fault sequence:

	X -> RO -> ROX -> RW -> RWX

Instead, we can look up the existing PTE/PMD and clear the XN bit in the
new entry if it was already cleared in the old one, leading to a much
nicer fault sequence:

	X -> ROX -> RWX

Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
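For context, here is a hedged sketch of how a fault handler can consume the new helpers. The names stage2_is_exec() and kvm_s2pte_mkexec() follow the companion changes in virt/kvm/arm/mmu.c from the same series, which fall outside this arch/arm diffstat, so treat this as illustration rather than the patch itself:

	/*
	 * Sketch only, assuming the companion mmu.c changes: on a pure
	 * permission fault (FSC_PERM), peek at the existing entry and
	 * carry its executable state into the new PTE instead of
	 * rebuilding from scratch with XN set.
	 */
	if (exec_fault) {
		new_pte = kvm_s2pte_mkexec(new_pte);
	} else if (fault_status == FSC_PERM) {
		/* Preserve exec if XN was already clear in the old entry */
		if (stage2_is_exec(kvm, fault_ipa))
			new_pte = kvm_s2pte_mkexec(new_pte);
	}

This is what shortens the fault sequence: the RO -> ROX and RW -> RWX steps disappear because the rebuilt entry never loses the Exec property it already had.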
Diffstat (limited to 'arch/arm')
-rw-r--r--	arch/arm/include/asm/kvm_mmu.h	10
1 file changed, 10 insertions(+), 0 deletions(-)
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 4d7a54cbb3ab..aab64fe52146 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -107,6 +107,11 @@ static inline bool kvm_s2pte_readonly(pte_t *pte)
 	return (pte_val(*pte) & L_PTE_S2_RDWR) == L_PTE_S2_RDONLY;
 }
 
+static inline bool kvm_s2pte_exec(pte_t *pte)
+{
+	return !(pte_val(*pte) & L_PTE_XN);
+}
+
 static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
 {
 	pmd_val(*pmd) = (pmd_val(*pmd) & ~L_PMD_S2_RDWR) | L_PMD_S2_RDONLY;
@@ -117,6 +122,11 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
 	return (pmd_val(*pmd) & L_PMD_S2_RDWR) == L_PMD_S2_RDONLY;
 }
 
+static inline bool kvm_s2pmd_exec(pmd_t *pmd)
+{
+	return !(pmd_val(*pmd) & PMD_SECT_XN);
+}
+
 static inline bool kvm_page_empty(void *ptr)
 {
 	struct page *ptr_page = virt_to_page(ptr);
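
The two accessors above are the arch/arm leaves of the change; the lookup that drives them lives on the generic side. A hedged sketch of that walk, modeled on the stage2_is_exec() helper from the matching virt/kvm/arm/mmu.c change (not part of this arch/arm diffstat), would resolve the existing entry and test its XN bit:

	/*
	 * Sketch: walk the stage-2 tables and report whether the existing
	 * mapping at addr is executable, via the new helpers. Assumes the
	 * stage2_get_pmd() and pmd_thp_or_huge() helpers from
	 * virt/kvm/arm/mmu.c.
	 */
	static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
	{
		pmd_t *pmdp;
		pte_t *ptep;

		pmdp = stage2_get_pmd(kvm, NULL, addr);
		if (!pmdp || pmd_none(*pmdp) || !pmd_present(*pmdp))
			return false;

		/* Huge mapping: the PMD itself carries the XN bit */
		if (pmd_thp_or_huge(*pmdp))
			return kvm_s2pmd_exec(pmdp);

		ptep = pte_offset_kernel(pmdp, addr);
		if (!ptep || pte_none(*ptep) || !pte_present(*ptep))
			return false;

		return kvm_s2pte_exec(ptep);
	}

Keeping the helpers as trivial XN-bit tests in kvm_mmu.h lets arm and arm64 share this walk unchanged, since each architecture supplies its own XN encoding (L_PTE_XN/PMD_SECT_XN here).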