From 7aef0cbcdcd0995efde9957b3eda9f31a219613d Mon Sep 17 00:00:00 2001 From: Quentin Perret Date: Fri, 19 Mar 2021 10:01:14 +0000 Subject: KVM: arm64: Factor memory allocation out of pgtable.c In preparation for enabling the creation of page-tables at EL2, factor all memory allocation out of the page-table code, hence making it re-usable with any compatible memory allocator. No functional changes intended. Acked-by: Will Deacon Signed-off-by: Quentin Perret Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20210319100146.1149909-7-qperret@google.com --- arch/arm64/kvm/mmu.c | 66 ++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 64 insertions(+), 2 deletions(-) (limited to 'arch/arm64/kvm/mmu.c') diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 8711894db8c2..e583f7fb3620 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -88,6 +88,44 @@ static bool kvm_is_device_pfn(unsigned long pfn) return !pfn_valid(pfn); } +static void *stage2_memcache_zalloc_page(void *arg) +{ + struct kvm_mmu_memory_cache *mc = arg; + + /* Allocated with __GFP_ZERO, so no need to zero */ + return kvm_mmu_memory_cache_alloc(mc); +} + +static void *kvm_host_zalloc_pages_exact(size_t size) +{ + return alloc_pages_exact(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO); +} + +static void kvm_host_get_page(void *addr) +{ + get_page(virt_to_page(addr)); +} + +static void kvm_host_put_page(void *addr) +{ + put_page(virt_to_page(addr)); +} + +static int kvm_host_page_count(void *addr) +{ + return page_count(virt_to_page(addr)); +} + +static phys_addr_t kvm_host_pa(void *addr) +{ + return __pa(addr); +} + +static void *kvm_host_va(phys_addr_t phys) +{ + return __va(phys); +} + /* * Unmapping vs dcache management: * @@ -351,6 +389,17 @@ int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size, return 0; } +static struct kvm_pgtable_mm_ops kvm_s2_mm_ops = { + .zalloc_page = stage2_memcache_zalloc_page, + .zalloc_pages_exact = kvm_host_zalloc_pages_exact, + .free_pages_exact = free_pages_exact, + .get_page = kvm_host_get_page, + .put_page = kvm_host_put_page, + .page_count = kvm_host_page_count, + .phys_to_virt = kvm_host_va, + .virt_to_phys = kvm_host_pa, +}; + /** * kvm_init_stage2_mmu - Initialise a S2 MMU strucrure * @kvm: The pointer to the KVM structure @@ -374,7 +423,7 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu) if (!pgt) return -ENOMEM; - err = kvm_pgtable_stage2_init(pgt, kvm); + err = kvm_pgtable_stage2_init(pgt, kvm, &kvm_s2_mm_ops); if (err) goto out_free_pgtable; @@ -1208,6 +1257,19 @@ static int kvm_map_idmap_text(void) return err; } +static void *kvm_hyp_zalloc_page(void *arg) +{ + return (void *)get_zeroed_page(GFP_KERNEL); +} + +static struct kvm_pgtable_mm_ops kvm_hyp_mm_ops = { + .zalloc_page = kvm_hyp_zalloc_page, + .get_page = kvm_host_get_page, + .put_page = kvm_host_put_page, + .phys_to_virt = kvm_host_va, + .virt_to_phys = kvm_host_pa, +}; + int kvm_mmu_init(void) { int err; @@ -1251,7 +1313,7 @@ int kvm_mmu_init(void) goto out; } - err = kvm_pgtable_hyp_init(hyp_pgtable, hyp_va_bits); + err = kvm_pgtable_hyp_init(hyp_pgtable, hyp_va_bits, &kvm_hyp_mm_ops); if (err) goto out_free_pgtable; -- cgit v1.2.3 From bfa79a805454f768b8d76ab683659d9e219a037a Mon Sep 17 00:00:00 2001 From: Quentin Perret Date: Fri, 19 Mar 2021 10:01:26 +0000 Subject: KVM: arm64: Elevate hypervisor mappings creation at EL2 Previous commits have introduced infrastructure to enable the EL2 code to manage its own stage 1 mappings. 
However, this was preliminary work, and none of it is currently in use. Put all of this together by elevating the mapping creation at EL2 when memory protection is enabled. In this case, the host kernel running at EL1 still creates _temporary_ EL2 mappings, only used while initializing the hypervisor, but frees them right after. As such, all calls to create_hyp_mappings() after kvm init has finished turn into hypercalls, as the host now has no 'legal' way to modify the hypevisor page tables directly. Acked-by: Will Deacon Signed-off-by: Quentin Perret Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20210319100146.1149909-19-qperret@google.com --- arch/arm64/include/asm/kvm_mmu.h | 2 +- arch/arm64/kvm/arm.c | 87 +++++++++++++++++++++++++++++++++++++--- arch/arm64/kvm/mmu.c | 43 +++++++++++++++++--- 3 files changed, 120 insertions(+), 12 deletions(-) (limited to 'arch/arm64/kvm/mmu.c') diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 5c42ec023cc7..ce02a4052dcf 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -166,7 +166,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu); phys_addr_t kvm_mmu_get_httbr(void); phys_addr_t kvm_get_idmap_vector(void); -int kvm_mmu_init(void); +int kvm_mmu_init(u32 *hyp_va_bits); static inline void *__kvm_vector_slot2addr(void *base, enum arm64_hyp_spectre_vector slot) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index e2c471117bff..d93ea0b82491 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -1426,7 +1426,7 @@ static void cpu_prepare_hyp_mode(int cpu) kvm_flush_dcache_to_poc(params, sizeof(*params)); } -static void cpu_init_hyp_mode(void) +static void hyp_install_host_vector(void) { struct kvm_nvhe_init_params *params; struct arm_smccc_res res; @@ -1444,6 +1444,11 @@ static void cpu_init_hyp_mode(void) params = this_cpu_ptr_nvhe_sym(kvm_init_params); arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_hyp_init), virt_to_phys(params), &res); WARN_ON(res.a0 != SMCCC_RET_SUCCESS); +} + +static void cpu_init_hyp_mode(void) +{ + hyp_install_host_vector(); /* * Disabling SSBD on a non-VHE system requires us to enable SSBS @@ -1486,7 +1491,10 @@ static void cpu_set_hyp_vector(void) struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data); void *vector = hyp_spectre_vector_selector[data->slot]; - *this_cpu_ptr_hyp_sym(kvm_hyp_vector) = (unsigned long)vector; + if (!is_protected_kvm_enabled()) + *this_cpu_ptr_hyp_sym(kvm_hyp_vector) = (unsigned long)vector; + else + kvm_call_hyp_nvhe(__pkvm_cpu_set_vector, data->slot); } static void cpu_hyp_reinit(void) @@ -1494,13 +1502,14 @@ static void cpu_hyp_reinit(void) kvm_init_host_cpu_context(&this_cpu_ptr_hyp_sym(kvm_host_data)->host_ctxt); cpu_hyp_reset(); - cpu_set_hyp_vector(); if (is_kernel_in_hyp_mode()) kvm_timer_init_vhe(); else cpu_init_hyp_mode(); + cpu_set_hyp_vector(); + kvm_arm_init_debug(); if (vgic_present) @@ -1696,18 +1705,59 @@ static void teardown_hyp_mode(void) } } +static int do_pkvm_init(u32 hyp_va_bits) +{ + void *per_cpu_base = kvm_ksym_ref(kvm_arm_hyp_percpu_base); + int ret; + + preempt_disable(); + hyp_install_host_vector(); + ret = kvm_call_hyp_nvhe(__pkvm_init, hyp_mem_base, hyp_mem_size, + num_possible_cpus(), kern_hyp_va(per_cpu_base), + hyp_va_bits); + preempt_enable(); + + return ret; +} + +static int kvm_hyp_init_protection(u32 hyp_va_bits) +{ + void *addr = phys_to_virt(hyp_mem_base); + int ret; + + ret = create_hyp_mappings(addr, addr + hyp_mem_size, PAGE_HYP); + if (ret) + 
return ret; + + ret = do_pkvm_init(hyp_va_bits); + if (ret) + return ret; + + free_hyp_pgds(); + + return 0; +} + /** * Inits Hyp-mode on all online CPUs */ static int init_hyp_mode(void) { + u32 hyp_va_bits; int cpu; - int err = 0; + int err = -ENOMEM; + + /* + * The protected Hyp-mode cannot be initialized if the memory pool + * allocation has failed. + */ + if (is_protected_kvm_enabled() && !hyp_mem_base) + goto out_err; /* * Allocate Hyp PGD and setup Hyp identity mapping */ - err = kvm_mmu_init(); + err = kvm_mmu_init(&hyp_va_bits); if (err) goto out_err; @@ -1823,6 +1873,14 @@ static int init_hyp_mode(void) goto out_err; } + if (is_protected_kvm_enabled()) { + err = kvm_hyp_init_protection(hyp_va_bits); + if (err) { + kvm_err("Failed to init hyp memory protection\n"); + goto out_err; + } + } + return 0; out_err: @@ -1831,6 +1889,16 @@ out_err: return err; } +static int finalize_hyp_mode(void) +{ + if (!is_protected_kvm_enabled()) + return 0; + + static_branch_enable(&kvm_protected_mode_initialized); + + return 0; +} + static void check_kvm_target_cpu(void *ret) { *(int *)ret = kvm_target_cpu(); @@ -1942,8 +2010,15 @@ int kvm_arch_init(void *opaque) if (err) goto out_hyp; + if (!in_hyp_mode) { + err = finalize_hyp_mode(); + if (err) { + kvm_err("Failed to finalize Hyp protection\n"); + goto out_hyp; + } + } + if (is_protected_kvm_enabled()) { - static_branch_enable(&kvm_protected_mode_initialized); kvm_info("Protected nVHE mode initialized successfully\n"); } else if (in_hyp_mode) { kvm_info("VHE mode initialized successfully\n"); diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index e583f7fb3620..de0ad79d2c90 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -221,15 +221,39 @@ void free_hyp_pgds(void) if (hyp_pgtable) { kvm_pgtable_hyp_destroy(hyp_pgtable); kfree(hyp_pgtable); + hyp_pgtable = NULL; } mutex_unlock(&kvm_hyp_pgd_mutex); } +static bool kvm_host_owns_hyp_mappings(void) +{ + if (static_branch_likely(&kvm_protected_mode_initialized)) + return false; + + /* + * This can happen at boot time when __create_hyp_mappings() is called + * after the hyp protection has been enabled, but the static key has + * not been flipped yet. 
+ */ + if (!hyp_pgtable && is_protected_kvm_enabled()) + return false; + + WARN_ON(!hyp_pgtable); + + return true; +} + static int __create_hyp_mappings(unsigned long start, unsigned long size, unsigned long phys, enum kvm_pgtable_prot prot) { int err; + if (!kvm_host_owns_hyp_mappings()) { + return kvm_call_hyp_nvhe(__pkvm_create_mappings, + start, size, phys, prot); + } + mutex_lock(&kvm_hyp_pgd_mutex); err = kvm_pgtable_hyp_map(hyp_pgtable, start, size, phys, prot); mutex_unlock(&kvm_hyp_pgd_mutex); @@ -291,6 +315,16 @@ static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size, unsigned long base; int ret = 0; + if (!kvm_host_owns_hyp_mappings()) { + base = kvm_call_hyp_nvhe(__pkvm_create_private_mapping, + phys_addr, size, prot); + if (IS_ERR_OR_NULL((void *)base)) + return PTR_ERR((void *)base); + *haddr = base; + + return 0; + } + mutex_lock(&kvm_hyp_pgd_mutex); /* @@ -1270,10 +1304,9 @@ static struct kvm_pgtable_mm_ops kvm_hyp_mm_ops = { .virt_to_phys = kvm_host_pa, }; -int kvm_mmu_init(void) +int kvm_mmu_init(u32 *hyp_va_bits) { int err; - u32 hyp_va_bits; hyp_idmap_start = __pa_symbol(__hyp_idmap_text_start); hyp_idmap_start = ALIGN_DOWN(hyp_idmap_start, PAGE_SIZE); @@ -1287,8 +1320,8 @@ int kvm_mmu_init(void) */ BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK); - hyp_va_bits = 64 - ((idmap_t0sz & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET); - kvm_debug("Using %u-bit virtual addresses at EL2\n", hyp_va_bits); + *hyp_va_bits = 64 - ((idmap_t0sz & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET); + kvm_debug("Using %u-bit virtual addresses at EL2\n", *hyp_va_bits); kvm_debug("IDMAP page: %lx\n", hyp_idmap_start); kvm_debug("HYP VA range: %lx:%lx\n", kern_hyp_va(PAGE_OFFSET), @@ -1313,7 +1346,7 @@ int kvm_mmu_init(void) goto out; } - err = kvm_pgtable_hyp_init(hyp_pgtable, hyp_va_bits, &kvm_hyp_mm_ops); + err = kvm_pgtable_hyp_init(hyp_pgtable, *hyp_va_bits, &kvm_hyp_mm_ops); if (err) goto out_free_pgtable; -- cgit v1.2.3 From 834cd93deb75f3a43420e479f133dd02fba95aa6 Mon Sep 17 00:00:00 2001 From: Quentin Perret Date: Fri, 19 Mar 2021 10:01:27 +0000 Subject: KVM: arm64: Use kvm_arch for stage 2 pgtable In order to make use of the stage 2 pgtable code for the host stage 2, use struct kvm_arch in lieu of struct kvm as the host will have the former but not the latter. Acked-by: Will Deacon Signed-off-by: Quentin Perret Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20210319100146.1149909-20-qperret@google.com --- arch/arm64/include/asm/kvm_pgtable.h | 5 +++-- arch/arm64/kvm/hyp/pgtable.c | 6 +++--- arch/arm64/kvm/mmu.c | 2 +- 3 files changed, 7 insertions(+), 6 deletions(-) (limited to 'arch/arm64/kvm/mmu.c') diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h index bf7a3cc49420..7945ec87eaec 100644 --- a/arch/arm64/include/asm/kvm_pgtable.h +++ b/arch/arm64/include/asm/kvm_pgtable.h @@ -162,12 +162,13 @@ int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys, /** * kvm_pgtable_stage2_init() - Initialise a guest stage-2 page-table. * @pgt: Uninitialised page-table structure to initialise. - * @kvm: KVM structure representing the guest virtual machine. + * @arch: Arch-specific KVM structure representing the guest virtual + * machine. * @mm_ops: Memory management callbacks. * * Return: 0 on success, negative error code on failure. 
*/ -int kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm *kvm, +int kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_arch *arch, struct kvm_pgtable_mm_ops *mm_ops); /** diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c index 82aca35a22f6..ea95bbc6ba80 100644 --- a/arch/arm64/kvm/hyp/pgtable.c +++ b/arch/arm64/kvm/hyp/pgtable.c @@ -880,11 +880,11 @@ int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size) return kvm_pgtable_walk(pgt, addr, size, &walker); } -int kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm *kvm, +int kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_arch *arch, struct kvm_pgtable_mm_ops *mm_ops) { size_t pgd_sz; - u64 vtcr = kvm->arch.vtcr; + u64 vtcr = arch->vtcr; u32 ia_bits = VTCR_EL2_IPA(vtcr); u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr); u32 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0; @@ -897,7 +897,7 @@ int kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm *kvm, pgt->ia_bits = ia_bits; pgt->start_level = start_level; pgt->mm_ops = mm_ops; - pgt->mmu = &kvm->arch.mmu; + pgt->mmu = &arch->mmu; /* Ensure zeroed PGD pages are visible to the hardware walker */ dsb(ishst); diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index de0ad79d2c90..d6eb1fb21232 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -457,7 +457,7 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu) if (!pgt) return -ENOMEM; - err = kvm_pgtable_stage2_init(pgt, kvm, &kvm_s2_mm_ops); + err = kvm_pgtable_stage2_init(pgt, &kvm->arch, &kvm_s2_mm_ops); if (err) goto out_free_pgtable; -- cgit v1.2.3 From cfb1a98de7a9aa51931ff5b336fc5c3c201d01cc Mon Sep 17 00:00:00 2001 From: Quentin Perret Date: Fri, 19 Mar 2021 10:01:28 +0000 Subject: KVM: arm64: Use kvm_arch in kvm_s2_mmu In order to make use of the stage 2 pgtable code for the host stage 2, change kvm_s2_mmu to use a kvm_arch pointer in lieu of the kvm pointer, as the host will have the former but not the latter. 
Acked-by: Will Deacon Signed-off-by: Quentin Perret Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20210319100146.1149909-21-qperret@google.com --- arch/arm64/include/asm/kvm_host.h | 2 +- arch/arm64/include/asm/kvm_mmu.h | 6 +++++- arch/arm64/kvm/mmu.c | 8 ++++---- 3 files changed, 10 insertions(+), 6 deletions(-) (limited to 'arch/arm64/kvm/mmu.c') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index f813e1191027..4859c9de75d7 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -94,7 +94,7 @@ struct kvm_s2_mmu { /* The last vcpu id that ran on each physical CPU */ int __percpu *last_vcpu_ran; - struct kvm *kvm; + struct kvm_arch *arch; }; struct kvm_arch_memory_slot { diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index ce02a4052dcf..6f743e20cb06 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -272,7 +272,7 @@ static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu) */ static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu) { - write_sysreg(kern_hyp_va(mmu->kvm)->arch.vtcr, vtcr_el2); + write_sysreg(kern_hyp_va(mmu->arch)->vtcr, vtcr_el2); write_sysreg(kvm_get_vttbr(mmu), vttbr_el2); /* @@ -283,5 +283,9 @@ static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu) asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT)); } +static inline struct kvm *kvm_s2_mmu_to_kvm(struct kvm_s2_mmu *mmu) +{ + return container_of(mmu->arch, struct kvm, arch); +} #endif /* __ASSEMBLY__ */ #endif /* __ARM64_KVM_MMU_H__ */ diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index d6eb1fb21232..0f16b70befa8 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -165,7 +165,7 @@ static void *kvm_host_va(phys_addr_t phys) static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size, bool may_block) { - struct kvm *kvm = mmu->kvm; + struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu); phys_addr_t end = start + size; assert_spin_locked(&kvm->mmu_lock); @@ -470,7 +470,7 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu) for_each_possible_cpu(cpu) *per_cpu_ptr(mmu->last_vcpu_ran, cpu) = -1; - mmu->kvm = kvm; + mmu->arch = &kvm->arch; mmu->pgt = pgt; mmu->pgd_phys = __pa(pgt->pgd); mmu->vmid.vmid_gen = 0; @@ -552,7 +552,7 @@ void stage2_unmap_vm(struct kvm *kvm) void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu) { - struct kvm *kvm = mmu->kvm; + struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu); struct kvm_pgtable *pgt = NULL; spin_lock(&kvm->mmu_lock); @@ -621,7 +621,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, */ static void stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end) { - struct kvm *kvm = mmu->kvm; + struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu); stage2_apply_range_resched(kvm, addr, end, kvm_pgtable_stage2_wrprotect); } -- cgit v1.2.3 From eab62148478d339a37c7a6b37d34182ccf5056ad Mon Sep 17 00:00:00 2001 From: Gavin Shan Date: Tue, 16 Mar 2021 12:11:24 +0800 Subject: KVM: arm64: Hide kvm_mmu_wp_memory_region() We needn't expose the function as it's only used by mmu.c since it was introduced by commit c64735554c0a ("KVM: arm: Add initial dirty page locking support"). 
Signed-off-by: Gavin Shan Reviewed-by: Keqian Zhu Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20210316041126.81860-2-gshan@redhat.com --- arch/arm64/include/asm/kvm_host.h | 1 - arch/arm64/kvm/mmu.c | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) (limited to 'arch/arm64/kvm/mmu.c') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 3d10e6527f7d..688f2df1957b 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -632,7 +632,6 @@ void kvm_arm_resume_guest(struct kvm *kvm); }) void force_vm_exit(const cpumask_t *mask); -void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot); int handle_exit(struct kvm_vcpu *vcpu, int exception_index); void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index); diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 8711894db8c2..28f3b3736dc8 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -555,7 +555,7 @@ static void stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_ * Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired, * serializing operations for VM memory regions. */ -void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot) +static void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot) { struct kvm_memslots *slots = kvm_memslots(kvm); struct kvm_memory_slot *memslot = id_to_memslot(slots, slot); -- cgit v1.2.3 From c728fd4ce75e9c342ea96facc5a2fe5ddb976a67 Mon Sep 17 00:00:00 2001 From: Gavin Shan Date: Tue, 16 Mar 2021 12:11:25 +0800 Subject: KVM: arm64: Use find_vma_intersection() find_vma_intersection() has been existing to search the intersected vma. This uses the function where it's applicable, to simplify the code. Signed-off-by: Gavin Shan Reviewed-by: Keqian Zhu Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20210316041126.81860-3-gshan@redhat.com --- arch/arm64/kvm/mmu.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'arch/arm64/kvm/mmu.c') diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 28f3b3736dc8..192e0df2fc8e 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -421,10 +421,11 @@ static void stage2_unmap_memslot(struct kvm *kvm, * +--------------------------------------------+ */ do { - struct vm_area_struct *vma = find_vma(current->mm, hva); + struct vm_area_struct *vma; hva_t vm_start, vm_end; - if (!vma || vma->vm_start >= reg_end) + vma = find_vma_intersection(current->mm, hva, reg_end); + if (!vma) break; /* @@ -1329,10 +1330,11 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, * +--------------------------------------------+ */ do { - struct vm_area_struct *vma = find_vma(current->mm, hva); + struct vm_area_struct *vma; hva_t vm_start, vm_end; - if (!vma || vma->vm_start >= reg_end) + vma = find_vma_intersection(current->mm, hva, reg_end); + if (!vma) break; /* -- cgit v1.2.3 From 10ba2d17d2972926c60e01dace6d7a3f8d968c4f Mon Sep 17 00:00:00 2001 From: Gavin Shan Date: Tue, 16 Mar 2021 12:11:26 +0800 Subject: KVM: arm64: Don't retrieve memory slot again in page fault handler We needn't retrieve the memory slot again in user_mem_abort() because the corresponding memory slot has been passed from the caller. This would save some CPU cycles. For example, the time used to write 1GB memory, which is backed by 2MB hugetlb pages and write-protected, is dropped by 6.8% from 928ms to 864ms. 
Signed-off-by: Gavin Shan Reviewed-by: Keqian Zhu Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20210316041126.81860-4-gshan@redhat.com --- arch/arm64/kvm/mmu.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'arch/arm64/kvm/mmu.c') diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 192e0df2fc8e..2491b40a294a 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -843,10 +843,15 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, * unmapped afterwards, the call to kvm_unmap_hva will take it away * from us again properly. This smp_rmb() interacts with the smp_wmb() * in kvm_mmu_notifier_invalidate_. + * + * Besides, __gfn_to_pfn_memslot() instead of gfn_to_pfn_prot() is + * used to avoid unnecessary overhead introduced to locate the memory + * slot because it's always fixed even @gfn is adjusted for huge pages. */ smp_rmb(); - pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable); + pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL, + write_fault, &writable, NULL); if (pfn == KVM_PFN_ERR_HWPOISON) { kvm_send_hwpoison_signal(hva, vma_shift); return 0; @@ -912,7 +917,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, /* Mark the page dirty only if the fault is handled successfully */ if (writable && !ret) { kvm_set_pfn_dirty(pfn); - mark_page_dirty(kvm, gfn); + mark_page_dirty_in_slot(kvm, memslot, gfn); } out_unlock: -- cgit v1.2.3
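The series above leans on two C patterns that are easy to miss when read as raw diffs. The first is the struct kvm_pgtable_mm_ops table introduced in "Factor memory allocation out of pgtable.c": the page-table code never calls an allocator directly, it goes through a caller-supplied table of function pointers, so the same walker can be backed by the host allocator at EL1 or by the hypervisor's own early allocator at EL2. Below is a minimal standalone sketch of that callback-table pattern, not the kernel API; every name (pt_mm_ops, host_ops, pt_init, ...) is invented for illustration and libc stands in for the real page allocators.

/*
 * Sketch only (NOT kernel code): the generic "page-table" code sees nothing
 * but the ops table, so swapping the backend never touches the walker.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct pt_mm_ops {
	void *(*zalloc_page)(void *arg);	/* one zeroed page */
	void  (*free_page)(void *addr);
	uint64_t (*virt_to_phys)(void *addr);	/* VA -> PA for the walker */
	void *(*phys_to_virt)(uint64_t phys);	/* PA -> VA for the walker */
};

/* "Host" backend: plain libc, identity VA<->PA mapping for the demo. */
static void *host_zalloc_page(void *arg)
{
	(void)arg;
	return calloc(1, 4096);
}

static void host_free_page(void *addr)
{
	free(addr);
}

static uint64_t host_virt_to_phys(void *addr)
{
	return (uint64_t)(uintptr_t)addr;
}

static void *host_phys_to_virt(uint64_t phys)
{
	return (void *)(uintptr_t)phys;
}

static const struct pt_mm_ops host_ops = {
	.zalloc_page  = host_zalloc_page,
	.free_page    = host_free_page,
	.virt_to_phys = host_virt_to_phys,
	.phys_to_virt = host_phys_to_virt,
};

/* Generic code: allocates its root page through the table, nothing else. */
struct pgtable {
	const struct pt_mm_ops *mm_ops;
	uint64_t pgd_phys;
};

static int pt_init(struct pgtable *pgt, const struct pt_mm_ops *ops)
{
	void *pgd = ops->zalloc_page(NULL);

	if (!pgd)
		return -1;

	pgt->mm_ops = ops;
	pgt->pgd_phys = ops->virt_to_phys(pgd);
	return 0;
}

static void pt_destroy(struct pgtable *pgt)
{
	pgt->mm_ops->free_page(pgt->mm_ops->phys_to_virt(pgt->pgd_phys));
}

int main(void)
{
	struct pgtable pgt;

	if (pt_init(&pgt, &host_ops))
		return 1;
	printf("pgd at 'phys' %#llx\n", (unsigned long long)pgt.pgd_phys);
	pt_destroy(&pgt);
	return 0;
}

The second pattern is behind "Use kvm_arch for stage 2 pgtable" and "Use kvm_arch in kvm_s2_mmu": kvm_s2_mmu stores a pointer to the embedded struct kvm_arch rather than to struct kvm, and the enclosing struct kvm is recovered with container_of() only on paths where a full struct kvm actually exists, while the host stage 2 (which has a kvm_arch but no kvm) never takes those paths. A minimal sketch of that accessor, again with invented names rather than the real kernel types:

/* Sketch only (NOT kernel code): recover the enclosing struct from a
 * pointer to one of its embedded members via its compile-time offset. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct arch   { unsigned long vtcr; };
struct vm     { int id; struct arch arch; };
struct s2_mmu { struct arch *arch; };	/* usable by host and guests alike */

static struct vm *s2_mmu_to_vm(struct s2_mmu *mmu)
{
	return container_of(mmu->arch, struct vm, arch);
}

int main(void)
{
	struct vm vm = { .id = 42 };
	struct s2_mmu mmu = { .arch = &vm.arch };

	printf("recovered vm id: %d\n", s2_mmu_to_vm(&mmu)->id);
	return 0;
}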