Diffstat (limited to 'arch/arm64/kvm')
-rw-r--r--  arch/arm64/kvm/arm.c                |  8
-rw-r--r--  arch/arm64/kvm/handle_exit.c        | 36
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/hyp-init.S  | 12
-rw-r--r--  arch/arm64/kvm/mmio.c               | 11
-rw-r--r--  arch/arm64/kvm/mmu.c                | 72
-rw-r--r--  arch/arm64/kvm/pmu.c                |  7
-rw-r--r--  arch/arm64/kvm/pvtime.c             | 15
-rw-r--r--  arch/arm64/kvm/reset.c              | 10
-rw-r--r--  arch/arm64/kvm/sys_regs.c           | 81
-rw-r--r--  arch/arm64/kvm/vgic/vgic-v4.c       |  8
10 files changed, 129 insertions, 131 deletions
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 98f05bdac3c1..691d21e4c717 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -254,6 +254,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	vcpu->arch.target = -1;
 	bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
 
+	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
+
 	/* Set up the timer */
 	kvm_timer_vcpu_init(vcpu);
 
@@ -647,7 +649,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 			return ret;
 
 		if (run->exit_reason == KVM_EXIT_MMIO) {
-			ret = kvm_handle_mmio_return(vcpu, run);
+			ret = kvm_handle_mmio_return(vcpu);
 			if (ret)
 				return ret;
 		}
@@ -795,11 +797,11 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
 
 		/* Exit types that need handling before we can be preempted */
-		handle_exit_early(vcpu, run, ret);
+		handle_exit_early(vcpu, ret);
 
 		preempt_enable();
 
-		ret = handle_exit(vcpu, run, ret);
+		ret = handle_exit(vcpu, ret);
 	}
 
 	/* Tell userspace about in-kernel device output levels */
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 98ab33139982..fe6c7d79309d 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -25,7 +25,7 @@
 #define CREATE_TRACE_POINTS
 #include "trace_handle_exit.h"
 
-typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
+typedef int (*exit_handle_fn)(struct kvm_vcpu *);
 
 static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u32 esr)
 {
@@ -33,7 +33,7 @@ static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u32 esr)
 		kvm_inject_vabt(vcpu);
 }
 
-static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int handle_hvc(struct kvm_vcpu *vcpu)
 {
 	int ret;
 
@@ -50,7 +50,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	return ret;
 }
 
-static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int handle_smc(struct kvm_vcpu *vcpu)
 {
 	/*
 	 * "If an SMC instruction executed at Non-secure EL1 is
@@ -69,7 +69,7 @@ static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
  * Guest access to FP/ASIMD registers are routed to this handler only
  * when the system doesn't support FP/ASIMD.
  */
-static int handle_no_fpsimd(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int handle_no_fpsimd(struct kvm_vcpu *vcpu)
 {
 	kvm_inject_undefined(vcpu);
 	return 1;
@@ -87,7 +87,7 @@ static int handle_no_fpsimd(struct kvm_vcpu *vcpu, struct kvm_run *run)
  * world-switches and schedule other host processes until there is an
  * incoming IRQ or FIQ to the VM.
  */
-static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int kvm_handle_wfx(struct kvm_vcpu *vcpu)
 {
 	if (kvm_vcpu_get_esr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
 		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
@@ -109,16 +109,16 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
  * kvm_handle_guest_debug - handle a debug exception instruction
  *
  * @vcpu:	the vcpu pointer
- * @run:	access to the kvm_run structure for results
  *
  * We route all debug exceptions through the same handler. If both the
  * guest and host are using the same debug facilities it will be up to
  * userspace to re-inject the correct exception for guest delivery.
  *
- * @return: 0 (while setting run->exit_reason), -1 for error
+ * @return: 0 (while setting vcpu->run->exit_reason), -1 for error
  */
-static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu)
 {
+	struct kvm_run *run = vcpu->run;
 	u32 esr = kvm_vcpu_get_esr(vcpu);
 	int ret = 0;
 
@@ -144,7 +144,7 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	return ret;
 }
 
-static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu)
 {
 	u32 esr = kvm_vcpu_get_esr(vcpu);
 
@@ -155,7 +155,7 @@ static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	return 1;
 }
 
-static int handle_sve(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int handle_sve(struct kvm_vcpu *vcpu)
 {
 	/* Until SVE is supported for guests: */
 	kvm_inject_undefined(vcpu);
@@ -167,7 +167,7 @@ static int handle_sve(struct kvm_vcpu *vcpu, struct kvm_run *run)
  * a NOP). If we get here, it is that we didn't fixup ptrauth on exit, and all
  * that we can do is give the guest an UNDEF.
  */
-static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu)
 {
 	kvm_inject_undefined(vcpu);
 	return 1;
@@ -212,7 +212,7 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
  * KVM_EXIT_DEBUG, otherwise userspace needs to complete its
  * emulation first.
  */
-static int handle_trap_exceptions(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int handle_trap_exceptions(struct kvm_vcpu *vcpu)
 {
 	int handled;
 
@@ -227,7 +227,7 @@ static int handle_trap_exceptions(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		exit_handle_fn exit_handler;
 
 		exit_handler = kvm_get_exit_handler(vcpu);
-		handled = exit_handler(vcpu, run);
+		handled = exit_handler(vcpu);
 	}
 
 	return handled;
@@ -237,9 +237,10 @@ static int handle_trap_exceptions(struct kvm_vcpu *vcpu, struct kvm_run *run)
 * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
 * proper exit to userspace.
 */
-int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
-		int exception_index)
+int handle_exit(struct kvm_vcpu *vcpu, int exception_index)
 {
+	struct kvm_run *run = vcpu->run;
+
 	if (ARM_SERROR_PENDING(exception_index)) {
 		u8 esr_ec = ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
 
@@ -265,7 +266,7 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	case ARM_EXCEPTION_EL1_SERROR:
 		return 1;
 	case ARM_EXCEPTION_TRAP:
-		return handle_trap_exceptions(vcpu, run);
+		return handle_trap_exceptions(vcpu);
 	case ARM_EXCEPTION_HYP_GONE:
 		/*
 		 * EL2 has been reset to the hyp-stub. This happens when a guest
@@ -289,8 +290,7 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 }
 
 /* For exit types that need handling before we can be preempted */
-void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
-		       int exception_index)
+void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index)
 {
 	if (ARM_SERROR_PENDING(exception_index)) {
 		if (this_cpu_has_cap(ARM64_HAS_RAS_EXTN)) {
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
index 1587d146726a..d9434e90c06d 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
@@ -6,6 +6,7 @@
 
 #include <linux/linkage.h>
 
+#include <asm/alternative.h>
 #include <asm/assembler.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_mmu.h>
@@ -141,11 +142,15 @@ SYM_CODE_START(__kvm_handle_stub_hvc)
 1:	cmp	x0, #HVC_RESET_VECTORS
 	b.ne	1f
-reset:
+
 	/*
-	 * Reset kvm back to the hyp stub. Do not clobber x0-x4 in
-	 * case we coming via HVC_SOFT_RESTART.
+	 * Set the HVC_RESET_VECTORS return code before entering the common
+	 * path so that we do not clobber x0-x2 in case we are coming via
+	 * HVC_SOFT_RESTART.
 	 */
+	mov	x0, xzr
+reset:
+	/* Reset kvm back to the hyp stub. */
 	mrs	x5, sctlr_el2
 	mov_q	x6, SCTLR_ELx_FLAGS
 	bic	x5, x5, x6		// Clear SCTL_M and etc
@@ -156,7 +161,6 @@ reset:
 	/* Install stub vectors */
 	adr_l	x5, __hyp_stub_vectors
 	msr	vbar_el2, x5
-	mov	x0, xzr
 	eret
 
 1:	/* Bad stub call */
diff --git a/arch/arm64/kvm/mmio.c b/arch/arm64/kvm/mmio.c
index 58de2ae4f6bb..6a2826f1bf5e 100644
--- a/arch/arm64/kvm/mmio.c
+++ b/arch/arm64/kvm/mmio.c
@@ -77,9 +77,8 @@ unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len)
  * or in-kernel IO emulation
  *
  * @vcpu: The VCPU pointer
- * @run:  The VCPU run struct containing the mmio data
  */
-int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
+int kvm_handle_mmio_return(struct kvm_vcpu *vcpu)
 {
 	unsigned long data;
 	unsigned int len;
@@ -92,6 +91,8 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	vcpu->mmio_needed = 0;
 
 	if (!kvm_vcpu_dabt_iswrite(vcpu)) {
+		struct kvm_run *run = vcpu->run;
+
 		len = kvm_vcpu_dabt_get_as(vcpu);
 		data = kvm_mmio_read_buf(run->mmio.data, len);
 
@@ -119,9 +120,9 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	return 0;
 }
 
-int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
-		 phys_addr_t fault_ipa)
+int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
 {
+	struct kvm_run *run = vcpu->run;
 	unsigned long data;
 	unsigned long rt;
 	int ret;
@@ -182,7 +183,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	if (!is_write)
 		memcpy(run->mmio.data, data_buf, len);
 	vcpu->stat.mmio_exit_kernel++;
-	kvm_handle_mmio_return(vcpu, run);
+	kvm_handle_mmio_return(vcpu);
 
 	return 1;
 }
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 05e0e03fbdf8..0121ef2c7c8d 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -127,38 +127,6 @@ static void stage2_dissolve_pud(struct kvm_s2_mmu *mmu, phys_addr_t addr, pud_t
 	put_page(virt_to_page(pudp));
 }
 
-static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
-				  int min, int max)
-{
-	void *page;
-
-	BUG_ON(max > KVM_NR_MEM_OBJS);
-	if (cache->nobjs >= min)
-		return 0;
-	while (cache->nobjs < max) {
-		page = (void *)__get_free_page(GFP_PGTABLE_USER);
-		if (!page)
-			return -ENOMEM;
-		cache->objects[cache->nobjs++] = page;
-	}
-	return 0;
-}
-
-static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
-{
-	while (mc->nobjs)
-		free_page((unsigned long)mc->objects[--mc->nobjs]);
-}
-
-static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
-{
-	void *p;
-
-	BUG_ON(!mc || !mc->nobjs);
-	p = mc->objects[--mc->nobjs];
-	return p;
-}
-
 static void clear_stage2_pgd_entry(struct kvm_s2_mmu *mmu, pgd_t *pgd, phys_addr_t addr)
 {
 	struct kvm *kvm = mmu->kvm;
@@ -1156,7 +1124,7 @@ static p4d_t *stage2_get_p4d(struct kvm_s2_mmu *mmu, struct kvm_mmu_memory_cache
 	if (stage2_pgd_none(kvm, *pgd)) {
 		if (!cache)
 			return NULL;
-		p4d = mmu_memory_cache_alloc(cache);
+		p4d = kvm_mmu_memory_cache_alloc(cache);
 		stage2_pgd_populate(kvm, pgd, p4d);
 		get_page(virt_to_page(pgd));
 	}
@@ -1175,7 +1143,7 @@ static pud_t *stage2_get_pud(struct kvm_s2_mmu *mmu, struct kvm_mmu_memory_cache
 	if (stage2_p4d_none(kvm, *p4d)) {
 		if (!cache)
 			return NULL;
-		pud = mmu_memory_cache_alloc(cache);
+		pud = kvm_mmu_memory_cache_alloc(cache);
 		stage2_p4d_populate(kvm, p4d, pud);
 		get_page(virt_to_page(p4d));
 	}
@@ -1197,7 +1165,7 @@ static pmd_t *stage2_get_pmd(struct kvm_s2_mmu *mmu, struct kvm_mmu_memory_cache
 	if (stage2_pud_none(kvm, *pud)) {
 		if (!cache)
 			return NULL;
-		pmd = mmu_memory_cache_alloc(cache);
+		pmd = kvm_mmu_memory_cache_alloc(cache);
 		stage2_pud_populate(kvm, pud, pmd);
 		get_page(virt_to_page(pud));
 	}
@@ -1356,7 +1324,7 @@ static bool stage2_get_leaf_entry(struct kvm_s2_mmu *mmu, phys_addr_t addr,
 	return true;
 }
 
-static bool stage2_is_exec(struct kvm_s2_mmu *mmu, phys_addr_t addr)
+static bool stage2_is_exec(struct kvm_s2_mmu *mmu, phys_addr_t addr, unsigned long sz)
 {
 	pud_t *pudp;
 	pmd_t *pmdp;
@@ -1368,11 +1336,11 @@ static bool stage2_is_exec(struct kvm_s2_mmu *mmu, phys_addr_t addr)
 		return false;
 
 	if (pudp)
-		return kvm_s2pud_exec(pudp);
+		return sz <= PUD_SIZE && kvm_s2pud_exec(pudp);
 	else if (pmdp)
-		return kvm_s2pmd_exec(pmdp);
+		return sz <= PMD_SIZE && kvm_s2pmd_exec(pmdp);
 	else
-		return kvm_s2pte_exec(ptep);
+		return sz == PAGE_SIZE && kvm_s2pte_exec(ptep);
 }
 
 static int stage2_set_pte(struct kvm_s2_mmu *mmu,
@@ -1409,7 +1377,7 @@ static int stage2_set_pte(struct kvm_s2_mmu *mmu,
 	if (stage2_pud_none(kvm, *pud)) {
 		if (!cache)
 			return 0; /* ignore calls from kvm_set_spte_hva */
-		pmd = mmu_memory_cache_alloc(cache);
+		pmd = kvm_mmu_memory_cache_alloc(cache);
 		stage2_pud_populate(kvm, pud, pmd);
 		get_page(virt_to_page(pud));
 	}
@@ -1434,7 +1402,7 @@ static int stage2_set_pte(struct kvm_s2_mmu *mmu,
 	if (pmd_none(*pmd)) {
 		if (!cache)
 			return 0; /* ignore calls from kvm_set_spte_hva */
-		pte = mmu_memory_cache_alloc(cache);
+		pte = kvm_mmu_memory_cache_alloc(cache);
 		kvm_pmd_populate(pmd, pte);
 		get_page(virt_to_page(pmd));
 	}
@@ -1501,7 +1469,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
 	phys_addr_t addr, end;
 	int ret = 0;
 	unsigned long pfn;
-	struct kvm_mmu_memory_cache cache = { 0, };
+	struct kvm_mmu_memory_cache cache = { 0, __GFP_ZERO, NULL, };
 
 	end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
 	pfn = __phys_to_pfn(pa);
@@ -1512,9 +1480,8 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
 		if (writable)
 			pte = kvm_s2pte_mkwrite(pte);
 
-		ret = mmu_topup_memory_cache(&cache,
-					     kvm_mmu_cache_min_pages(kvm),
-					     KVM_NR_MEM_OBJS);
+		ret = kvm_mmu_topup_memory_cache(&cache,
+						 kvm_mmu_cache_min_pages(kvm));
 		if (ret)
 			goto out;
 		spin_lock(&kvm->mmu_lock);
@@ -1528,7 +1495,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
 	}
 
 out:
-	mmu_free_memory_cache(&cache);
+	kvm_mmu_free_memory_cache(&cache);
 	return ret;
 }
 
@@ -1919,8 +1886,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	mmap_read_unlock(current->mm);
 
 	/* We need minimum second+third level pages */
-	ret = mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm),
-				     KVM_NR_MEM_OBJS);
+	ret = kvm_mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm));
 	if (ret)
 		return ret;
 
@@ -1995,7 +1961,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	 * execute permissions, and we preserve whatever we have.
 	 */
 	needs_exec = exec_fault ||
-		(fault_status == FSC_PERM && stage2_is_exec(mmu, fault_ipa));
+		(fault_status == FSC_PERM &&
+		 stage2_is_exec(mmu, fault_ipa, vma_pagesize));
 
 	if (vma_pagesize == PUD_SIZE) {
 		pud_t new_pud = kvm_pfn_pud(pfn, mem_type);
@@ -2086,7 +2053,6 @@ out:
 /**
  * kvm_handle_guest_abort - handles all 2nd stage aborts
  * @vcpu:	the VCPU pointer
- * @run:	the kvm_run structure
  *
  * Any abort that gets to the host is almost guaranteed to be caused by a
  * missing second stage translation table entry, which can mean that either the
@@ -2095,7 +2061,7 @@ out:
  * space. The distinction is based on the IPA causing the fault and whether this
  * memory region has been registered as standard RAM by user space.
  */
-int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
+int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
 {
 	unsigned long fault_status;
 	phys_addr_t fault_ipa;
@@ -2182,7 +2148,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		 * of the page size.
 		 */
 		fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
-		ret = io_mem_abort(vcpu, run, fault_ipa);
+		ret = io_mem_abort(vcpu, fault_ipa);
 		goto out_unlock;
 	}
 
@@ -2351,7 +2317,7 @@ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
 
 void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
 {
-	mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
+	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
 }
 
 phys_addr_t kvm_mmu_get_httbr(void)
diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c
index b5ae3a5d509e..3c224162b3dd 100644
--- a/arch/arm64/kvm/pmu.c
+++ b/arch/arm64/kvm/pmu.c
@@ -159,7 +159,10 @@ static void kvm_vcpu_pmu_disable_el0(unsigned long events)
 }
 
 /*
- * On VHE ensure that only guest events have EL0 counting enabled
+ * On VHE ensure that only guest events have EL0 counting enabled.
+ * This is called from both vcpu_{load,put} and the sysreg handling.
+ * Since the latter is preemptible, special care must be taken to
+ * disable preemption.
  */
 void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
 {
@@ -169,12 +172,14 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
 	if (!has_vhe())
 		return;
 
+	preempt_disable();
 	host = this_cpu_ptr(&kvm_host_data);
 	events_guest = host->pmu_events.events_guest;
 	events_host = host->pmu_events.events_host;
 
 	kvm_vcpu_pmu_enable_el0(events_guest);
 	kvm_vcpu_pmu_disable_el0(events_host);
+	preempt_enable();
 }
 
 /*
diff --git a/arch/arm64/kvm/pvtime.c b/arch/arm64/kvm/pvtime.c
index 1e0f4c284888..f7b52ce1557e 100644
--- a/arch/arm64/kvm/pvtime.c
+++ b/arch/arm64/kvm/pvtime.c
@@ -3,6 +3,7 @@
 
 #include <linux/arm-smccc.h>
 #include <linux/kvm_host.h>
+#include <linux/sched/stat.h>
 
 #include <asm/kvm_mmu.h>
 #include <asm/pvclock-abi.h>
@@ -73,6 +74,11 @@ gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
 	return base;
 }
 
+static bool kvm_arm_pvtime_supported(void)
+{
+	return !!sched_info_on();
+}
+
 int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
 			    struct kvm_device_attr *attr)
 {
@@ -82,7 +88,8 @@ int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
 	int ret = 0;
 	int idx;
 
-	if (attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
+	if (!kvm_arm_pvtime_supported() ||
+	    attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
 		return -ENXIO;
 
 	if (get_user(ipa, user))
@@ -110,7 +117,8 @@ int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
 	u64 __user *user = (u64 __user *)attr->addr;
 	u64 ipa;
 
-	if (attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
+	if (!kvm_arm_pvtime_supported() ||
+	    attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
 		return -ENXIO;
 
 	ipa = vcpu->arch.steal.base;
@@ -125,7 +133,8 @@ int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
 {
 	switch (attr->attr) {
 	case KVM_ARM_VCPU_PVTIME_IPA:
-		return 0;
+		if (kvm_arm_pvtime_supported())
+			return 0;
 	}
 	return -ENXIO;
 }
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 92c3bba048ff..ee33875c5c2a 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -244,7 +244,7 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
  */
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 {
-	int ret = -EINVAL;
+	int ret;
 	bool loaded;
 	u32 pstate;
 
@@ -268,15 +268,19 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 
 	if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
 	    test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) {
-		if (kvm_vcpu_enable_ptrauth(vcpu))
+		if (kvm_vcpu_enable_ptrauth(vcpu)) {
+			ret = -EINVAL;
 			goto out;
+		}
 	}
 
 	switch (vcpu->arch.target) {
 	default:
 		if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
-			if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1))
+			if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1)) {
+				ret = -EINVAL;
 				goto out;
+			}
 			pstate = VCPU_RESET_PSTATE_SVC;
 		} else {
 			pstate = VCPU_RESET_PSTATE_EL1;
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 1fbab5f066bf..077293b5115f 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1051,9 +1051,9 @@ static bool access_amu(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 
 /* Macro to expand the AMU counter and type registers*/
 #define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), access_amu }
-#define AMU_AMEVTYPE0_EL0(n) { SYS_DESC(SYS_AMEVTYPE0_EL0(n)), access_amu }
+#define AMU_AMEVTYPER0_EL0(n) { SYS_DESC(SYS_AMEVTYPER0_EL0(n)), access_amu }
 #define AMU_AMEVCNTR1_EL0(n) { SYS_DESC(SYS_AMEVCNTR1_EL0(n)), access_amu }
-#define AMU_AMEVTYPE1_EL0(n) { SYS_DESC(SYS_AMEVTYPE1_EL0(n)), access_amu }
+#define AMU_AMEVTYPER1_EL0(n) { SYS_DESC(SYS_AMEVTYPER1_EL0(n)), access_amu }
 
 static bool trap_ptrauth(struct kvm_vcpu *vcpu,
 			 struct sys_reg_params *p,
@@ -1657,22 +1657,22 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	AMU_AMEVCNTR0_EL0(13),
 	AMU_AMEVCNTR0_EL0(14),
 	AMU_AMEVCNTR0_EL0(15),
-	AMU_AMEVTYPE0_EL0(0),
-	AMU_AMEVTYPE0_EL0(1),
-	AMU_AMEVTYPE0_EL0(2),
-	AMU_AMEVTYPE0_EL0(3),
-	AMU_AMEVTYPE0_EL0(4),
-	AMU_AMEVTYPE0_EL0(5),
-	AMU_AMEVTYPE0_EL0(6),
-	AMU_AMEVTYPE0_EL0(7),
-	AMU_AMEVTYPE0_EL0(8),
-	AMU_AMEVTYPE0_EL0(9),
-	AMU_AMEVTYPE0_EL0(10),
-	AMU_AMEVTYPE0_EL0(11),
-	AMU_AMEVTYPE0_EL0(12),
-	AMU_AMEVTYPE0_EL0(13),
-	AMU_AMEVTYPE0_EL0(14),
-	AMU_AMEVTYPE0_EL0(15),
+	AMU_AMEVTYPER0_EL0(0),
+	AMU_AMEVTYPER0_EL0(1),
+	AMU_AMEVTYPER0_EL0(2),
+	AMU_AMEVTYPER0_EL0(3),
+	AMU_AMEVTYPER0_EL0(4),
+	AMU_AMEVTYPER0_EL0(5),
+	AMU_AMEVTYPER0_EL0(6),
+	AMU_AMEVTYPER0_EL0(7),
+	AMU_AMEVTYPER0_EL0(8),
+	AMU_AMEVTYPER0_EL0(9),
+	AMU_AMEVTYPER0_EL0(10),
+	AMU_AMEVTYPER0_EL0(11),
+	AMU_AMEVTYPER0_EL0(12),
+	AMU_AMEVTYPER0_EL0(13),
+	AMU_AMEVTYPER0_EL0(14),
+	AMU_AMEVTYPER0_EL0(15),
 	AMU_AMEVCNTR1_EL0(0),
 	AMU_AMEVCNTR1_EL0(1),
 	AMU_AMEVCNTR1_EL0(2),
@@ -1689,22 +1689,22 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	AMU_AMEVCNTR1_EL0(13),
 	AMU_AMEVCNTR1_EL0(14),
 	AMU_AMEVCNTR1_EL0(15),
-	AMU_AMEVTYPE1_EL0(0),
-	AMU_AMEVTYPE1_EL0(1),
-	AMU_AMEVTYPE1_EL0(2),
-	AMU_AMEVTYPE1_EL0(3),
-	AMU_AMEVTYPE1_EL0(4),
-	AMU_AMEVTYPE1_EL0(5),
-	AMU_AMEVTYPE1_EL0(6),
-	AMU_AMEVTYPE1_EL0(7),
-	AMU_AMEVTYPE1_EL0(8),
-	AMU_AMEVTYPE1_EL0(9),
-	AMU_AMEVTYPE1_EL0(10),
-	AMU_AMEVTYPE1_EL0(11),
-	AMU_AMEVTYPE1_EL0(12),
-	AMU_AMEVTYPE1_EL0(13),
-	AMU_AMEVTYPE1_EL0(14),
-	AMU_AMEVTYPE1_EL0(15),
+	AMU_AMEVTYPER1_EL0(0),
+	AMU_AMEVTYPER1_EL0(1),
+	AMU_AMEVTYPER1_EL0(2),
+	AMU_AMEVTYPER1_EL0(3),
+	AMU_AMEVTYPER1_EL0(4),
+	AMU_AMEVTYPER1_EL0(5),
+	AMU_AMEVTYPER1_EL0(6),
+	AMU_AMEVTYPER1_EL0(7),
+	AMU_AMEVTYPER1_EL0(8),
+	AMU_AMEVTYPER1_EL0(9),
+	AMU_AMEVTYPER1_EL0(10),
+	AMU_AMEVTYPER1_EL0(11),
+	AMU_AMEVTYPER1_EL0(12),
+	AMU_AMEVTYPER1_EL0(13),
+	AMU_AMEVTYPER1_EL0(14),
+	AMU_AMEVTYPER1_EL0(15),
 
 	{ SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
 	{ SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer },
@@ -2156,7 +2156,7 @@ static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
 	return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
 }
 
-int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
+int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu)
 {
 	kvm_inject_undefined(vcpu);
 	return 1;
@@ -2327,22 +2327,22 @@ static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
 	return 1;
 }
 
-int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+int kvm_handle_cp15_64(struct kvm_vcpu *vcpu)
 {
 	return kvm_handle_cp_64(vcpu, cp15_64_regs, ARRAY_SIZE(cp15_64_regs));
 }
 
-int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+int kvm_handle_cp15_32(struct kvm_vcpu *vcpu)
 {
 	return kvm_handle_cp_32(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs));
 }
 
-int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+int kvm_handle_cp14_64(struct kvm_vcpu *vcpu)
 {
 	return kvm_handle_cp_64(vcpu, cp14_64_regs, ARRAY_SIZE(cp14_64_regs));
 }
 
-int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+int kvm_handle_cp14_32(struct kvm_vcpu *vcpu)
 {
 	return kvm_handle_cp_32(vcpu, cp14_regs, ARRAY_SIZE(cp14_regs));
 }
@@ -2392,9 +2392,8 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
 /**
  * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
  * @vcpu: The VCPU pointer
- * @run:  The kvm_run struct
  */
-int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
+int kvm_handle_sys_reg(struct kvm_vcpu *vcpu)
 {
 	struct sys_reg_params params;
 	unsigned long esr = kvm_vcpu_get_esr(vcpu);
diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c
index 27ac833e5ec7..b5fa73c9fd35 100644
--- a/arch/arm64/kvm/vgic/vgic-v4.c
+++ b/arch/arm64/kvm/vgic/vgic-v4.c
@@ -90,7 +90,15 @@ static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
 	    !irqd_irq_disabled(&irq_to_desc(irq)->irq_data))
 		disable_irq_nosync(irq);
 
+	/*
+	 * The v4.1 doorbell can fire concurrently with the vPE being
+	 * made non-resident. Ensure we only update pending_last
+	 * *after* the non-residency sequence has completed.
+	 */
+	raw_spin_lock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock);
 	vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
+	raw_spin_unlock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock);
+
 	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
 	kvm_vcpu_kick(vcpu);
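
Most of the churn above is mechanical: the arm64 exit handlers no longer take a struct kvm_run * argument, because the run structure is always reachable as vcpu->run. A minimal sketch of the resulting calling convention (the handler name is invented for illustration; only the pattern is taken from the diff):

static int handle_foo_exit(struct kvm_vcpu *vcpu)
{
	/* Derive the run struct from the vcpu instead of passing it around. */
	struct kvm_run *run = vcpu->run;

	run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	return 0;	/* 0 == exit to userspace with run->exit_reason set */
}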
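The mmu.c hunks drop the arm64-private page-table cache helpers in favour of the common KVM ones, and setting mmu_page_cache.gfp_zero = __GFP_ZERO in kvm_arch_vcpu_create() keeps the pages zeroed as GFP_PGTABLE_USER used to. A rough sketch of the intended usage pattern, modelled on kvm_phys_addr_ioremap() above (the wrapper function and the elided install step are hypothetical):

static int stage2_map_example(struct kvm *kvm, phys_addr_t ipa)
{
	struct kvm_mmu_memory_cache cache = { 0, __GFP_ZERO, NULL, };
	void *new_table;
	int ret;

	/* Fill the cache while sleeping is still allowed... */
	ret = kvm_mmu_topup_memory_cache(&cache, kvm_mmu_cache_min_pages(kvm));
	if (ret)
		return ret;

	spin_lock(&kvm->mmu_lock);
	/* ...so allocations under mmu_lock can neither sleep nor fail. */
	new_table = kvm_mmu_memory_cache_alloc(&cache);
	/* ... wire new_table into the stage-2 tables for @ipa ... */
	spin_unlock(&kvm->mmu_lock);

	/* Give back whatever was not consumed. */
	kvm_mmu_free_memory_cache(&cache);
	return 0;
}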
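stage2_is_exec() now takes the size of the mapping about to be installed. The idea is that an execute-permission bit found on a smaller leaf entry must not be inherited by a larger block mapping that is about to replace it. The rule the extra checks enforce reduces to something like the following (helper name invented for illustration):

/*
 * Only trust the existing exec state if the current leaf covers at
 * least the mapping we are about to install
 * (PAGE_SIZE <= PMD_SIZE <= PUD_SIZE).
 */
static bool exec_state_is_usable(unsigned long leaf_size, unsigned long new_size)
{
	return new_size <= leaf_size;
}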
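kvm_vcpu_pmu_restore_guest() dereferences the per-CPU kvm_host_data, which is only stable while preemption is disabled; since the sysreg trap path can call it preemptibly, the new preempt_disable()/preempt_enable() pair is needed. Restated as a sketch (wrapper name hypothetical, the body mirrors the hunk above):

static void sync_el0_pmu_events_to_guest(void)
{
	struct kvm_host_data *host;

	preempt_disable();			/* pin ourselves to this CPU */
	host = this_cpu_ptr(&kvm_host_data);	/* per-CPU pointer now stable */
	kvm_vcpu_pmu_enable_el0(host->pmu_events.events_guest);
	kvm_vcpu_pmu_disable_el0(host->pmu_events.events_host);
	preempt_enable();
}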
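The pvtime change only advertises the stolen-time attribute when sched_info_on() is true, since the stolen-time value is derived from the scheduler's run_delay accounting. Userspace that previously assumed KVM_ARM_VCPU_PVTIME_IPA always exists should probe for it; a userspace-side sketch of such a probe (helper name invented), which will now report false when schedstats are disabled:

#include <errno.h>
#include <stdbool.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static bool vcpu_supports_pvtime(int vcpu_fd)
{
	struct kvm_device_attr attr = {
		.group = KVM_ARM_VCPU_PVTIME_CTRL,
		.attr  = KVM_ARM_VCPU_PVTIME_IPA,
	};

	/* 0 on success; -1 with errno == ENXIO when the attribute is absent. */
	return ioctl(vcpu_fd, KVM_HAS_DEVICE_ATTR, &attr) == 0;
}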