author     Paolo Bonzini <pbonzini@redhat.com>  2018-06-01 19:17:22 +0200
committer  Paolo Bonzini <pbonzini@redhat.com>  2018-06-01 19:17:22 +0200
commit     5eec43a1fa2a7ec5225411c97538fa582d36f579
tree       22928042a707851ab4359eb45e7bda04a374f4d3 /arch/arm64/include/asm
parent     KVM: docs: mmu: Fix link to NPT presentation from KVM Forum 2008
parent     KVM: arm/arm64: Bump VGIC_V3_MAX_CPUS to 512
Merge tag 'kvmarm-for-v4.18' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
KVM/ARM updates for 4.18
- Lazy context-switching of FPSIMD registers on arm64 (see the sketch after this list)
- Allow virtual redistributors to be part of two or more MMIO ranges
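
The lazy scheme replaces eager save/restore of FP/SIMD state on every guest
entry and exit: the new vcpu_arch.flags bits defined in the kvm_host.h hunk
below track whose registers currently live in the hardware, so state only
moves when ownership actually changes. A minimal sketch of the trap-side
protocol, assuming hypothetical helpers save_host_fpsimd() and
load_guest_fpsimd() (the real code lives in arch/arm64/kvm/fpsimd.c and the
hyp switch path, not in these headers):

	/*
	 * Sketch only: illustrates the flag protocol, not the kernel's
	 * actual implementation.
	 */
	static void sketch_guest_fpsimd_trap(struct kvm_vcpu *vcpu)
	{
		if (vcpu->arch.flags & KVM_ARM64_FP_HOST) {
			/* Host regs are live: save them before clobbering. */
			save_host_fpsimd(vcpu);		/* hypothetical */
			vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
		}
		load_guest_fpsimd(vcpu);		/* hypothetical */
		/* The guest now owns the FP/SIMD registers. */
		vcpu->arch.flags |= KVM_ARM64_FP_ENABLED;
	}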
Diffstat (limited to 'arch/arm64/include/asm')
 arch/arm64/include/asm/cpufeature.h  | 29
 arch/arm64/include/asm/fpsimd.h      | 21
 arch/arm64/include/asm/kvm_asm.h     |  8
 arch/arm64/include/asm/kvm_host.h    | 45
 arch/arm64/include/asm/processor.h   | 15
 arch/arm64/include/asm/thread_info.h | 13
 6 files changed, 82 insertions(+), 49 deletions(-)
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 09b0f2a80c8f..0a6b7133195e 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -11,9 +11,7 @@
 
 #include <asm/cpucaps.h>
 #include <asm/cputype.h>
-#include <asm/fpsimd.h>
 #include <asm/hwcap.h>
-#include <asm/sigcontext.h>
 #include <asm/sysreg.h>
 
 /*
@@ -510,33 +508,6 @@ static inline bool system_supports_sve(void)
 		cpus_have_const_cap(ARM64_SVE);
 }
 
-/*
- * Read the pseudo-ZCR used by cpufeatures to identify the supported SVE
- * vector length.
- *
- * Use only if SVE is present.
- * This function clobbers the SVE vector length.
- */
-static inline u64 read_zcr_features(void)
-{
-	u64 zcr;
-	unsigned int vq_max;
-
-	/*
-	 * Set the maximum possible VL, and write zeroes to all other
-	 * bits to see if they stick.
-	 */
-	sve_kernel_enable(NULL);
-	write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL1);
-
-	zcr = read_sysreg_s(SYS_ZCR_EL1);
-	zcr &= ~(u64)ZCR_ELx_LEN_MASK; /* find sticky 1s outside LEN field */
-	vq_max = sve_vq_from_vl(sve_get_vl());
-	zcr |= vq_max - 1; /* set LEN field to maximum effective value */
-
-	return zcr;
-}
-
 #endif /* __ASSEMBLY__ */
 
 #endif
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index aa7162ae93e3..fa92747a49c8 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -18,6 +18,8 @@
 
 #include <asm/ptrace.h>
 #include <asm/errno.h>
+#include <asm/processor.h>
+#include <asm/sigcontext.h>
 
 #ifndef __ASSEMBLY__
 
@@ -41,6 +43,8 @@ struct task_struct;
 extern void fpsimd_save_state(struct user_fpsimd_state *state);
 extern void fpsimd_load_state(struct user_fpsimd_state *state);
 
+extern void fpsimd_save(void);
+
 extern void fpsimd_thread_switch(struct task_struct *next);
 extern void fpsimd_flush_thread(void);
 
@@ -49,12 +53,27 @@ extern void fpsimd_preserve_current_state(void);
 extern void fpsimd_restore_current_state(void);
 extern void fpsimd_update_current_state(struct user_fpsimd_state const *state);
 
+extern void fpsimd_bind_task_to_cpu(void);
+extern void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *state);
+
 extern void fpsimd_flush_task_state(struct task_struct *target);
+extern void fpsimd_flush_cpu_state(void);
 extern void sve_flush_cpu_state(void);
 
 /* Maximum VL that SVE VL-agnostic software can transparently support */
 #define SVE_VL_ARCH_MAX 0x100
 
+/* Offset of FFR in the SVE register dump */
+static inline size_t sve_ffr_offset(int vl)
+{
+	return SVE_SIG_FFR_OFFSET(sve_vq_from_vl(vl)) - SVE_SIG_REGS_OFFSET;
+}
+
+static inline void *sve_pffr(struct thread_struct *thread)
+{
+	return (char *)thread->sve_state + sve_ffr_offset(thread->sve_vl);
+}
+
 extern void sve_save_state(void *state, u32 *pfpsr);
 extern void sve_load_state(void const *state, u32 const *pfpsr,
 			   unsigned long vq_minus_1);
@@ -63,6 +82,8 @@ extern unsigned int sve_get_vl(void);
 struct arm64_cpu_capabilities;
 extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused);
 
+extern u64 read_zcr_features(void);
+
 extern int __ro_after_init sve_max_vl;
 
 #ifdef CONFIG_ARM64_SVE
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index f6648a3e4152..821a7032c0f7 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -30,19 +30,19 @@
 
 /* The hyp-stub will return this for any kvm_call_hyp() call */
 #define ARM_EXCEPTION_HYP_GONE	  HVC_STUB_ERR
 
-#define KVM_ARM64_DEBUG_DIRTY_SHIFT	0
-#define KVM_ARM64_DEBUG_DIRTY		(1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
+#ifndef __ASSEMBLY__
+
+#include <linux/mm.h>
 
 /* Translate a kernel address of @sym into its equivalent linear mapping */
 #define kvm_ksym_ref(sym)						\
 	({								\
 		void *val = &sym;					\
 		if (!is_kernel_in_hyp_mode())				\
-			val = phys_to_virt((u64)&sym - kimage_voffset);	\
+			val = lm_alias(&sym);				\
 		val;							\
 	 })
 
-#ifndef __ASSEMBLY__
 struct kvm;
 struct kvm_vcpu;
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 469de8acd06f..a4ca202ff3f2 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -30,6 +30,7 @@
 #include <asm/kvm.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmio.h>
+#include <asm/thread_info.h>
 
 #define __KVM_HAVE_ARCH_INTC_INITIALIZED
 
@@ -216,8 +217,8 @@ struct kvm_vcpu_arch {
 	/* Exception Information */
 	struct kvm_vcpu_fault_info fault;
 
-	/* Guest debug state */
-	u64 debug_flags;
+	/* Miscellaneous vcpu state flags */
+	u64 flags;
 
 	/*
 	 * We maintain more than a single set of debug registers to support
@@ -238,6 +239,10 @@ struct kvm_vcpu_arch {
 
 	/* Pointer to host CPU context */
 	kvm_cpu_context_t *host_cpu_context;
+
+	struct thread_info *host_thread_info;	/* hyp VA */
+	struct user_fpsimd_state *host_fpsimd_state;	/* hyp VA */
+
 	struct {
 		/* {Break,watch}point registers */
 		struct kvm_guest_debug_arch regs;
@@ -293,6 +298,12 @@ struct kvm_vcpu_arch {
 	bool sysregs_loaded_on_cpu;
 };
 
+/* vcpu_arch flags field values: */
+#define KVM_ARM64_DEBUG_DIRTY		(1 << 0)
+#define KVM_ARM64_FP_ENABLED		(1 << 1) /* guest FP regs loaded */
+#define KVM_ARM64_FP_HOST		(1 << 2) /* host FP regs loaded */
+#define KVM_ARM64_HOST_SVE_IN_USE	(1 << 3) /* backup for host TIF_SVE */
+
 #define vcpu_gp_regs(v)		(&(v)->arch.ctxt.gp_regs)
 
 /*
@@ -394,6 +405,19 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
 	kvm_call_hyp(__kvm_set_tpidr_el2, tpidr_el2);
 }
 
+static inline bool kvm_arch_check_sve_has_vhe(void)
+{
+	/*
+	 * The Arm architecture specifies that implementation of SVE
+	 * requires VHE also to be implemented.  The KVM code for arm64
+	 * relies on this when SVE is present:
+	 */
+	if (system_supports_sve())
+		return has_vhe();
+	else
+		return true;
+}
+
 static inline void kvm_arch_hardware_unsetup(void) {}
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
 static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
@@ -420,15 +444,18 @@ static inline void __cpu_init_stage2(void)
 		  "PARange is %d bits, unsupported configuration!", parange);
 }
 
-/*
- * All host FP/SIMD state is restored on guest exit, so nothing needs
- * doing here except in the SVE case:
-*/
-static inline void kvm_fpsimd_flush_cpu_state(void)
+/* Guest/host FPSIMD coordination helpers */
+int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
+void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
+void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
+void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);
+
+#ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */
+static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
 {
-	if (system_supports_sve())
-		sve_flush_cpu_state();
+	return kvm_arch_vcpu_run_map_fp(vcpu);
 }
+#endif
 
 static inline void kvm_arm_vhe_guest_enter(void)
 {
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 767598932549..c99e657fdd57 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -156,7 +156,9 @@ static inline void arch_thread_struct_whitelist(unsigned long *offset,
 /* Sync TPIDR_EL0 back to thread_struct for current */
 void tls_preserve_current_state(void);
 
-#define INIT_THREAD  {	}
+#define INIT_THREAD {			\
+	.fpsimd_cpu = NR_CPUS,		\
+}
 
 static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
 {
@@ -244,6 +246,17 @@ void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused);
 void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused);
 void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused);
 
+/*
+ * Not at the top of the file due to a direct #include cycle between
+ * <asm/fpsimd.h> and <asm/processor.h>.  Deferring this #include
+ * ensures that contents of processor.h are visible to fpsimd.h even if
+ * processor.h is included first.
+ *
+ * These prctl helpers are the only things in this file that require
+ * fpsimd.h.  The core code expects them to be in this header.
+ */
+#include <asm/fpsimd.h>
+
 /* Userspace interface for PR_SVE_{SET,GET}_VL prctl()s: */
 #define SVE_SET_VL(arg)	sve_set_current_vl(arg)
 #define SVE_GET_VL()	sve_get_current_vl()
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 740aa03c5f0d..af271f9a6c9f 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -45,12 +45,6 @@ struct thread_info {
 	int			preempt_count;	/* 0 => preemptable, <0 => bug */
 };
 
-#define INIT_THREAD_INFO(tsk)						\
-{									\
-	.preempt_count	= INIT_PREEMPT_COUNT,				\
-	.addr_limit	= KERNEL_DS,					\
-}
-
 #define thread_saved_pc(tsk)	\
 	((unsigned long)(tsk->thread.cpu_context.pc))
 #define thread_saved_sp(tsk)	\
@@ -117,5 +111,12 @@ void arch_release_task_struct(struct task_struct *tsk);
 				 _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
 				 _TIF_NOHZ)
 
+#define INIT_THREAD_INFO(tsk)						\
+{									\
+	.flags		= _TIF_FOREIGN_FPSTATE,				\
+	.preempt_count	= INIT_PREEMPT_COUNT,				\
+	.addr_limit	= KERNEL_DS,					\
+}
+
 #endif /* __KERNEL__ */
 #endif /* __ASM_THREAD_INFO_H */
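
Taken together, the new kvm_host.h hooks are intended to bracket the vcpu run
loop: kvm_arch_vcpu_load_fp() on vcpu load, kvm_arch_vcpu_ctxsync_fp() after
each guest exit, and kvm_arch_vcpu_put_fp() on vcpu put, with
kvm_arch_check_sve_has_vhe() as a one-time init-time check. A hedged
caller-side sketch follows; the real callers live in the core arm/arm64 KVM
code, not in these headers, and kvm_enter_guest_once() is a hypothetical
stand-in for the actual enter/exit path:

	/* Sketch only: shows the intended call ordering. */
	static int sketch_kvm_init_check(void)
	{
		/* An SVE implementation without VHE is unsupported. */
		if (!kvm_arch_check_sve_has_vhe())
			return -ENODEV;
		return 0;
	}

	static int sketch_vcpu_run(struct kvm_vcpu *vcpu)
	{
		int ret;

		kvm_arch_vcpu_load_fp(vcpu);		/* on vcpu_load */
		ret = kvm_enter_guest_once(vcpu);	/* hypothetical */
		kvm_arch_vcpu_ctxsync_fp(vcpu);		/* after guest exit */
		kvm_arch_vcpu_put_fp(vcpu);		/* on vcpu_put */
		return ret;
	}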