author		Marc Zyngier <maz@kernel.org>	2021-10-14 13:13:06 +0200
committer	Marc Zyngier <maz@kernel.org>	2021-12-01 12:51:22 +0100
commit		cc5705fb1bf119ebb693d594f0157e0dd418590e (patch)
tree		11430bf3f00bb0e65dd89c1d2548c254fb36768e /arch/arm64/kvm/arm.c
parent		KVM: arm64: Merge kvm_arch_vcpu_run_pid_change() and kvm_vcpu_first_run_init() (diff)
KVM: arm64: Drop vcpu->arch.has_run_once for vcpu->pid
With the transition to kvm_arch_vcpu_run_pid_change() to handle the
"run once" activities, it becomes obvious that has_run_once is now an
exact shadow of vcpu->pid.

Replace vcpu->arch.has_run_once with a new vcpu_has_run_once() helper
that directly checks for vcpu->pid, and get rid of the now unused
field.

Reviewed-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
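The vcpu_has_run_once() helper itself is introduced outside this file, so it
does not appear in the diff below (which is limited to arch/arm64/kvm/arm.c).
A minimal sketch of what such a check could look like, assuming vcpu->pid is
the RCU-protected struct pid pointer that generic KVM assigns once the vCPU
thread first enters KVM_RUN:

/*
 * Sketch only -- the actual definition is not shown in this diff.
 * vcpu->pid is a struct pid __rcu * that is only set once the vCPU
 * has entered KVM_RUN, so a non-NULL pid is equivalent to the old
 * has_run_once flag.
 */
#define vcpu_has_run_once(vcpu)	(!!rcu_access_pointer((vcpu)->pid))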
Diffstat (limited to 'arch/arm64/kvm/arm.c')
-rw-r--r--	arch/arm64/kvm/arm.c | 8 +++-----
1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index f20a265972c7..c79d8e5230bc 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -351,7 +351,7 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
-	if (vcpu->arch.has_run_once && unlikely(!irqchip_in_kernel(vcpu->kvm)))
+	if (vcpu_has_run_once(vcpu) && unlikely(!irqchip_in_kernel(vcpu->kvm)))
 		static_branch_dec(&userspace_irqchip_in_use);
 
 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
@@ -609,7 +609,7 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
 	if (ret)
 		return ret;
 
-	if (likely(vcpu->arch.has_run_once))
+	if (likely(vcpu_has_run_once(vcpu)))
 		return 0;
 
 	kvm_arm_vcpu_init_debug(vcpu);
@@ -640,8 +640,6 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
 		static_branch_inc(&userspace_irqchip_in_use);
 	}
 
-	vcpu->arch.has_run_once = true;
-
 	/*
 	 * Initialize traps for protected VMs.
 	 * NOTE: Move to run in EL2 directly, rather than via a hypercall, once
@@ -1132,7 +1130,7 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
 	 * need to invalidate the I-cache though, as FWB does *not*
 	 * imply CTR_EL0.DIC.
 	 */
-	if (vcpu->arch.has_run_once) {
+	if (vcpu_has_run_once(vcpu)) {
 		if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
 			stage2_unmap_vm(vcpu->kvm);
 		else