author    | Nicholas Piggin <npiggin@gmail.com>   | 2022-09-08 15:25:45 +0200
committer | Michael Ellerman <mpe@ellerman.id.au> | 2022-09-30 10:35:38 +0200
commit    | e4335f53198fa0c0aefb2a38bb5518e94253412c (patch)
tree      | cf01bb08beb8977c6100088470b1a93818f95ea1
parent    | Merge branch 'topic/ppc-kvm' into next (diff)
KVM: PPC: Book3S HV: Implement scheduling wait interval counters in the VPA
PAPR specifies accumulated virtual processor wait intervals that relate
to partition scheduling interval times. Implement these counters in the
same way as they are reported by the dtl.
Reviewed-by: Fabiano Rosas <farosas@linux.ibm.com>
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20220908132545.4085849-5-npiggin@gmail.com
-rw-r--r-- | arch/powerpc/kvm/book3s_hv.c | 62
1 file changed, 41 insertions, 21 deletions
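The core of the change below is that the host accumulates the stolen timebase ticks into the VPA's big-endian enqueue_dispatch_tb field before writing the dispatch trace entry. The following is a minimal, self-contained sketch of that read-modify-write pattern, not kernel code: the byte-swap helpers and the vpa_counter variable are stand-ins for the kernel's be64_to_cpu()/cpu_to_be64() and vpa->enqueue_dispatch_tb.

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's be64 helpers on a little-endian host. */
static uint64_t be64_to_cpu_sketch(uint64_t v) { return __builtin_bswap64(v); }
static uint64_t cpu_to_be64_sketch(uint64_t v) { return __builtin_bswap64(v); }

int main(void)
{
	uint64_t vpa_counter = cpu_to_be64_sketch(1000); /* wait ticks accumulated so far */
	uint64_t stolen = 250;                           /* newly observed stolen tb ticks */

	/* Read-modify-write of the big-endian counter, mirroring the patch's
	 * accumulation into vpa->enqueue_dispatch_tb. */
	vpa_counter = cpu_to_be64_sketch(be64_to_cpu_sketch(vpa_counter) + stolen);

	printf("accumulated wait: %llu tb ticks\n",
	       (unsigned long long)be64_to_cpu_sketch(vpa_counter));
	return 0;
}
```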
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index ecce10a4486d..6ba68dd6190b 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -733,16 +733,15 @@ static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
 }
 
 static void __kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
+				       struct lppaca *vpa,
 				       unsigned int pcpu, u64 now,
 				       unsigned long stolen)
 {
 	struct dtl_entry *dt;
-	struct lppaca *vpa;
 
 	dt = vcpu->arch.dtl_ptr;
-	vpa = vcpu->arch.vpa.pinned_addr;
 
-	if (!dt || !vpa)
+	if (!dt)
 		return;
 
 	dt->dispatch_reason = 7;
@@ -763,29 +762,23 @@ static void __kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
 	/* order writing *dt vs. writing vpa->dtl_idx */
 	smp_wmb();
 	vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index);
-	vcpu->arch.dtl.dirty = true;
-}
-
-static void kvmppc_create_dtl_entry_p9(struct kvm_vcpu *vcpu,
-				       struct kvmppc_vcore *vc,
-				       u64 now)
-{
-	unsigned long stolen;
-
-	stolen = vc->stolen_tb - vcpu->arch.stolen_logged;
-	vcpu->arch.stolen_logged = vc->stolen_tb;
-
-	__kvmppc_create_dtl_entry(vcpu, vc->pcpu, now, stolen);
+	/* vcpu->arch.dtl.dirty is set by the caller */
 }
 
-static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
-				    struct kvmppc_vcore *vc)
+static void kvmppc_update_vpa_dispatch(struct kvm_vcpu *vcpu,
+				       struct kvmppc_vcore *vc)
 {
+	struct lppaca *vpa;
 	unsigned long stolen;
 	unsigned long core_stolen;
 	u64 now;
 	unsigned long flags;
 
+	vpa = vcpu->arch.vpa.pinned_addr;
+	if (!vpa)
+		return;
+
 	now = mftb();
 
 	core_stolen = vcore_stolen_time(vc, now);
@@ -796,7 +789,34 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
 	vcpu->arch.busy_stolen = 0;
 	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
 
-	__kvmppc_create_dtl_entry(vcpu, vc->pcpu, now + vc->tb_offset, stolen);
+	vpa->enqueue_dispatch_tb = cpu_to_be64(be64_to_cpu(vpa->enqueue_dispatch_tb) + stolen);
+
+	__kvmppc_create_dtl_entry(vcpu, vpa, vc->pcpu, now + vc->tb_offset, stolen);
+
+	vcpu->arch.vpa.dirty = true;
+}
+
+static void kvmppc_update_vpa_dispatch_p9(struct kvm_vcpu *vcpu,
+					  struct kvmppc_vcore *vc,
+					  u64 now)
+{
+	struct lppaca *vpa;
+	unsigned long stolen;
+	unsigned long stolen_delta;
+
+	vpa = vcpu->arch.vpa.pinned_addr;
+	if (!vpa)
+		return;
+
+	stolen = vc->stolen_tb;
+	stolen_delta = stolen - vcpu->arch.stolen_logged;
+	vcpu->arch.stolen_logged = stolen;
+
+	vpa->enqueue_dispatch_tb = cpu_to_be64(stolen);
+
+	__kvmppc_create_dtl_entry(vcpu, vpa, vc->pcpu, now, stolen_delta);
+
+	vcpu->arch.vpa.dirty = true;
 }
 
 /* See if there is a doorbell interrupt pending for a vcpu */
@@ -3852,7 +3872,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 		 * kvmppc_core_prepare_to_enter.
 		 */
 		kvmppc_start_thread(vcpu, pvc);
-		kvmppc_create_dtl_entry(vcpu, pvc);
+		kvmppc_update_vpa_dispatch(vcpu, pvc);
 		trace_kvm_guest_enter(vcpu);
 		if (!vcpu->arch.ptid)
 			thr0_done = true;
@@ -4449,7 +4469,7 @@ static int kvmppc_run_vcpu(struct kvm_vcpu *vcpu)
 		if ((vc->vcore_state == VCORE_PIGGYBACK ||
 		     vc->vcore_state == VCORE_RUNNING) &&
 		    !VCORE_IS_EXITING(vc)) {
-			kvmppc_create_dtl_entry(vcpu, vc);
+			kvmppc_update_vpa_dispatch(vcpu, vc);
 			kvmppc_start_thread(vcpu, vc);
 			trace_kvm_guest_enter(vcpu);
 		} else if (vc->vcore_state == VCORE_SLEEPING) {
@@ -4637,7 +4657,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 
 	tb = mftb();
 
-	kvmppc_create_dtl_entry_p9(vcpu, vc, tb + vc->tb_offset);
+	kvmppc_update_vpa_dispatch_p9(vcpu, vc, tb + vc->tb_offset);
 
 	trace_kvm_guest_enter(vcpu);
 
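For context on how these accumulated wait counters would be consumed: the values are raw timebase ticks, so a guest-side reader divides by the timebase frequency to obtain wall-clock wait time. A hypothetical sketch follows; the counter value, the 512 MHz timebase frequency, and all names below are illustrative assumptions, not an existing interface.

```c
#include <stdint.h>
#include <stdio.h>

/* Assumed timebase frequency; real code would read it from the device tree. */
#define TB_HZ_ASSUMED 512000000ull

/* Convert accumulated timebase ticks to seconds under the assumed frequency. */
static double tb_ticks_to_seconds(uint64_t ticks)
{
	return (double)ticks / (double)TB_HZ_ASSUMED;
}

int main(void)
{
	/* Example accumulated enqueue-to-dispatch wait, already byte-swapped to CPU order. */
	uint64_t enqueue_dispatch_ticks = 2560000000ull;

	printf("vcpu waited %.3f s between enqueue and dispatch\n",
	       tb_ticks_to_seconds(enqueue_dispatch_ticks));
	return 0;
}
```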